3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 14 Jan 2015 05:47:27 +0000 (21:47 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 14 Jan 2015 05:47:27 +0000 (21:47 -0800)
added patches:
acpi-pm-fix-pm-initialization-for-devices-that-are-not-present.patch
arm64-kernel-add-missing-__init-section-marker-to-cpu_suspend_init.patch
arm64-kernel-fix-__cpu_suspend-mm-switch-on-warm-boot.patch
arm64-kernel-refactor-the-cpu-suspend-api-for-retention-states.patch
arm64-move-cpu_resume-into-the-text-section.patch
btrfs-don-t-delay-inode-ref-updates-during-log-replay.patch
perf-fix-events-installation-during-moving-group.patch
perf-session-do-not-fail-on-processing-out-of-order-event.patch
perf-x86-intel-uncore-make-sure-only-uncore-events-are-collected.patch

queue-3.14/acpi-pm-fix-pm-initialization-for-devices-that-are-not-present.patch [new file with mode: 0644]
queue-3.14/arm64-kernel-add-missing-__init-section-marker-to-cpu_suspend_init.patch [new file with mode: 0644]
queue-3.14/arm64-kernel-fix-__cpu_suspend-mm-switch-on-warm-boot.patch [new file with mode: 0644]
queue-3.14/arm64-kernel-refactor-the-cpu-suspend-api-for-retention-states.patch [new file with mode: 0644]
queue-3.14/arm64-move-cpu_resume-into-the-text-section.patch [new file with mode: 0644]
queue-3.14/btrfs-don-t-delay-inode-ref-updates-during-log-replay.patch [new file with mode: 0644]
queue-3.14/perf-fix-events-installation-during-moving-group.patch [new file with mode: 0644]
queue-3.14/perf-session-do-not-fail-on-processing-out-of-order-event.patch [new file with mode: 0644]
queue-3.14/perf-x86-intel-uncore-make-sure-only-uncore-events-are-collected.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/acpi-pm-fix-pm-initialization-for-devices-that-are-not-present.patch b/queue-3.14/acpi-pm-fix-pm-initialization-for-devices-that-are-not-present.patch
new file mode 100644 (file)
index 0000000..1a76594
--- /dev/null
@@ -0,0 +1,99 @@
+From 1b1f3e1699a9886f1070f94171097ab4ccdbfc95 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Thu, 1 Jan 2015 23:38:28 +0100
+Subject: ACPI / PM: Fix PM initialization for devices that are not present
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+commit 1b1f3e1699a9886f1070f94171097ab4ccdbfc95 upstream.
+
+If an ACPI device object whose _STA returns 0 (not present and not
+functional) has _PR0 or _PS0, its power_manageable flag will be set
+and acpi_bus_init_power() will return 0 for it.  Consequently, if
+such a device object is passed to the ACPI device PM functions, they
+will attempt to carry out the requested operation on the device,
+although they should not do that for devices that are not present.
+
+To fix that problem, make acpi_bus_init_power() return an error code
+for devices that are not present, which will cause power_manageable to
+be cleared for them as appropriate in acpi_bus_get_power_flags().
+However, the lists of power resources should not be freed for the
+device in that case, so modify acpi_bus_get_power_flags() to keep
+those lists even if acpi_bus_init_power() returns an error.
+Accordingly, when deciding whether or not the lists of power
+resources need to be freed, acpi_free_power_resources_lists()
+should check the power.flags.power_resources flag instead of
+flags.power_manageable, so make that change too.
+
+Furthermore, if acpi_bus_attach() sees that flags.initialized is
+unset for the given device, it should reset the power management
+settings of the device and re-initialize them from scratch instead
+of relying on the previous settings (the device may have appeared
+after being not present previously, for example), so make it use
+the 'valid' flag of the D0 power state as the initial value of
+flags.power_manageable for it and call acpi_bus_init_power() to
+discover its current power state.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/device_pm.c |    2 +-
+ drivers/acpi/scan.c      |   13 ++++++++-----
+ 2 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_devi
+       device->power.state = ACPI_STATE_UNKNOWN;
+       if (!acpi_device_is_present(device))
+-              return 0;
++              return -ENXIO;
+       result = acpi_device_get_power(device, &state);
+       if (result)
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -865,7 +865,7 @@ static void acpi_free_power_resources_li
+       if (device->wakeup.flags.valid)
+               acpi_power_resources_list_free(&device->wakeup.resources);
+-      if (!device->flags.power_manageable)
++      if (!device->power.flags.power_resources)
+               return;
+       for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
+@@ -1554,10 +1554,8 @@ static void acpi_bus_get_power_flags(str
+                       device->power.flags.power_resources)
+               device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
+-      if (acpi_bus_init_power(device)) {
+-              acpi_free_power_resources_lists(device);
++      if (acpi_bus_init_power(device))
+               device->flags.power_manageable = 0;
+-      }
+ }
+ static void acpi_bus_get_flags(struct acpi_device *device)
+@@ -2043,13 +2041,18 @@ static void acpi_bus_attach(struct acpi_
+       /* Skip devices that are not present. */
+       if (!acpi_device_is_present(device)) {
+               device->flags.visited = false;
++              device->flags.power_manageable = 0;
+               return;
+       }
+       if (device->handler)
+               goto ok;
+       if (!device->flags.initialized) {
+-              acpi_bus_update_power(device, NULL);
++              device->flags.power_manageable =
++                      device->power.states[ACPI_STATE_D0].flags.valid;
++              if (acpi_bus_init_power(device))
++                      device->flags.power_manageable = 0;
++
+               device->flags.initialized = true;
+       }
+       device->flags.visited = false;
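
For illustration, here is a minimal standalone C mock of the new behaviour
(all mock_* names are hypothetical, not kernel code): the init-power step
now fails for absent devices, the caller clears power_manageable in
response, and the power-resource lists are left allocated:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct mock_acpi_device {
            bool present;
            bool power_manageable;
            bool power_resources;   /* power-resource lists stay allocated */
    };

    /* after the fix: report an error instead of 0 for absent devices */
    static int mock_init_power(struct mock_acpi_device *dev)
    {
            if (!dev->present)
                    return -ENXIO;
            return 0;
    }

    static void mock_get_power_flags(struct mock_acpi_device *dev)
    {
            dev->power_manageable = true;
            if (mock_init_power(dev))
                    dev->power_manageable = false;  /* lists no longer freed here */
    }

    int main(void)
    {
            struct mock_acpi_device dev = {
                    .present = false, .power_resources = true,
            };

            mock_get_power_flags(&dev);
            printf("power_manageable=%d, lists kept=%d\n",
                   dev.power_manageable, dev.power_resources);
            return 0;
    }
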
diff --git a/queue-3.14/arm64-kernel-add-missing-__init-section-marker-to-cpu_suspend_init.patch b/queue-3.14/arm64-kernel-add-missing-__init-section-marker-to-cpu_suspend_init.patch
new file mode 100644 (file)
index 0000000..0c5055d
--- /dev/null
@@ -0,0 +1,32 @@
+From 18ab7db6b749ac27aac08d572afbbd2f4d937934 Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi <Lorenzo.Pieralisi@arm.com>
+Date: Thu, 17 Jul 2014 18:19:20 +0100
+Subject: arm64: kernel: add missing __init section marker to cpu_suspend_init
+
+From: Lorenzo Pieralisi <Lorenzo.Pieralisi@arm.com>
+
+commit 18ab7db6b749ac27aac08d572afbbd2f4d937934 upstream.
+
+The suspend init function must be marked as __init, since it is not
+needed after the kernel has booted. This patch moves the
+cpu_suspend_init() function to the __init section.
+
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/suspend.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg)
+ extern struct sleep_save_sp sleep_save_sp;
+ extern phys_addr_t sleep_idmap_phys;
+-static int cpu_suspend_init(void)
++static int __init cpu_suspend_init(void)
+ {
+       void *ctx_ptr;
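
As a rough userspace analogue of what __init does (an illustrative
sketch, not kernel code): the annotation places the function in a
dedicated section, and the kernel frees the memory backing .init.text
once boot is complete:

    #include <stdio.h>

    /* approximation of the kernel's __init annotation */
    #define __init __attribute__((__section__(".init.text")))

    static int __init mock_cpu_suspend_init(void)
    {
            return 0;       /* runs once; the kernel then discards the section */
    }

    int main(void)
    {
            printf("init returned %d\n", mock_cpu_suspend_init());
            return 0;
    }
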
diff --git a/queue-3.14/arm64-kernel-fix-__cpu_suspend-mm-switch-on-warm-boot.patch b/queue-3.14/arm64-kernel-fix-__cpu_suspend-mm-switch-on-warm-boot.patch
new file mode 100644 (file)
index 0000000..3ad27eb
--- /dev/null
@@ -0,0 +1,71 @@
+From f43c27188a49111b58e9611afa2f0365b0b55625 Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Date: Fri, 19 Dec 2014 17:03:47 +0000
+Subject: arm64: kernel: fix __cpu_suspend mm switch on warm-boot
+
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+
+commit f43c27188a49111b58e9611afa2f0365b0b55625 upstream.
+
+On arm64 the TTBR0_EL1 register is set either to the reserved TTBR0
+page tables on boot or to the active_mm mappings belonging to user space
+processes; it must never be set to swapper_pg_dir page table mappings.
+
+When a CPU is booted, its active_mm is set to init_mm even though its
+TTBR0_EL1 points at the reserved TTBR0 page mappings. This implies
+that when __cpu_suspend is triggered the active_mm can point at
+init_mm even if the current TTBR0_EL1 register contains the reserved
+TTBR0_EL1 mappings.
+
+Therefore, the mm save and restore executed in __cpu_suspend might
+turn out to be erroneous: if current->active_mm corresponds to
+init_mm, on resume from low power the code ends up restoring into
+TTBR0_EL1 the init_mm mappings, which are global and can cause
+speculatively loaded TLB entries to be propagated to user space.
+
+This patch fixes the issue by checking the active_mm pointer before
+restoring the TTBR0 mappings. If the current active_mm == &init_mm,
+the code sets the TTBR0_EL1 to the reserved TTBR0 mapping instead of
+switching back to the active_mm, which is the expected behaviour
+corresponding to the TTBR0_EL1 settings when __cpu_suspend was entered.
+
+Fixes: 95322526ef62 ("arm64: kernel: cpu_{suspend/resume} implementation")
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/suspend.c |   14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -5,6 +5,7 @@
+ #include <asm/debug-monitors.h>
+ #include <asm/pgtable.h>
+ #include <asm/memory.h>
++#include <asm/mmu_context.h>
+ #include <asm/smp_plat.h>
+ #include <asm/suspend.h>
+ #include <asm/tlbflush.h>
+@@ -98,7 +99,18 @@ int __cpu_suspend(unsigned long arg, int
+        */
+       ret = __cpu_suspend_enter(arg, fn);
+       if (ret == 0) {
+-              cpu_switch_mm(mm->pgd, mm);
++              /*
++               * We are resuming from reset with TTBR0_EL1 set to the
++               * idmap to enable the MMU; restore the active_mm mappings in
++               * TTBR0_EL1 unless the active_mm == &init_mm, in which case
++               * the thread entered __cpu_suspend with TTBR0_EL1 set to
++               * reserved TTBR0 page tables and should be restored as such.
++               */
++              if (mm == &init_mm)
++                      cpu_set_reserved_ttbr0();
++              else
++                      cpu_switch_mm(mm->pgd, mm);
++
+               flush_tlb_all();
+               /*
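
The decision the fix implements can be mocked in a few lines of
standalone C (a sketch with hypothetical names, not the kernel code):

    #include <stdio.h>

    struct mm { const char *name; };
    static struct mm init_mm = { "init_mm" };

    static void cpu_set_reserved_ttbr0(void) { printf("TTBR0 <- reserved tables\n"); }
    static void cpu_switch_mm(struct mm *mm) { printf("TTBR0 <- %s\n", mm->name); }

    /* on the resume path, never expose init_mm's global mappings via TTBR0 */
    static void restore_ttbr0(struct mm *active_mm)
    {
            if (active_mm == &init_mm)
                    cpu_set_reserved_ttbr0();
            else
                    cpu_switch_mm(active_mm);
    }

    int main(void)
    {
            struct mm user_mm = { "user_mm" };

            restore_ttbr0(&init_mm);    /* thread entered with reserved tables */
            restore_ttbr0(&user_mm);    /* normal user address space */
            return 0;
    }
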
diff --git a/queue-3.14/arm64-kernel-refactor-the-cpu-suspend-api-for-retention-states.patch b/queue-3.14/arm64-kernel-refactor-the-cpu-suspend-api-for-retention-states.patch
new file mode 100644 (file)
index 0000000..f38c7c6
--- /dev/null
@@ -0,0 +1,270 @@
+From 714f59925595b9c2ea9c22b107b340d38e3b3bc9 Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Date: Thu, 7 Aug 2014 14:54:50 +0100
+Subject: arm64: kernel: refactor the CPU suspend API for retention states
+
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+
+commit 714f59925595b9c2ea9c22b107b340d38e3b3bc9 upstream.
+
+CPU suspend is the standard kernel interface to be used to enter
+low-power states on ARM64 systems. The current cpu_suspend
+implementation assumes by default that all low-power states lose the
+CPU context, so the CPU registers must be saved and cleaned to DRAM
+upon state entry. Furthermore, the current cpu_suspend() implementation
+assumes that if the CPU suspend back-end method returns when called,
+this has to be considered an error regardless of the return code (which
+can be successful), since the CPU was not expected to return from a
+code path different from the cpu_resume code path - eg returning from
+the reset vector.
+
+All in all this means that the current API does not cope well with
+low-power states that preserve the CPU context when entered (ie
+retention states): the context is saved for nothing on state entry for
+those states, and a successful state entry can return as a normal
+function return, which is considered an error by the current CPU
+suspend implementation.
+
+This patch refactors the cpu_suspend() API so that it is split into
+two separate functionalities. The arm64 cpu_suspend API just provides
+a wrapper around the CPU suspend operation hook. A new function is
+introduced (for architecture code use only) for states that require
+context saving upon entry:
+
+__cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+
+__cpu_suspend() saves the context on function entry and calls the
+so-called suspend finisher (ie fn) to complete the suspend operation.
+The finisher is not expected to return unless it fails, in which case
+the error is propagated back to the __cpu_suspend caller.
+
+The API refactoring results in the following pseudo code call sequence for a
+suspending CPU, when triggered from a kernel subsystem:
+
+/*
+ * int cpu_suspend(unsigned long idx)
+ * @idx: idle state index
+ */
+{
+-> cpu_suspend(idx)
+       |---> CPU operations suspend hook called, if present
+               |--> if (retention_state)
+                       |--> direct suspend back-end call (eg PSCI suspend)
+                    else
+                       |--> __cpu_suspend(idx, &back_end_finisher);
+}
+
+By refactoring the cpu_suspend API this way, the CPU operations back-end
+has a chance to detect whether idle states require state saving or not
+and can call the required suspend operations accordingly, either through
+a simple function call or indirectly through __cpu_suspend(), which
+carries out state saving and suspend finisher dispatching to complete
+idle state entry.
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Hanjun Guo <hanjun.guo@linaro.org>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/suspend.h |    1 
+ arch/arm64/kernel/sleep.S        |   47 +++++++++++++++++++++++++++-----------
+ arch/arm64/kernel/suspend.c      |   48 +++++++++++++++++++++++----------------
+ 3 files changed, 64 insertions(+), 32 deletions(-)
+
+--- a/arch/arm64/include/asm/suspend.h
++++ b/arch/arm64/include/asm/suspend.h
+@@ -21,6 +21,7 @@ struct sleep_save_sp {
+       phys_addr_t save_ptr_stash_phys;
+ };
++extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
+ extern void cpu_resume(void);
+ extern int cpu_suspend(unsigned long);
+--- a/arch/arm64/kernel/sleep.S
++++ b/arch/arm64/kernel/sleep.S
+@@ -49,28 +49,39 @@
+       orr     \dst, \dst, \mask               // dst|=(aff3>>rs3)
+       .endm
+ /*
+- * Save CPU state for a suspend.  This saves callee registers, and allocates
+- * space on the kernel stack to save the CPU specific registers + some
+- * other data for resume.
++ * Save CPU state for a suspend and execute the suspend finisher.
++ * On success it will return 0 through cpu_resume - ie through a CPU
++ * soft/hard reboot from the reset vector.
++ * On failure it returns the suspend finisher return value or force
++ * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
++ * is not allowed to return, if it does this must be considered failure).
++ * It saves callee registers, and allocates space on the kernel stack
++ * to save the CPU specific registers + some other data for resume.
+  *
+  *  x0 = suspend finisher argument
++ *  x1 = suspend finisher function pointer
+  */
+-ENTRY(__cpu_suspend)
++ENTRY(__cpu_suspend_enter)
+       stp     x29, lr, [sp, #-96]!
+       stp     x19, x20, [sp,#16]
+       stp     x21, x22, [sp,#32]
+       stp     x23, x24, [sp,#48]
+       stp     x25, x26, [sp,#64]
+       stp     x27, x28, [sp,#80]
++      /*
++       * Stash suspend finisher and its argument in x20 and x19
++       */
++      mov     x19, x0
++      mov     x20, x1
+       mov     x2, sp
+       sub     sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
+-      mov     x1, sp
++      mov     x0, sp
+       /*
+-       * x1 now points to struct cpu_suspend_ctx allocated on the stack
++       * x0 now points to struct cpu_suspend_ctx allocated on the stack
+        */
+-      str     x2, [x1, #CPU_CTX_SP]
+-      ldr     x2, =sleep_save_sp
+-      ldr     x2, [x2, #SLEEP_SAVE_SP_VIRT]
++      str     x2, [x0, #CPU_CTX_SP]
++      ldr     x1, =sleep_save_sp
++      ldr     x1, [x1, #SLEEP_SAVE_SP_VIRT]
+ #ifdef CONFIG_SMP
+       mrs     x7, mpidr_el1
+       ldr     x9, =mpidr_hash
+@@ -82,11 +93,21 @@ ENTRY(__cpu_suspend)
+       ldp     w3, w4, [x9, #MPIDR_HASH_SHIFTS]
+       ldp     w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
+       compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
+-      add     x2, x2, x8, lsl #3
++      add     x1, x1, x8, lsl #3
+ #endif
+-      bl      __cpu_suspend_finisher
++      bl      __cpu_suspend_save
++      /*
++       * Grab suspend finisher in x20 and its argument in x19
++       */
++      mov     x0, x19
++      mov     x1, x20
++      /*
++       * We are ready for power down, fire off the suspend finisher
++       * in x1, with argument in x0
++       */
++      blr     x1
+         /*
+-       * Never gets here, unless suspend fails.
++       * Never gets here, unless suspend finisher fails.
+        * Successful cpu_suspend should return from cpu_resume, returning
+        * through this code path is considered an error
+        * If the return value is set to 0 force x0 = -EOPNOTSUPP
+@@ -103,7 +124,7 @@ ENTRY(__cpu_suspend)
+       ldp     x27, x28, [sp, #80]
+       ldp     x29, lr, [sp], #96
+       ret
+-ENDPROC(__cpu_suspend)
++ENDPROC(__cpu_suspend_enter)
+       .ltorg
+ /*
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -9,22 +9,19 @@
+ #include <asm/suspend.h>
+ #include <asm/tlbflush.h>
+-extern int __cpu_suspend(unsigned long);
++extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
+ /*
+- * This is called by __cpu_suspend() to save the state, and do whatever
++ * This is called by __cpu_suspend_enter() to save the state, and do whatever
+  * flushing is required to ensure that when the CPU goes to sleep we have
+  * the necessary data available when the caches are not searched.
+  *
+- * @arg: Argument to pass to suspend operations
+- * @ptr: CPU context virtual address
+- * @save_ptr: address of the location where the context physical address
+- *            must be saved
++ * ptr: CPU context virtual address
++ * save_ptr: address of the location where the context physical address
++ *           must be saved
+  */
+-int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
+-                         phys_addr_t *save_ptr)
++void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
++                              phys_addr_t *save_ptr)
+ {
+-      int cpu = smp_processor_id();
+-
+       *save_ptr = virt_to_phys(ptr);
+       cpu_do_suspend(ptr);
+@@ -35,8 +32,6 @@ int __cpu_suspend_finisher(unsigned long
+        */
+       __flush_dcache_area(ptr, sizeof(*ptr));
+       __flush_dcache_area(save_ptr, sizeof(*save_ptr));
+-
+-      return cpu_ops[cpu]->cpu_suspend(arg);
+ }
+ /*
+@@ -56,15 +51,15 @@ void __init cpu_suspend_set_dbg_restorer
+ }
+ /**
+- * cpu_suspend
++ * cpu_suspend() - function to enter a low-power state
++ * @arg: argument to pass to CPU suspend operations
+  *
+- * @arg: argument to pass to the finisher function
++ * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
++ * operations back-end error code otherwise.
+  */
+ int cpu_suspend(unsigned long arg)
+ {
+-      struct mm_struct *mm = current->active_mm;
+-      int ret, cpu = smp_processor_id();
+-      unsigned long flags;
++      int cpu = smp_processor_id();
+       /*
+        * If cpu_ops have not been registered or suspend
+@@ -72,6 +67,21 @@ int cpu_suspend(unsigned long arg)
+        */
+       if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+               return -EOPNOTSUPP;
++      return cpu_ops[cpu]->cpu_suspend(arg);
++}
++
++/*
++ * __cpu_suspend
++ *
++ * arg: argument to pass to the finisher function
++ * fn: finisher function pointer
++ *
++ */
++int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
++{
++      struct mm_struct *mm = current->active_mm;
++      int ret;
++      unsigned long flags;
+       /*
+        * From this point debug exceptions are disabled to prevent
+@@ -86,7 +96,7 @@ int cpu_suspend(unsigned long arg)
+        * page tables, so that the thread address space is properly
+        * set-up on function return.
+        */
+-      ret = __cpu_suspend(arg);
++      ret = __cpu_suspend_enter(arg, fn);
+       if (ret == 0) {
+               cpu_switch_mm(mm->pgd, mm);
+               flush_tlb_all();
+@@ -95,7 +105,7 @@ int cpu_suspend(unsigned long arg)
+                * Restore per-cpu offset before any kernel
+                * subsystem relying on it has a chance to run.
+                */
+-              set_my_cpu_offset(per_cpu_offset(cpu));
++              set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+               /*
+                * Restore HW breakpoint registers to sane values
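
A standalone C mock of the resulting dispatch may help (hypothetical
names throughout; whether a state index denotes a retention state is an
assumption of the sketch, not something this patch defines):

    #include <stdio.h>

    /* only returns on failure in the real kernel; here it just reports */
    static int backend_finisher(unsigned long index)
    {
            printf("firmware suspend call, state %lu\n", index);
            return 0;
    }

    /* stand-in for __cpu_suspend(): save context, then run the finisher */
    static int mock_cpu_suspend_ctx(unsigned long arg, int (*fn)(unsigned long))
    {
            printf("saving CPU context\n");
            return fn(arg);
    }

    static int mock_cpu_ops_suspend(unsigned long index)
    {
            int retention = (index == 0);   /* assumed encoding */

            if (retention)
                    return backend_finisher(index);  /* plain call, no save */
            return mock_cpu_suspend_ctx(index, backend_finisher);
    }

    int main(void)
    {
            mock_cpu_ops_suspend(0);    /* retention: direct back-end call */
            mock_cpu_ops_suspend(1);    /* context-losing: save first */
            return 0;
    }
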
diff --git a/queue-3.14/arm64-move-cpu_resume-into-the-text-section.patch b/queue-3.14/arm64-move-cpu_resume-into-the-text-section.patch
new file mode 100644 (file)
index 0000000..b405848
--- /dev/null
@@ -0,0 +1,110 @@
+From c3684fbb446501b48dec6677a6a9f61c215053de Mon Sep 17 00:00:00 2001
+From: Laura Abbott <lauraa@codeaurora.org>
+Date: Fri, 21 Nov 2014 21:50:40 +0000
+Subject: arm64: Move cpu_resume into the text section
+
+From: Laura Abbott <lauraa@codeaurora.org>
+
+commit c3684fbb446501b48dec6677a6a9f61c215053de upstream.
+
+The function cpu_resume currently lives in the .data section.
+There's no reason for it to be there since we can use relative
+instructions without a problem. Move a few cpu_resume data
+structures out of the assembly file so the .data annotation
+can be dropped completely and cpu_resume ends up in the
+read-only text section.
+
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Tested-by: Kees Cook <keescook@chromium.org>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/sleep.S   |   36 ++++++------------------------------
+ arch/arm64/kernel/suspend.c |    4 ++--
+ 2 files changed, 8 insertions(+), 32 deletions(-)
+
+--- a/arch/arm64/kernel/sleep.S
++++ b/arch/arm64/kernel/sleep.S
+@@ -147,14 +147,12 @@ cpu_resume_after_mmu:
+       ret
+ ENDPROC(cpu_resume_after_mmu)
+-      .data
+ ENTRY(cpu_resume)
+       bl      el2_setup               // if in EL2 drop to EL1 cleanly
+ #ifdef CONFIG_SMP
+       mrs     x1, mpidr_el1
+-      adr     x4, mpidr_hash_ptr
+-      ldr     x5, [x4]
+-      add     x8, x4, x5              // x8 = struct mpidr_hash phys address
++      adrp    x8, mpidr_hash
++      add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
+         /* retrieve mpidr_hash members to compute the hash */
+       ldr     x2, [x8, #MPIDR_HASH_MASK]
+       ldp     w3, w4, [x8, #MPIDR_HASH_SHIFTS]
+@@ -164,14 +162,15 @@ ENTRY(cpu_resume)
+ #else
+       mov     x7, xzr
+ #endif
+-      adr     x0, sleep_save_sp
++      adrp    x0, sleep_save_sp
++      add     x0, x0, #:lo12:sleep_save_sp
+       ldr     x0, [x0, #SLEEP_SAVE_SP_PHYS]
+       ldr     x0, [x0, x7, lsl #3]
+       /* load sp from context */
+       ldr     x2, [x0, #CPU_CTX_SP]
+-      adr     x1, sleep_idmap_phys
++      adrp    x1, sleep_idmap_phys
+       /* load physical address of identity map page table in x1 */
+-      ldr     x1, [x1]
++      ldr     x1, [x1, #:lo12:sleep_idmap_phys]
+       mov     sp, x2
+       /*
+        * cpu_do_resume expects x0 to contain context physical address
+@@ -180,26 +179,3 @@ ENTRY(cpu_resume)
+       bl      cpu_do_resume           // PC relative jump, MMU off
+       b       cpu_resume_mmu          // Resume MMU, never returns
+ ENDPROC(cpu_resume)
+-
+-      .align 3
+-mpidr_hash_ptr:
+-      /*
+-       * offset of mpidr_hash symbol from current location
+-       * used to obtain run-time mpidr_hash address with MMU off
+-         */
+-      .quad   mpidr_hash - .
+-/*
+- * physical address of identity mapped page tables
+- */
+-      .type   sleep_idmap_phys, #object
+-ENTRY(sleep_idmap_phys)
+-      .quad   0
+-/*
+- * struct sleep_save_sp {
+- *    phys_addr_t *save_ptr_stash;
+- *    phys_addr_t save_ptr_stash_phys;
+- * };
+- */
+-      .type   sleep_save_sp, #object
+-ENTRY(sleep_save_sp)
+-      .space  SLEEP_SAVE_SP_SZ        // struct sleep_save_sp
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -126,8 +126,8 @@ int __cpu_suspend(unsigned long arg, int
+       return ret;
+ }
+-extern struct sleep_save_sp sleep_save_sp;
+-extern phys_addr_t sleep_idmap_phys;
++struct sleep_save_sp sleep_save_sp;
++phys_addr_t sleep_idmap_phys;
+ static int __init cpu_suspend_init(void)
+ {
diff --git a/queue-3.14/btrfs-don-t-delay-inode-ref-updates-during-log-replay.patch b/queue-3.14/btrfs-don-t-delay-inode-ref-updates-during-log-replay.patch
new file mode 100644 (file)
index 0000000..90216b7
--- /dev/null
@@ -0,0 +1,50 @@
+From 6f8960541b1eb6054a642da48daae2320fddba93 Mon Sep 17 00:00:00 2001
+From: Chris Mason <clm@fb.com>
+Date: Wed, 31 Dec 2014 12:18:29 -0500
+Subject: Btrfs: don't delay inode ref updates during log replay
+
+From: Chris Mason <clm@fb.com>
+
+commit 6f8960541b1eb6054a642da48daae2320fddba93 upstream.
+
+Commit 1d52c78afbb (Btrfs: try not to ENOSPC on log replay) added a
+check to skip delayed inode updates during log replay because it
+confuses the enospc code.  But the delayed processing will end up
+ignoring delayed refs from log replay because the inode itself wasn't
+put through the delayed code.
+
+This can end up triggering a warning at commit time:
+
+WARNING: CPU: 2 PID: 778 at fs/btrfs/delayed-inode.c:1410 btrfs_assert_delayed_root_empty+0x32/0x34()
+
+This warning is repeated for each commit because we never process the
+delayed inode ref update.
+
+The fix used here is to change btrfs_delayed_delete_inode_ref to return
+an error if we're currently in log replay.  The caller will do the ref
+deletion immediately and everything will work properly.
+
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/delayed-inode.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1854,6 +1854,14 @@ int btrfs_delayed_delete_inode_ref(struc
+ {
+       struct btrfs_delayed_node *delayed_node;
++      /*
++       * we don't do delayed inode updates during log recovery because it
++       * leads to enospc problems.  This means we also can't do
++       * delayed inode refs
++       */
++      if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
++              return -EAGAIN;
++
+       delayed_node = btrfs_get_or_create_delayed_node(inode);
+       if (IS_ERR(delayed_node))
+               return PTR_ERR(delayed_node);
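
The control flow of the fix, mocked as standalone C (illustrative names
only): the delayed path refuses work during log replay and the caller
falls back to the immediate update:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool log_root_recovering = true;

    static int mock_delayed_delete_inode_ref(void)
    {
            if (log_root_recovering)
                    return -EAGAIN;     /* tell the caller to do it now */
            printf("queued delayed inode ref update\n");
            return 0;
    }

    static void mock_delete_inode_ref(void)
    {
            if (mock_delayed_delete_inode_ref() == -EAGAIN)
                    printf("doing the inode ref update immediately\n");
    }

    int main(void)
    {
            mock_delete_inode_ref();
            return 0;
    }
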
diff --git a/queue-3.14/perf-fix-events-installation-during-moving-group.patch b/queue-3.14/perf-fix-events-installation-during-moving-group.patch
new file mode 100644 (file)
index 0000000..a6aba52
--- /dev/null
@@ -0,0 +1,76 @@
+From 9fc81d87420d0d3fd62d5e5529972c0ad9eab9cc Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 10 Dec 2014 21:23:51 +0100
+Subject: perf: Fix events installation during moving group
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit 9fc81d87420d0d3fd62d5e5529972c0ad9eab9cc upstream.
+
+We allow the PMU driver to change the cpu on which an event
+should be installed. This happened in patch:
+
+  e2d37cd213dc ("perf: Allow the PMU driver to choose the CPU on which to install events")
+
+That patch also forces all the group members to follow
+the currently opened event's cpu if the group happened
+to be moved.
+
+This, plus the change of event->cpu in the
+perf_install_in_context() function introduced in:
+
+  0cda4c023132 ("perf: Introduce perf_pmu_migrate_context()")
+
+forces group members to change their event->cpu
+if the currently-opened event's PMU changed the cpu
+and there is a group move.
+
+The above behaviour causes problems for breakpoint events,
+which use event->cpu to touch cpu-specific data for
+breakpoint accounting. By changing event->cpu, some
+breakpoint slots were wrongly accounted to a given
+cpu.
+
+Vince's perf fuzzer hit this issue and caused the following
+WARN on my setup:
+
+   WARNING: CPU: 0 PID: 20214 at arch/x86/kernel/hw_breakpoint.c:119 arch_install_hw_breakpoint+0x142/0x150()
+   Can't find any breakpoint slot
+   [...]
+
+This patch changes the group moving code to keep the event's
+original cpu.
+
+Reported-by: Vince Weaver <vince@deater.net>
+Signed-off-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Vince Weaver <vince@deater.net>
+Cc: Yan, Zheng <zheng.z.yan@intel.com>
+Link: http://lkml.kernel.org/r/1418243031-20367-3-git-send-email-jolsa@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7240,11 +7240,11 @@ SYSCALL_DEFINE5(perf_event_open,
+       if (move_group) {
+               synchronize_rcu();
+-              perf_install_in_context(ctx, group_leader, event->cpu);
++              perf_install_in_context(ctx, group_leader, group_leader->cpu);
+               get_ctx(ctx);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+-                      perf_install_in_context(ctx, sibling, event->cpu);
++                      perf_install_in_context(ctx, sibling, sibling->cpu);
+                       get_ctx(ctx);
+               }
+       }
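
The essence of the change, as a standalone C sketch (hypothetical
names): every event in the moved group is reinstalled on the cpu it was
opened on, instead of inheriting the newly opened event's cpu:

    #include <stdio.h>

    struct mock_event { int cpu; };

    static void mock_install_in_context(struct mock_event *e, int cpu)
    {
            printf("event opened on cpu %d installed on cpu %d\n",
                   e->cpu, cpu);
    }

    int main(void)
    {
            struct mock_event leader = { .cpu = 0 };
            struct mock_event siblings[] = { { .cpu = 1 }, { .cpu = 2 } };

            /* was: both calls passed the new event's cpu */
            mock_install_in_context(&leader, leader.cpu);
            for (unsigned int i = 0; i < 2; i++)
                    mock_install_in_context(&siblings[i], siblings[i].cpu);
            return 0;
    }
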
diff --git a/queue-3.14/perf-session-do-not-fail-on-processing-out-of-order-event.patch b/queue-3.14/perf-session-do-not-fail-on-processing-out-of-order-event.patch
new file mode 100644 (file)
index 0000000..6892383
--- /dev/null
@@ -0,0 +1,86 @@
+From f61ff6c06dc8f32c7036013ad802c899ec590607 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 26 Nov 2014 16:39:31 +0100
+Subject: perf session: Do not fail on processing out of order event
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit f61ff6c06dc8f32c7036013ad802c899ec590607 upstream.
+
+Linus reported the perf report command being interrupted due to the
+processing of an 'out of order' event, with the following error:
+
+  Timestamp below last timeslice flush
+  0x5733a8 [0x28]: failed to process type: 3
+
+I could reproduce the issue; in my case it was caused by one CPU's
+mmap being behind during record, with the userspace mmap reader seeing
+its data after other CPUs' data were already stored.
+
+This is expected under some circumstances because we need to limit the
+number of events that we queue for reordering when we receive a
+PERF_RECORD_FINISHED_ROUND or when we force a flush due to memory
+pressure.
+
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matt Fleming <matt.fleming@intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Stephane Eranian <eranian@google.com>
+Link: http://lkml.kernel.org/r/1417016371-30249-1-git-send-email-jolsa@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+[zhangzhiqiang: backport to 3.10:
+ - adjust context
+ - in commit f61ff6c06d, struct events_stats was defined in tools/perf/util/event.h,
+   while in 3.10 stable it is defined in tools/perf/util/hist.h.
+ - 3.10 stable has no pr_oe_time(), which is used for debug output.
+ - after the above adjustments, the result is the same as the original patch:
+   https://github.com/torvalds/linux/commit/f61ff6c06dc8f32c7036013ad802c899ec590607
+]
+Signed-off-by: Zhiqiang Zhang <zhangzhiqiang.zhang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/util/hist.h    |    1 +
+ tools/perf/util/session.c |    5 +++--
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/tools/perf/util/hist.h
++++ b/tools/perf/util/hist.h
+@@ -36,6 +36,7 @@ struct events_stats {
+       u32 nr_invalid_chains;
+       u32 nr_unknown_id;
+       u32 nr_unprocessable_samples;
++      u32 nr_unordered_events;
+ };
+ enum hist_column {
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -638,8 +638,7 @@ int perf_session_queue_event(struct perf
+               return -ETIME;
+       if (timestamp < s->ordered_samples.last_flush) {
+-              printf("Warning: Timestamp below last timeslice flush\n");
+-              return -EINVAL;
++              s->stats.nr_unordered_events++;
+       }
+       if (!list_empty(sc)) {
+@@ -1135,6 +1134,8 @@ static void perf_session__warn_about_err
+                           "Do you have a KVM guest running and not using 'perf kvm'?\n",
+                           session->stats.nr_unprocessable_samples);
+       }
++      if (session->stats.nr_unordered_events != 0)
++              ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
+ }
+ volatile int session_done;
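
A standalone C sketch of the new handling (hypothetical names): an out
of order timestamp is counted and reported once at the end rather than
aborting processing:

    #include <stdio.h>

    static unsigned int nr_unordered_events;
    static unsigned long last_flush = 100;

    static int mock_queue_event(unsigned long timestamp)
    {
            if (timestamp < last_flush)
                    nr_unordered_events++;  /* was: warn and return -EINVAL */
            return 0;                       /* keep processing either way */
    }

    int main(void)
    {
            mock_queue_event(42);           /* below the last flush */
            mock_queue_event(200);
            if (nr_unordered_events != 0)
                    fprintf(stderr, "%u out of order events recorded.\n",
                            nr_unordered_events);
            return 0;
    }
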
diff --git a/queue-3.14/perf-x86-intel-uncore-make-sure-only-uncore-events-are-collected.patch b/queue-3.14/perf-x86-intel-uncore-make-sure-only-uncore-events-are-collected.patch
new file mode 100644 (file)
index 0000000..0f09286
--- /dev/null
@@ -0,0 +1,89 @@
+From af91568e762d04931dcbdd6bef4655433d8b9418 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 10 Dec 2014 21:23:50 +0100
+Subject: perf/x86/intel/uncore: Make sure only uncore events are collected
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit af91568e762d04931dcbdd6bef4655433d8b9418 upstream.
+
+The uncore_collect_events function assumes that an event group
+might contain only uncore events, which is wrong, because it
+might contain any type of events.
+
+This bug leads to the uncore framework touching non-uncore events,
+which could end up causing all sorts of bugs.
+
+One was triggered by Vince's perf fuzzer, when the uncore code
+touched a breakpoint event's private event space as if it were an
+uncore event and caused a BUG:
+
+   BUG: unable to handle kernel paging request at ffffffff82822068
+   IP: [<ffffffff81020338>] uncore_assign_events+0x188/0x250
+   ...
+
+The code in the uncore_assign_events() function was looking for
+event->hw.idx data while the event was initialized as a
+breakpoint, with different members in the event->hw union.
+
+This patch forces uncore_collect_events() to collect only uncore
+events.
+
+Reported-by: Vince Weaver <vince@deater.net>
+Signed-off-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Yan, Zheng <zheng.z.yan@intel.com>
+Link: http://lkml.kernel.org/r/1418243031-20367-2-git-send-email-jolsa@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/perf_event_intel_uncore.c |   22 +++++++++++++++++++---
+ 1 file changed, 19 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+@@ -2886,6 +2886,17 @@ static struct intel_uncore_box *uncore_e
+       return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
+ }
++/*
++ * Using uncore_pmu_event_init pmu event_init callback
++ * as a detection point for uncore events.
++ */
++static int uncore_pmu_event_init(struct perf_event *event);
++
++static bool is_uncore_event(struct perf_event *event)
++{
++      return event->pmu->event_init == uncore_pmu_event_init;
++}
++
+ static int
+ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
+ {
+@@ -2900,13 +2911,18 @@ uncore_collect_events(struct intel_uncor
+               return -EINVAL;
+       n = box->n_events;
+-      box->event_list[n] = leader;
+-      n++;
++
++      if (is_uncore_event(leader)) {
++              box->event_list[n] = leader;
++              n++;
++      }
++
+       if (!dogrp)
+               return n;
+       list_for_each_entry(event, &leader->sibling_list, group_entry) {
+-              if (event->state <= PERF_EVENT_STATE_OFF)
++              if (!is_uncore_event(event) ||
++                  event->state <= PERF_EVENT_STATE_OFF)
+                       continue;
+               if (n >= max_count)
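
The detection trick used above - comparing a pmu's event_init callback
pointer - can be mocked in standalone C (illustrative types and names,
not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    struct mock_pmu { int (*event_init)(void); };
    struct mock_event { struct mock_pmu *pmu; };

    static int uncore_event_init(void)     { return 0; }
    static int breakpoint_event_init(void) { return 0; }

    /* an event belongs to the uncore PMU iff it was initialized by it */
    static bool is_uncore_event(struct mock_event *e)
    {
            return e->pmu->event_init == uncore_event_init;
    }

    int main(void)
    {
            struct mock_pmu uncore = { uncore_event_init };
            struct mock_pmu bp = { breakpoint_event_init };
            struct mock_event e1 = { &uncore }, e2 = { &bp };

            printf("e1 uncore? %d, e2 uncore? %d\n",
                   is_uncore_event(&e1), is_uncore_event(&e2));
            return 0;
    }

The pointer comparison avoids adding a type field: the event_init
callback is unique per PMU, so it doubles as an identity check.
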
diff --git a/queue-3.14/series b/queue-3.14/series
index 4c62f5cfa6cfa57f8760cd5bef52534d5e6a39f6..3cb6366cb921d46da24627ef9385d3f38be75b88 100644 (file)
@@ -61,3 +61,12 @@ arm-dts-enable-pwm-node-by-default-for-s3c64xx.patch
 arm-omap4-pm-only-do-static-dependency-configuration-in-omap4_init_static_deps.patch
 revert-arm-7830-1-delay-don-t-bother-reporting-bogomips-in-proc-cpuinfo.patch
 arm-mvebu-disable-i-o-coherency-on-non-smp-situations-on-armada-370-375-38x-xp.patch
+acpi-pm-fix-pm-initialization-for-devices-that-are-not-present.patch
+arm64-kernel-add-missing-__init-section-marker-to-cpu_suspend_init.patch
+arm64-kernel-refactor-the-cpu-suspend-api-for-retention-states.patch
+arm64-move-cpu_resume-into-the-text-section.patch
+arm64-kernel-fix-__cpu_suspend-mm-switch-on-warm-boot.patch
+btrfs-don-t-delay-inode-ref-updates-during-log-replay.patch
+perf-x86-intel-uncore-make-sure-only-uncore-events-are-collected.patch
+perf-fix-events-installation-during-moving-group.patch
+perf-session-do-not-fail-on-processing-out-of-order-event.patch