--- /dev/null
+From ad1cfdf518976447e6b0d31517bad4e3ebbce6bb Mon Sep 17 00:00:00 2001
+From: Caesar Wang <wxt@rock-chips.com>
+Date: Wed, 18 May 2016 22:41:50 +0800
+Subject: arm64: dts: rockchip: fixes the gic400 2nd region size for rk3368
+
+From: Caesar Wang <wxt@rock-chips.com>
+
+commit ad1cfdf518976447e6b0d31517bad4e3ebbce6bb upstream.
+
+The 2nd additional region is the GIC virtual cpu interface register
+base and size.
+
+As the gic400 documentation for the rk3368 shows, the cpu interface
+registers map as below:
+
+-0x0000 GICC_CTRL
+.
+.
+.
+-0x00fc GICC_IIDR
+-0x1000 GICC_DIR
+
+Obviously, the region size should be greater than 0x1000: GICC_DIR sits at
+offset 0x1000, so at least 0x1004 bytes are needed, and 0x2000 covers the
+full gic400 cpu interface.
+So we should make sure to include the GICC_DIR since the kernel will access
+it in some cases.
+
+Fixes: b790c2cab5ca ("arm64: dts: add Rockchip rk3368 core dtsi and board dts for the r88 board")
+Signed-off-by: Caesar Wang <wxt@rock-chips.com>
+Reviewed-by: Shawn Lin <shawn.lin@rock-chips.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+[added Fixes and stable-cc]
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+
+---
+ arch/arm64/boot/dts/rockchip/rk3368.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+@@ -670,7 +670,7 @@
+ #address-cells = <0>;
+
+ reg = <0x0 0xffb71000 0x0 0x1000>,
+- <0x0 0xffb72000 0x0 0x1000>,
++ <0x0 0xffb72000 0x0 0x2000>,
+ <0x0 0xffb74000 0x0 0x2000>,
+ <0x0 0xffb76000 0x0 0x2000>;
+ interrupts = <GIC_PPI 9
--- /dev/null
+From 0194e760f7d2f42adb5e1db31b27a4331dd89c2f Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 11 Aug 2016 14:11:05 +0100
+Subject: arm64: hibernate: avoid potential TLB conflict
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 0194e760f7d2f42adb5e1db31b27a4331dd89c2f upstream.
+
+In create_safe_exec_page we install a set of global mappings in TTBR0,
+then subsequently invalidate TLBs. While TTBR0 points at the zero page,
+and the TLBs should be free of stale global entries, we may have stale
+ASID-tagged entries (e.g. from the EFI runtime services mappings) for
+the same VAs. Per the ARM ARM these ASID-tagged entries may conflict
+with newly-allocated global entries, and we must follow a
+Break-Before-Make approach to avoid issues resulting from this.
+
+This patch reworks create_safe_exec_page to invalidate TLBs while the
+zero page is still in place, ensuring that there are no potential
+conflicts when the new TTBR0 value is installed. As a single CPU is
+online while this code executes, we do not need to perform broadcast TLB
+maintenance, and can call local_flush_tlb_all(), which also subsumes
+some barriers. The remaining assembly is converted to use write_sysreg()
+and isb().
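+
+A minimal sketch of the resulting sequence (using the same helpers as the
+hunk below):
+
+	cpu_set_reserved_ttbr0();	/* point TTBR0 at the zero page */
+	local_flush_tlb_all();		/* flush while the zero page is live */
+	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+	isb();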
+
+Other than this, we safely manipulate TTBRs in the hibernate dance. The
+code we install as part of the new TTBR0 mapping (the hibernated
+kernel's swsusp_arch_suspend_exit) installs a zero page into TTBR1,
+invalidates TLBs, then installs its preferred value. Upon being restored
+to the middle of swsusp_arch_suspend, the new image will call
+__cpu_suspend_exit, which will call cpu_uninstall_idmap, installing the
+zero page in TTBR0 and invalidating all TLB entries.
+
+Fixes: 82869ac57b5d ("arm64: kernel: Add support for hibernate/suspend-to-disk")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: James Morse <james.morse@arm.com>
+Tested-by: James Morse <james.morse@arm.com>
+Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/hibernate.c | 23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -35,6 +35,7 @@
+ #include <asm/sections.h>
+ #include <asm/smp.h>
+ #include <asm/suspend.h>
++#include <asm/sysreg.h>
+ #include <asm/virt.h>
+
+ /*
+@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *s
+ set_pte(pte, __pte(virt_to_phys((void *)dst) |
+ pgprot_val(PAGE_KERNEL_EXEC)));
+
+- /* Load our new page tables */
+- asm volatile("msr ttbr0_el1, %0;"
+- "isb;"
+- "tlbi vmalle1is;"
+- "dsb ish;"
+- "isb" : : "r"(virt_to_phys(pgd)));
++ /*
++ * Load our new page tables. A strict BBM approach requires that we
++ * ensure that TLBs are free of any entries that may overlap with the
++ * global mappings we are about to install.
++ *
++ * For a real hibernate/resume cycle TTBR0 currently points to a zero
++ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
++ * runtime services), while for a userspace-driven test_resume cycle it
++ * points to userspace page tables (and we must point it at a zero page
++ * ourselves). Elsewhere we only (un)install the idmap with preemption
++ * disabled, so T0SZ should be as required regardless.
++ */
++ cpu_set_reserved_ttbr0();
++ local_flush_tlb_all();
++ write_sysreg(virt_to_phys(pgd), ttbr0_el1);
++ isb();
+
+ *phys_dst_addr = virt_to_phys((void *)dst);
+
--- /dev/null
+From dfbca61af0b654990b9af8297ac574a9986d8275 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 11 Aug 2016 14:11:06 +0100
+Subject: arm64: hibernate: handle allocation failures
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit dfbca61af0b654990b9af8297ac574a9986d8275 upstream.
+
+In create_safe_exec_page(), we create a copy of the hibernate exit text,
+along with some page tables to map this via TTBR0. We then install the
+new tables in TTBR0.
+
+In swsusp_arch_resume() we call create_safe_exec_page() before trying a
+number of operations which may fail (e.g. copying the linear map page
+tables). If these fail, we bail out of swsusp_arch_resume() and return
+an error code, but leave TTBR0 as-is. Subsequently, the core hibernate
+code will call free_basic_memory_bitmaps(), which will free all of the
+memory allocations we made, including the page tables installed in
+TTBR0.
+
+Thus, we may have TTBR0 pointing at dangling freed memory for some
+period of time. If the hibernate attempt was triggered by a user
+requesting a hibernate test via the reboot syscall, we may return to
+userspace with the clobbered TTBR0 value.
+
+Avoid these issues by reorganising swsusp_arch_resume() such that we
+have no failure paths after create_safe_exec_page(). We also add a check
+that the zero page allocation succeeded, matching what we have for other
+allocations.
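+
+The moved allocations keep the usual check-and-bail shape, e.g. (mirroring
+the hunk below):
+
+	zero_page = (void *)get_safe_page(GFP_ATOMIC);
+	if (!zero_page) {
+		pr_err("Failed to allocate zero page.");
+		rc = -ENOMEM;
+		goto out;
+	}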
+
+Fixes: 82869ac57b5d ("arm64: kernel: Add support for hibernate/suspend-to-disk")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: James Morse <james.morse@arm.com>
+Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/hibernate.c | 59 ++++++++++++++++++++++--------------------
+ 1 file changed, 32 insertions(+), 27 deletions(-)
+
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -405,6 +405,38 @@ int swsusp_arch_resume(void)
+ void *, phys_addr_t, phys_addr_t);
+
+ /*
++ * Restoring the memory image will overwrite the ttbr1 page tables.
++ * Create a second copy of just the linear map, and use this when
++ * restoring.
++ */
++ tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
++ if (!tmp_pg_dir) {
++ pr_err("Failed to allocate memory for temporary page tables.");
++ rc = -ENOMEM;
++ goto out;
++ }
++ rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
++ if (rc)
++ goto out;
++
++ /*
++ * Since we only copied the linear map, we need to find restore_pblist's
++ * linear map address.
++ */
++ lm_restore_pblist = LMADDR(restore_pblist);
++
++ /*
++ * We need a zero page that is zero before & after resume in order to
++ * to break before make on the ttbr1 page tables.
++ */
++ zero_page = (void *)get_safe_page(GFP_ATOMIC);
++ if (!zero_page) {
++ pr_err("Failed to allocate zero page.");
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ /*
+ * Locate the exit code in the bottom-but-one page, so that *NULL
+ * still has disastrous affects.
+ */
+@@ -430,27 +462,6 @@ int swsusp_arch_resume(void)
+ __flush_dcache_area(hibernate_exit, exit_size);
+
+ /*
+- * Restoring the memory image will overwrite the ttbr1 page tables.
+- * Create a second copy of just the linear map, and use this when
+- * restoring.
+- */
+- tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+- if (!tmp_pg_dir) {
+- pr_err("Failed to allocate memory for temporary page tables.");
+- rc = -ENOMEM;
+- goto out;
+- }
+- rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+- if (rc)
+- goto out;
+-
+- /*
+- * Since we only copied the linear map, we need to find restore_pblist's
+- * linear map address.
+- */
+- lm_restore_pblist = LMADDR(restore_pblist);
+-
+- /*
+ * KASLR will cause the el2 vectors to be in a different location in
+ * the resumed kernel. Load hibernate's temporary copy into el2.
+ *
+@@ -464,12 +475,6 @@ int swsusp_arch_resume(void)
+ __hyp_set_vectors(el2_vectors);
+ }
+
+- /*
+- * We need a zero page that is zero before & after resume in order to
+- * to break before make on the ttbr1 page tables.
+- */
+- zero_page = (void *)get_safe_page(GFP_ATOMIC);
+-
+ hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
+ resume_hdr.reenter_kernel, lm_restore_pblist,
+ resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
--- /dev/null
+From 89581f06b2bc225f0c9822fa52e714aa2e3810dd Mon Sep 17 00:00:00 2001
+From: Andrew Jones <drjones@redhat.com>
+Date: Fri, 22 Jul 2016 10:38:46 -0400
+Subject: arm64: KVM: Set cpsr before spsr on fault injection
+
+From: Andrew Jones <drjones@redhat.com>
+
+commit 89581f06b2bc225f0c9822fa52e714aa2e3810dd upstream.
+
+We need to set cpsr before determining the spsr bank, as the bank
+depends on the target exception level of the injection, not the
+current mode of the vcpu. Normally this is one and the same (EL1),
+but not when we manage to trap an EL0 fault. It still doesn't really
+matter for the 64-bit EL0 case though, as vcpu_spsr() unconditionally
+uses the EL1 bank for that. However the 32-bit EL0 case gets fun, as
+that path will lead to the BUG() in vcpu_spsr32().
+
+This patch fixes the assignment order and also modifies some white
+space in order to better group pairs of lines that have strict order.
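+
+In sketch form, the strictly ordered pair becomes (as in the hunks below):
+
+	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;	/* target mode first */
+	*vcpu_spsr(vcpu) = cpsr;	/* spsr bank now resolves to EL1 */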
+
+Signed-off-by: Andrew Jones <drjones@redhat.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/inject_fault.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -132,16 +132,14 @@ static u64 get_except_vector(struct kvm_
+ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
+ {
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+- bool is_aarch32;
++ bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
+ u32 esr = 0;
+
+- is_aarch32 = vcpu_mode_is_32bit(vcpu);
+-
+- *vcpu_spsr(vcpu) = cpsr;
+ *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
+-
+ *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
++
+ *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
++ *vcpu_spsr(vcpu) = cpsr;
+
+ vcpu_sys_reg(vcpu, FAR_EL1) = addr;
+
+@@ -172,11 +170,11 @@ static void inject_undef64(struct kvm_vc
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+
+- *vcpu_spsr(vcpu) = cpsr;
+ *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
+-
+ *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
++
+ *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
++ *vcpu_spsr(vcpu) = cpsr;
+
+ /*
+ * Build an unknown exception, depending on the instruction
--- /dev/null
+From 04a848106193b134741672f7e4e444b50c70b631 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 1 Aug 2016 13:29:31 +0200
+Subject: arm64: mm: avoid fdt_check_header() before the FDT is fully mapped
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 04a848106193b134741672f7e4e444b50c70b631 upstream.
+
+As reported by Zijun, the fdt_check_header() call in __fixmap_remap_fdt()
+is not safe since it is not guaranteed that the FDT header is mapped
+completely. Due to the minimum alignment of 8 bytes, the only fields we
+can assume to be mapped are 'magic' and 'totalsize'.
+
+Since the OF layer is in charge of validating the FDT image, and we are
+only interested in making reasonably sure that the size field contains
+a meaningful value, replace the fdt_check_header() call with an explicit
+comparison of the magic field's value against the expected value.
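+
+That is, in sketch form (matching the hunk below):
+
+	if (fdt_magic(dt_virt) != FDT_MAGIC)	/* reads only mapped bytes */
+		return NULL;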
+
+Reported-by: Zijun Hu <zijun_hu@htc.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -748,9 +748,9 @@ void *__init __fixmap_remap_fdt(phys_add
+ /*
+ * Check whether the physical FDT address is set and meets the minimum
+ * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+- * at least 8 bytes so that we can always access the size field of the
+- * FDT header after mapping the first chunk, double check here if that
+- * is indeed the case.
++ * at least 8 bytes so that we can always access the magic and size
++ * fields of the FDT header after mapping the first chunk, double check
++ * here if that is indeed the case.
+ */
+ BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+@@ -778,7 +778,7 @@ void *__init __fixmap_remap_fdt(phys_add
+ create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+ dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
+
+- if (fdt_check_header(dt_virt) != 0)
++ if (fdt_magic(dt_virt) != FDT_MAGIC)
+ return NULL;
+
+ *size = fdt_totalsize(dt_virt);
--- /dev/null
+From d6732fc402c2665f61e72faf206a0268e65236e9 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 28 Jul 2016 16:15:14 +0200
+Subject: arm64: vmlinux.lds: make __rela_offset and __dynsym_offset ABSOLUTE
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit d6732fc402c2665f61e72faf206a0268e65236e9 upstream.
+
+Due to the untyped KIMAGE_VADDR constant, the linker may not notice
+that the __rela_offset and __dynsym_offset expressions are absolute
+values (i.e., are not subject to relocation). This does not matter for
+KASLR, but it does confuse kallsyms in relative mode, since it uses
+the lowest non-absolute symbol address as the anchor point, and expects
+all other symbol addresses to be within 4 GB of it.
+
+Fix this by qualifying these expressions as ABSOLUTE() explicitly.
+
+Fixes: 0cd3defe0af4 ("arm64: kernel: perform relocation processing from ID map")
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/vmlinux.lds.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -181,9 +181,9 @@ SECTIONS
+ *(.hash)
+ }
+
+- __rela_offset = ADDR(.rela) - KIMAGE_VADDR;
++ __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
+ __rela_size = SIZEOF(.rela);
+- __dynsym_offset = ADDR(.dynsym) - KIMAGE_VADDR;
++ __dynsym_offset = ABSOLUTE(ADDR(.dynsym) - KIMAGE_VADDR);
+
+ . = ALIGN(SEGMENT_ALIGN);
+ __init_end = .;
--- /dev/null
+From 3dbd3212f81b2b410a34a922055e2da792864829 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 6 Jul 2016 12:50:12 +0300
+Subject: gpio: intel-mid: Remove potentially harmful code
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 3dbd3212f81b2b410a34a922055e2da792864829 upstream.
+
+The commit d56d6b3d7d69 ("gpio: langwell: add Intel Merrifield support")
+doesn't look at all like proper support for Intel Merrifield and I dare say
+that it distorts the behaviour of the hardware.
+
+The register map is different on Intel Merrifield, i.e. only 6 out of 8
+registers have the same purpose but none of them has the same location in
+the address space. The current code is potentially harmful to existing
+hardware since it pokes registers at wrong offsets and may set some pin to
+be a GPIO output when the connected hardware doesn't expect that.
+
+Besides the above, GPIO and pinctrl on Intel Merrifield are located in
+different IP blocks. The functionality has been extended as well, e.g. with
+support for level interrupts and special registers for wake-capable sources,
+and thus, in my opinion, it requires a completely separate driver.
+
+In case someone is wondering: the existing gpio-intel-mid.c could be
+converted to an actual pinctrl driver (which it de facto is), though I
+wouldn't volunteer to do that.
+
+Fixes: d56d6b3d7d69 ("gpio: langwell: add Intel Merrifield support")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-intel-mid.c | 19 -------------------
+ 1 file changed, 19 deletions(-)
+
+--- a/drivers/gpio/gpio-intel-mid.c
++++ b/drivers/gpio/gpio-intel-mid.c
+@@ -17,7 +17,6 @@
+ * Moorestown platform Langwell chip.
+ * Medfield platform Penwell chip.
+ * Clovertrail platform Cloverview chip.
+- * Merrifield platform Tangier chip.
+ */
+
+ #include <linux/module.h>
+@@ -64,10 +63,6 @@ enum GPIO_REG {
+ /* intel_mid gpio driver data */
+ struct intel_mid_gpio_ddata {
+ u16 ngpio; /* number of gpio pins */
+- u32 gplr_offset; /* offset of first GPLR register from base */
+- u32 flis_base; /* base address of FLIS registers */
+- u32 flis_len; /* length of FLIS registers */
+- u32 (*get_flis_offset)(int gpio);
+ u32 chip_irq_type; /* chip interrupt type */
+ };
+
+@@ -252,15 +247,6 @@ static const struct intel_mid_gpio_ddata
+ .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+ };
+
+-static const struct intel_mid_gpio_ddata gpio_tangier = {
+- .ngpio = 192,
+- .gplr_offset = 4,
+- .flis_base = 0xff0c0000,
+- .flis_len = 0x8000,
+- .get_flis_offset = NULL,
+- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+-};
+-
+ static const struct pci_device_id intel_gpio_ids[] = {
+ {
+ /* Lincroft */
+@@ -287,11 +273,6 @@ static const struct pci_device_id intel_
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
+ .driver_data = (kernel_ulong_t)&gpio_cloverview_core,
+ },
+- {
+- /* Tangier */
+- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
+- .driver_data = (kernel_ulong_t)&gpio_tangier,
+- },
+ { 0 }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
--- /dev/null
+From dd3b204af11b50be6dc77e18b88b3c646bba354c Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Sun, 19 Jun 2016 23:49:57 +0300
+Subject: gpio: intel-mid: switch to devm_gpiochip_add_data()
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit dd3b204af11b50be6dc77e18b88b3c646bba354c upstream.
+
+The error handling is not correct since commit 3f7dbfd8eea9 ("gpio:
+intel-mid: switch to using gpiolib irqchip helpers"). Switch to the devres
+API to fix the potential resource leak.
+
+Fixes: 3f7dbfd8eea9 ("gpio: intel-mid: switch to using gpiolib irqchip helpers")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-intel-mid.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-intel-mid.c
++++ b/drivers/gpio/gpio-intel-mid.c
+@@ -382,7 +382,7 @@ static int intel_gpio_probe(struct pci_d
+ spin_lock_init(&priv->lock);
+
+ pci_set_drvdata(pdev, priv);
+- retval = gpiochip_add_data(&priv->chip, priv);
++ retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
+ if (retval) {
+ dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
+ return retval;
--- /dev/null
+From a246b8198f776a16d1d3a3bbfc2d437bad766b29 Mon Sep 17 00:00:00 2001
+From: Vignesh R <vigneshr@ti.com>
+Date: Thu, 9 Jun 2016 11:02:04 +0530
+Subject: gpio: pca953x: Fix NBANK calculation for PCA9536
+
+From: Vignesh R <vigneshr@ti.com>
+
+commit a246b8198f776a16d1d3a3bbfc2d437bad766b29 upstream.
+
+The NBANK() macro assumes that ngpios is a multiple of 8 (BANK_SZ) and
+hence results in 0 banks for the PCA9536, which has just 4 gpios. This is
+wrong, as the PCA9536 has 1 bank with 4 gpios, and leaves the
+PCA953X_INVERT register uninitialized. Fix this by using the DIV_ROUND_UP
+macro in NBANK().
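+
+A worked example, with BANK_SZ == 8: for the PCA9536's ngpio == 4, the old
+macro gives 4 / 8 == 0 banks, while DIV_ROUND_UP(4, 8) == 1, matching the
+real hardware.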
+
+Signed-off-by: Vignesh R <vigneshr@ti.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-pca953x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -90,7 +90,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_i
+ #define MAX_BANK 5
+ #define BANK_SZ 8
+
+-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
++#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
+
+ struct pca953x_chip {
+ unsigned gpio_start;
--- /dev/null
+From 5fc8f707a2aa40c767c3a338738b9b6fcd151ac1 Mon Sep 17 00:00:00 2001
+From: Jan Kiszka <jan.kiszka@siemens.com>
+Date: Fri, 8 Jul 2016 20:42:04 +0200
+Subject: intel_pstate: Fix MSR_CONFIG_TDP_x addressing in core_get_max_pstate()
+
+From: Jan Kiszka <jan.kiszka@siemens.com>
+
+commit 5fc8f707a2aa40c767c3a338738b9b6fcd151ac1 upstream.
+
+If MSR_CONFIG_TDP_CONTROL is locked, we currently try to address some
+MSR 0x80000648 or so. Mask out the relevant level bits 0 and 1.
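+
+A worked example, assuming the usual MSR numbering: with the lock bit
+(bit 31) set and TDP level 0 selected, tdp_ctrl reads as 0x80000000, so
+MSR_CONFIG_TDP_NOMINAL (0x648) + tdp_ctrl yields the bogus index
+0x80000648, while 0x648 + (tdp_ctrl & 0x3) stays at the valid 0x648.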
+
+Found while running over the Jailhouse hypervisor which became upset
+about this strange MSR index.
+
+Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
+Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/intel_pstate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -944,7 +944,7 @@ static int core_get_max_pstate(void)
+ if (err)
+ goto skip_tar;
+
+- tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
++ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
+ err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+ if (err)
+ goto skip_tar;
--- /dev/null
+From 30b072ce0356e8b141f4ca6da7220486fa3641d9 Mon Sep 17 00:00:00 2001
+From: Alexis Dambricourt <alexis.dambricourt@gmail.com>
+Date: Mon, 4 Jul 2016 21:05:15 +0200
+Subject: KVM: MTRR: fix kvm_mtrr_check_gfn_range_consistency page fault
+
+From: Alexis Dambricourt <alexis.dambricourt@gmail.com>
+
+commit 30b072ce0356e8b141f4ca6da7220486fa3641d9 upstream.
+
+The following #PF may occurs:
+[ 1403.317041] BUG: unable to handle kernel paging request at 0000000200000068
+[ 1403.317045] IP: [<ffffffffc04c20b0>] __mtrr_lookup_var_next+0x10/0xa0 [kvm]
+
+[ 1403.317123] Call Trace:
+[ 1403.317134] [<ffffffffc04c2a65>] ? kvm_mtrr_check_gfn_range_consistency+0xc5/0x120 [kvm]
+[ 1403.317143] [<ffffffffc04ac11f>] ? tdp_page_fault+0x9f/0x2c0 [kvm]
+[ 1403.317152] [<ffffffffc0498128>] ? kvm_set_msr_common+0x858/0xc00 [kvm]
+[ 1403.317161] [<ffffffffc04b8883>] ? x86_emulate_insn+0x273/0xd30 [kvm]
+[ 1403.317171] [<ffffffffc04c04e4>] ? kvm_cpuid+0x34/0x190 [kvm]
+[ 1403.317180] [<ffffffffc04a5bb9>] ? kvm_mmu_page_fault+0x59/0xe0 [kvm]
+[ 1403.317183] [<ffffffffc0d729e1>] ? vmx_handle_exit+0x1d1/0x14a0 [kvm_intel]
+[ 1403.317185] [<ffffffffc0d75f3f>] ? atomic_switch_perf_msrs+0x6f/0xa0 [kvm_intel]
+[ 1403.317187] [<ffffffffc0d7621d>] ? vmx_vcpu_run+0x2ad/0x420 [kvm_intel]
+[ 1403.317196] [<ffffffffc04a0962>] ? kvm_arch_vcpu_ioctl_run+0x622/0x1550 [kvm]
+[ 1403.317204] [<ffffffffc049abb9>] ? kvm_arch_vcpu_load+0x59/0x210 [kvm]
+[ 1403.317206] [<ffffffff81036245>] ? __kernel_fpu_end+0x35/0x100
+[ 1403.317213] [<ffffffffc0487eb6>] ? kvm_vcpu_ioctl+0x316/0x5d0 [kvm]
+[ 1403.317215] [<ffffffff81088225>] ? do_sigtimedwait+0xd5/0x220
+[ 1403.317217] [<ffffffff811f84dd>] ? do_vfs_ioctl+0x9d/0x5c0
+[ 1403.317224] [<ffffffffc04928ae>] ? kvm_on_user_return+0x3e/0x70 [kvm]
+[ 1403.317225] [<ffffffff811f8a74>] ? SyS_ioctl+0x74/0x80
+[ 1403.317227] [<ffffffff815bf0b6>] ? entry_SYSCALL_64_fastpath+0x1e/0xa8
+[ 1403.317242] RIP [<ffffffffc04c20b0>] __mtrr_lookup_var_next+0x10/0xa0 [kvm]
+
+At mtrr_lookup_fixed_next(), when the condition
+'if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))' becomes true,
+mtrr_lookup_var_start() is called with iter->range holding garbage values
+from the fixed MTRR union field. Then, list_prepare_entry() does not fall
+back to list_entry() initialization, keeping a garbage pointer in
+iter->range which is accessed in the following __mtrr_lookup_var_next()
+call.
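+
+For reference, list_prepare_entry() is essentially (see include/linux/list.h):
+
+	#define list_prepare_entry(pos, head, member) \
+		((pos) ? : list_entry(head, typeof(*pos), member))
+
+so pre-setting iter->range to NULL makes it fall back to the list head
+instead of keeping the stale union contents.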
+
+Fixes: f571c0973e4b8c888e049b6842e4b4f93b5c609c
+Signed-off-by: Alexis Dambricourt <alexis@blade-group.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mtrr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/mtrr.c
++++ b/arch/x86/kvm/mtrr.c
+@@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct
+
+ iter->fixed = false;
+ iter->start_max = iter->start;
++ iter->range = NULL;
+ iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
+
+ __mtrr_lookup_var_next(iter);
--- /dev/null
+From 2f1fe81123f59271bddda673b60116bde9660385 Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Fri, 8 Jul 2016 15:36:06 -0700
+Subject: KVM: nVMX: Fix memory corruption when using VMCS shadowing
+
+From: Jim Mattson <jmattson@google.com>
+
+commit 2f1fe81123f59271bddda673b60116bde9660385 upstream.
+
+When freeing the nested resources of a vcpu, there is an assumption that
+the vcpu's vmcs01 is the current VMCS on the CPU that executes
+nested_release_vmcs12(). If this assumption is violated, the vcpu's
+vmcs01 may be made active on multiple CPUs at the same time, in
+violation of Intel's specification. Moreover, since the vcpu's vmcs01 is
+not VMCLEARed on every CPU on which it is active, it can linger in a
+CPU's VMCS cache after it has been freed and potentially
+repurposed. Subsequent eviction from the CPU's VMCS cache on a capacity
+miss can result in memory corruption.
+
+It is not sufficient for vmx_free_vcpu() to call vmx_load_vmcs01(). If
+the vcpu in question was last loaded on a different CPU, it must be
+migrated to the current CPU before calling vmx_load_vmcs01().
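+
+In sketch form, vcpu_load() provides exactly that migration (as in the hunk
+below):
+
+	r = vcpu_load(vcpu);	/* migrate the vcpu to this CPU */
+	BUG_ON(r);
+	vmx_load_vmcs01(vcpu);	/* make vmcs01 the current VMCS */
+	free_nested(vmx);
+	vcpu_put(vcpu);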
+
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 19 +++++++++++++++++--
+ virt/kvm/kvm_main.c | 2 ++
+ 2 files changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8855,6 +8855,22 @@ static void vmx_load_vmcs01(struct kvm_v
+ put_cpu();
+ }
+
++/*
++ * Ensure that the current vmcs of the logical processor is the
++ * vmcs01 of the vcpu before calling free_nested().
++ */
++static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
++{
++ struct vcpu_vmx *vmx = to_vmx(vcpu);
++ int r;
++
++ r = vcpu_load(vcpu);
++ BUG_ON(r);
++ vmx_load_vmcs01(vcpu);
++ free_nested(vmx);
++ vcpu_put(vcpu);
++}
++
+ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -8863,8 +8879,7 @@ static void vmx_free_vcpu(struct kvm_vcp
+ vmx_destroy_pml_buffer(vmx);
+ free_vpid(vmx->vpid);
+ leave_guest_mode(vcpu);
+- vmx_load_vmcs01(vcpu);
+- free_nested(vmx);
++ vmx_free_vcpu_nested(vcpu);
+ free_loaded_vmcs(vmx->loaded_vmcs);
+ kfree(vmx->guest_msrs);
+ kvm_vcpu_uninit(vcpu);
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -148,6 +148,7 @@ int vcpu_load(struct kvm_vcpu *vcpu)
+ put_cpu();
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(vcpu_load);
+
+ void vcpu_put(struct kvm_vcpu *vcpu)
+ {
+@@ -157,6 +158,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
+ preempt_enable();
+ mutex_unlock(&vcpu->mutex);
+ }
++EXPORT_SYMBOL_GPL(vcpu_put);
+
+ static void ack_flush(void *_completed)
+ {
--- /dev/null
+From f024ee098476a3e620232e4a78cfac505f121245 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Wed, 22 Jun 2016 14:21:59 +1000
+Subject: KVM: PPC: Book3S HV: Pull out TM state save/restore into separate procedures
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit f024ee098476a3e620232e4a78cfac505f121245 upstream.
+
+This moves the transactional memory state save and restore sequences
+out of the guest entry/exit paths into separate procedures. This is
+so that these sequences can be used in going into and out of nap
+in a subsequent patch.
+
+The only code changes here are (a) saving and restoring LR on the
+stack, since these new procedures get called with a bl instruction,
+(b) explicitly saving r1 into the PACA instead of assuming that
+HSTATE_HOST_R1(r13) is already set, and (c) removing an unnecessary
+and redundant setting of MSR[TM] that should have been removed by
+commit 9d4d0bdd9e0a ("KVM: PPC: Book3S HV: Add transactional memory
+support", 2013-09-24) but wasn't.
+
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 449 ++++++++++++++++----------------
+ 1 file changed, 237 insertions(+), 212 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -655,112 +655,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+- b skip_tm
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+-
+- /* Turn on TM/FP/VSX/VMX so we can restore them. */
+- mfmsr r5
+- li r6, MSR_TM >> 32
+- sldi r6, r6, 32
+- or r5, r5, r6
+- ori r5, r5, MSR_FP
+- oris r5, r5, (MSR_VEC | MSR_VSX)@h
+- mtmsrd r5
+-
+- /*
+- * The user may change these outside of a transaction, so they must
+- * always be context switched.
+- */
+- ld r5, VCPU_TFHAR(r4)
+- ld r6, VCPU_TFIAR(r4)
+- ld r7, VCPU_TEXASR(r4)
+- mtspr SPRN_TFHAR, r5
+- mtspr SPRN_TFIAR, r6
+- mtspr SPRN_TEXASR, r7
+-
+- ld r5, VCPU_MSR(r4)
+- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+- beq skip_tm /* TM not active in guest */
+-
+- /* Make sure the failure summary is set, otherwise we'll program check
+- * when we trechkpt. It's possible that this might have been not set
+- * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+- * host.
+- */
+- oris r7, r7, (TEXASR_FS)@h
+- mtspr SPRN_TEXASR, r7
+-
+- /*
+- * We need to load up the checkpointed state for the guest.
+- * We need to do this early as it will blow away any GPRs, VSRs and
+- * some SPRs.
+- */
+-
+- mr r31, r4
+- addi r3, r31, VCPU_FPRS_TM
+- bl load_fp_state
+- addi r3, r31, VCPU_VRS_TM
+- bl load_vr_state
+- mr r4, r31
+- lwz r7, VCPU_VRSAVE_TM(r4)
+- mtspr SPRN_VRSAVE, r7
+-
+- ld r5, VCPU_LR_TM(r4)
+- lwz r6, VCPU_CR_TM(r4)
+- ld r7, VCPU_CTR_TM(r4)
+- ld r8, VCPU_AMR_TM(r4)
+- ld r9, VCPU_TAR_TM(r4)
+- mtlr r5
+- mtcr r6
+- mtctr r7
+- mtspr SPRN_AMR, r8
+- mtspr SPRN_TAR, r9
+-
+- /*
+- * Load up PPR and DSCR values but don't put them in the actual SPRs
+- * till the last moment to avoid running with userspace PPR and DSCR for
+- * too long.
+- */
+- ld r29, VCPU_DSCR_TM(r4)
+- ld r30, VCPU_PPR_TM(r4)
+-
+- std r2, PACATMSCRATCH(r13) /* Save TOC */
+-
+- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+- li r5, 0
+- mtmsrd r5, 1
+-
+- /* Load GPRs r0-r28 */
+- reg = 0
+- .rept 29
+- ld reg, VCPU_GPRS_TM(reg)(r31)
+- reg = reg + 1
+- .endr
+-
+- mtspr SPRN_DSCR, r29
+- mtspr SPRN_PPR, r30
+-
+- /* Load final GPRs */
+- ld 29, VCPU_GPRS_TM(29)(r31)
+- ld 30, VCPU_GPRS_TM(30)(r31)
+- ld 31, VCPU_GPRS_TM(31)(r31)
+-
+- /* TM checkpointed state is now setup. All GPRs are now volatile. */
+- TRECHKPT
+-
+- /* Now let's get back the state we need. */
+- HMT_MEDIUM
+- GET_PACA(r13)
+- ld r29, HSTATE_DSCR(r13)
+- mtspr SPRN_DSCR, r29
+- ld r4, HSTATE_KVM_VCPU(r13)
+- ld r1, HSTATE_HOST_R1(r13)
+- ld r2, PACATMSCRATCH(r13)
+-
+- /* Set the MSR RI since we have our registers back. */
+- li r5, MSR_RI
+- mtmsrd r5, 1
+-skip_tm:
++ bl kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+
+ /* Load guest PMU registers */
+@@ -841,12 +737,6 @@ BEGIN_FTR_SECTION
+ /* Skip next section on POWER7 */
+ b 8f
+ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+- /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+- mfmsr r8
+- li r0, 1
+- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+- mtmsrd r8
+-
+ /* Load up POWER8-specific registers */
+ ld r5, VCPU_IAMR(r4)
+ lwz r6, VCPU_PSPB(r4)
+@@ -1436,106 +1326,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+- b 2f
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+- /* Turn on TM. */
+- mfmsr r8
+- li r0, 1
+- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+- mtmsrd r8
+-
+- ld r5, VCPU_MSR(r9)
+- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+- beq 1f /* TM not active in guest. */
+-
+- li r3, TM_CAUSE_KVM_RESCHED
+-
+- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+- li r5, 0
+- mtmsrd r5, 1
+-
+- /* All GPRs are volatile at this point. */
+- TRECLAIM(R3)
+-
+- /* Temporarily store r13 and r9 so we have some regs to play with */
+- SET_SCRATCH0(r13)
+- GET_PACA(r13)
+- std r9, PACATMSCRATCH(r13)
+- ld r9, HSTATE_KVM_VCPU(r13)
+-
+- /* Get a few more GPRs free. */
+- std r29, VCPU_GPRS_TM(29)(r9)
+- std r30, VCPU_GPRS_TM(30)(r9)
+- std r31, VCPU_GPRS_TM(31)(r9)
+-
+- /* Save away PPR and DSCR soon so don't run with user values. */
+- mfspr r31, SPRN_PPR
+- HMT_MEDIUM
+- mfspr r30, SPRN_DSCR
+- ld r29, HSTATE_DSCR(r13)
+- mtspr SPRN_DSCR, r29
+-
+- /* Save all but r9, r13 & r29-r31 */
+- reg = 0
+- .rept 29
+- .if (reg != 9) && (reg != 13)
+- std reg, VCPU_GPRS_TM(reg)(r9)
+- .endif
+- reg = reg + 1
+- .endr
+- /* ... now save r13 */
+- GET_SCRATCH0(r4)
+- std r4, VCPU_GPRS_TM(13)(r9)
+- /* ... and save r9 */
+- ld r4, PACATMSCRATCH(r13)
+- std r4, VCPU_GPRS_TM(9)(r9)
+-
+- /* Reload stack pointer and TOC. */
+- ld r1, HSTATE_HOST_R1(r13)
+- ld r2, PACATOC(r13)
+-
+- /* Set MSR RI now we have r1 and r13 back. */
+- li r5, MSR_RI
+- mtmsrd r5, 1
+-
+- /* Save away checkpinted SPRs. */
+- std r31, VCPU_PPR_TM(r9)
+- std r30, VCPU_DSCR_TM(r9)
+- mflr r5
+- mfcr r6
+- mfctr r7
+- mfspr r8, SPRN_AMR
+- mfspr r10, SPRN_TAR
+- std r5, VCPU_LR_TM(r9)
+- stw r6, VCPU_CR_TM(r9)
+- std r7, VCPU_CTR_TM(r9)
+- std r8, VCPU_AMR_TM(r9)
+- std r10, VCPU_TAR_TM(r9)
+-
+- /* Restore r12 as trap number. */
+- lwz r12, VCPU_TRAP(r9)
+-
+- /* Save FP/VSX. */
+- addi r3, r9, VCPU_FPRS_TM
+- bl store_fp_state
+- addi r3, r9, VCPU_VRS_TM
+- bl store_vr_state
+- mfspr r6, SPRN_VRSAVE
+- stw r6, VCPU_VRSAVE_TM(r9)
+-1:
+- /*
+- * We need to save these SPRs after the treclaim so that the software
+- * error code is recorded correctly in the TEXASR. Also the user may
+- * change these outside of a transaction, so they must always be
+- * context switched.
+- */
+- mfspr r5, SPRN_TFHAR
+- mfspr r6, SPRN_TFIAR
+- mfspr r7, SPRN_TEXASR
+- std r5, VCPU_TFHAR(r9)
+- std r6, VCPU_TFIAR(r9)
+- std r7, VCPU_TEXASR(r9)
+-2:
++ bl kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+
+ /* Increment yield count if they have a VPA */
+@@ -2631,6 +2423,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ mr r4,r31
+ blr
+
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++/*
++ * Save transactional state and TM-related registers.
++ * Called with r9 pointing to the vcpu struct.
++ * This can modify all checkpointed registers, but
++ * restores r1, r2 and r9 (vcpu pointer) before exit.
++ */
++kvmppc_save_tm:
++ mflr r0
++ std r0, PPC_LR_STKOFF(r1)
++
++ /* Turn on TM. */
++ mfmsr r8
++ li r0, 1
++ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
++ mtmsrd r8
++
++ ld r5, VCPU_MSR(r9)
++ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++ beq 1f /* TM not active in guest. */
++
++ std r1, HSTATE_HOST_R1(r13)
++ li r3, TM_CAUSE_KVM_RESCHED
++
++ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
++ li r5, 0
++ mtmsrd r5, 1
++
++ /* All GPRs are volatile at this point. */
++ TRECLAIM(R3)
++
++ /* Temporarily store r13 and r9 so we have some regs to play with */
++ SET_SCRATCH0(r13)
++ GET_PACA(r13)
++ std r9, PACATMSCRATCH(r13)
++ ld r9, HSTATE_KVM_VCPU(r13)
++
++ /* Get a few more GPRs free. */
++ std r29, VCPU_GPRS_TM(29)(r9)
++ std r30, VCPU_GPRS_TM(30)(r9)
++ std r31, VCPU_GPRS_TM(31)(r9)
++
++ /* Save away PPR and DSCR soon so don't run with user values. */
++ mfspr r31, SPRN_PPR
++ HMT_MEDIUM
++ mfspr r30, SPRN_DSCR
++ ld r29, HSTATE_DSCR(r13)
++ mtspr SPRN_DSCR, r29
++
++ /* Save all but r9, r13 & r29-r31 */
++ reg = 0
++ .rept 29
++ .if (reg != 9) && (reg != 13)
++ std reg, VCPU_GPRS_TM(reg)(r9)
++ .endif
++ reg = reg + 1
++ .endr
++ /* ... now save r13 */
++ GET_SCRATCH0(r4)
++ std r4, VCPU_GPRS_TM(13)(r9)
++ /* ... and save r9 */
++ ld r4, PACATMSCRATCH(r13)
++ std r4, VCPU_GPRS_TM(9)(r9)
++
++ /* Reload stack pointer and TOC. */
++ ld r1, HSTATE_HOST_R1(r13)
++ ld r2, PACATOC(r13)
++
++ /* Set MSR RI now we have r1 and r13 back. */
++ li r5, MSR_RI
++ mtmsrd r5, 1
++
++ /* Save away checkpinted SPRs. */
++ std r31, VCPU_PPR_TM(r9)
++ std r30, VCPU_DSCR_TM(r9)
++ mflr r5
++ mfcr r6
++ mfctr r7
++ mfspr r8, SPRN_AMR
++ mfspr r10, SPRN_TAR
++ std r5, VCPU_LR_TM(r9)
++ stw r6, VCPU_CR_TM(r9)
++ std r7, VCPU_CTR_TM(r9)
++ std r8, VCPU_AMR_TM(r9)
++ std r10, VCPU_TAR_TM(r9)
++
++ /* Restore r12 as trap number. */
++ lwz r12, VCPU_TRAP(r9)
++
++ /* Save FP/VSX. */
++ addi r3, r9, VCPU_FPRS_TM
++ bl store_fp_state
++ addi r3, r9, VCPU_VRS_TM
++ bl store_vr_state
++ mfspr r6, SPRN_VRSAVE
++ stw r6, VCPU_VRSAVE_TM(r9)
++1:
++ /*
++ * We need to save these SPRs after the treclaim so that the software
++ * error code is recorded correctly in the TEXASR. Also the user may
++ * change these outside of a transaction, so they must always be
++ * context switched.
++ */
++ mfspr r5, SPRN_TFHAR
++ mfspr r6, SPRN_TFIAR
++ mfspr r7, SPRN_TEXASR
++ std r5, VCPU_TFHAR(r9)
++ std r6, VCPU_TFIAR(r9)
++ std r7, VCPU_TEXASR(r9)
++
++ ld r0, PPC_LR_STKOFF(r1)
++ mtlr r0
++ blr
++
++/*
++ * Restore transactional state and TM-related registers.
++ * Called with r4 pointing to the vcpu struct.
++ * This potentially modifies all checkpointed registers.
++ * It restores r1, r2, r4 from the PACA.
++ */
++kvmppc_restore_tm:
++ mflr r0
++ std r0, PPC_LR_STKOFF(r1)
++
++ /* Turn on TM/FP/VSX/VMX so we can restore them. */
++ mfmsr r5
++ li r6, MSR_TM >> 32
++ sldi r6, r6, 32
++ or r5, r5, r6
++ ori r5, r5, MSR_FP
++ oris r5, r5, (MSR_VEC | MSR_VSX)@h
++ mtmsrd r5
++
++ /*
++ * The user may change these outside of a transaction, so they must
++ * always be context switched.
++ */
++ ld r5, VCPU_TFHAR(r4)
++ ld r6, VCPU_TFIAR(r4)
++ ld r7, VCPU_TEXASR(r4)
++ mtspr SPRN_TFHAR, r5
++ mtspr SPRN_TFIAR, r6
++ mtspr SPRN_TEXASR, r7
++
++ ld r5, VCPU_MSR(r4)
++ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++ beqlr /* TM not active in guest */
++ std r1, HSTATE_HOST_R1(r13)
++
++ /* Make sure the failure summary is set, otherwise we'll program check
++ * when we trechkpt. It's possible that this might have been not set
++ * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
++ * host.
++ */
++ oris r7, r7, (TEXASR_FS)@h
++ mtspr SPRN_TEXASR, r7
++
++ /*
++ * We need to load up the checkpointed state for the guest.
++ * We need to do this early as it will blow away any GPRs, VSRs and
++ * some SPRs.
++ */
++
++ mr r31, r4
++ addi r3, r31, VCPU_FPRS_TM
++ bl load_fp_state
++ addi r3, r31, VCPU_VRS_TM
++ bl load_vr_state
++ mr r4, r31
++ lwz r7, VCPU_VRSAVE_TM(r4)
++ mtspr SPRN_VRSAVE, r7
++
++ ld r5, VCPU_LR_TM(r4)
++ lwz r6, VCPU_CR_TM(r4)
++ ld r7, VCPU_CTR_TM(r4)
++ ld r8, VCPU_AMR_TM(r4)
++ ld r9, VCPU_TAR_TM(r4)
++ mtlr r5
++ mtcr r6
++ mtctr r7
++ mtspr SPRN_AMR, r8
++ mtspr SPRN_TAR, r9
++
++ /*
++ * Load up PPR and DSCR values but don't put them in the actual SPRs
++ * till the last moment to avoid running with userspace PPR and DSCR for
++ * too long.
++ */
++ ld r29, VCPU_DSCR_TM(r4)
++ ld r30, VCPU_PPR_TM(r4)
++
++ std r2, PACATMSCRATCH(r13) /* Save TOC */
++
++ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
++ li r5, 0
++ mtmsrd r5, 1
++
++ /* Load GPRs r0-r28 */
++ reg = 0
++ .rept 29
++ ld reg, VCPU_GPRS_TM(reg)(r31)
++ reg = reg + 1
++ .endr
++
++ mtspr SPRN_DSCR, r29
++ mtspr SPRN_PPR, r30
++
++ /* Load final GPRs */
++ ld 29, VCPU_GPRS_TM(29)(r31)
++ ld 30, VCPU_GPRS_TM(30)(r31)
++ ld 31, VCPU_GPRS_TM(31)(r31)
++
++ /* TM checkpointed state is now setup. All GPRs are now volatile. */
++ TRECHKPT
++
++ /* Now let's get back the state we need. */
++ HMT_MEDIUM
++ GET_PACA(r13)
++ ld r29, HSTATE_DSCR(r13)
++ mtspr SPRN_DSCR, r29
++ ld r4, HSTATE_KVM_VCPU(r13)
++ ld r1, HSTATE_HOST_R1(r13)
++ ld r2, PACATMSCRATCH(r13)
++
++ /* Set the MSR RI since we have our registers back. */
++ li r5, MSR_RI
++ mtmsrd r5, 1
++
++ ld r0, PPC_LR_STKOFF(r1)
++ mtlr r0
++ blr
++#endif
++
+ /*
+ * We come here if we get any exception or interrupt while we are
+ * executing host real mode code while in guest MMU context.
--- /dev/null
+From 93d17397e4e2182fdaad503e2f9da46202c0f1c3 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Wed, 22 Jun 2016 15:52:55 +1000
+Subject: KVM: PPC: Book3S HV: Save/restore TM state in H_CEDE
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit 93d17397e4e2182fdaad503e2f9da46202c0f1c3 upstream.
+
+It turns out that if the guest does a H_CEDE while the CPU is in
+a transactional state, and the H_CEDE does a nap, and the nap
+loses the architected state of the CPU (which it is allowed to do),
+then we lose the checkpointed state of the virtual CPU. In addition,
+the transactional-memory state recorded in the MSR gets reset back
+to non-transactional, and when we try to return to the guest, we take
+a TM bad thing type of program interrupt because we are trying to
+transition from non-transactional to transactional with a hrfid
+instruction, which is not permitted.
+
+The result of the program interrupt occurring at that point is that
+the host CPU will hang in an infinite loop with interrupts disabled.
+Thus this is a denial of service vulnerability in the host which can
+be triggered by any guest (and depending on the guest kernel, it can
+potentially be triggered by unprivileged userspace in the guest).
+
+This vulnerability has been assigned the ID CVE-2016-5412.
+
+To fix this, we save the TM state before napping and restore it
+on exit from the nap, when handling a H_CEDE in real mode. The
+case where H_CEDE exits to host virtual mode is already OK (as are
+other hcalls which exit to host virtual mode) because the exit
+path saves the TM state.
+
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2037,6 +2037,13 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu poi
+ /* save FP state */
+ bl kvmppc_save_fp
+
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++ ld r9, HSTATE_KVM_VCPU(r13)
++ bl kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ /*
+ * Set DEC to the smaller of DEC and HDEC, so that we wake
+ * no later than the end of our timeslice (HDEC interrupts
+@@ -2113,6 +2120,12 @@ kvm_end_cede:
+ bl kvmhv_accumulate_time
+ #endif
+
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++ bl kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ /* load up FP state */
+ bl kvmppc_load_fp
+
--- /dev/null
+From b244c9fc251e14a083a1cbf04bef10bd99303a76 Mon Sep 17 00:00:00 2001
+From: "Cao, Lei" <Lei.Cao@stratus.com>
+Date: Fri, 15 Jul 2016 13:54:04 +0000
+Subject: KVM: VMX: handle PML full VMEXIT that occurs during event delivery
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Cao, Lei <Lei.Cao@stratus.com>
+
+commit b244c9fc251e14a083a1cbf04bef10bd99303a76 upstream.
+
+With PML enabled, the guest will shut down if a PML full VMEXIT occurs
+during event delivery. According to Intel SDM 27.2.3, a PML full VMEXIT can
+occur when an event is being delivered through the IDT, so KVM should not
+exit to user space with an error. Instead, it should let
+EXIT_REASON_PML_FULL go through and the event will be re-injected on the
+next VMENTRY.
+
+Signed-off-by: Lei Cao <lei.cao@stratus.com>
+Fixes: 843e4330573c ("KVM: VMX: Add PML support in VMX")
+[Shortened the summary and Cc'd stable.]
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8224,6 +8224,7 @@ static int vmx_handle_exit(struct kvm_vc
+ if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
+ exit_reason != EXIT_REASON_EPT_VIOLATION &&
++ exit_reason != EXIT_REASON_PML_FULL &&
+ exit_reason != EXIT_REASON_TASK_SWITCH)) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
--- /dev/null
+From c43203cab1e2e193c43f8295f01dfb2a0721d9e5 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 1 Jun 2016 22:26:00 +0200
+Subject: KVM: x86: avoid simultaneous queueing of both IRQ and SMI
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit c43203cab1e2e193c43f8295f01dfb2a0721d9e5 upstream.
+
+If the processor exits to KVM while delivering an interrupt,
+the hypervisor then requeues the interrupt for the next vmentry.
+Trying to enter SMM in this same window causes the vcpu to enter non-root
+mode in emulated SMM (i.e. with IF=0) and with a request to
+inject an IRQ (i.e. with a valid VM-entry interrupt info field).
+This is invalid guest state (SDM 26.3.1.4 "Check on Guest RIP
+and RFLAGS") and the processor fails vmentry.
+
+The fix is to defer the injection from KVM_REQ_SMI to KVM_REQ_EVENT,
+like we already do for e.g. NMIs. This patch doesn't change the
+name of the process_smi function so that it can be applied to
+stable releases. The next patch will modify the names so that
+process_nmi and process_smi handle respectively KVM_REQ_NMI and
+KVM_REQ_SMI.
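+
+In sketch form, the injection order in inject_pending_event() becomes (see
+the hunk below):
+
+	if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+		vcpu->arch.smi_pending = false;
+		process_smi(vcpu);
+	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+		/* NMI, then IRQ, as before */
+	}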
+
+This is especially common with Windows, probably due to the
+self-IPI trick that it uses to deliver deferred procedure
+calls (DPCs).
+
+Reported-by: Laszlo Ersek <lersek@redhat.com>
+Reported-by: Michał Zegan <webczat_200@poczta.onet.pl>
+Fixes: 64d6067057d9658acb8675afcfba549abdb7fc16
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 45 +++++++++++++++++++++++++++++----------------
+ 1 file changed, 29 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -91,6 +91,7 @@ static u64 __read_mostly efer_reserved_b
+
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
+ static void process_nmi(struct kvm_vcpu *vcpu);
++static void process_smi(struct kvm_vcpu *vcpu);
+ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+
+ struct kvm_x86_ops *kvm_x86_ops __read_mostly;
+@@ -5296,13 +5297,8 @@ static void kvm_smm_changed(struct kvm_v
+ /* This is a good place to trace that we are exiting SMM. */
+ trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
+
+- if (unlikely(vcpu->arch.smi_pending)) {
+- kvm_make_request(KVM_REQ_SMI, vcpu);
+- vcpu->arch.smi_pending = 0;
+- } else {
+- /* Process a latched INIT, if any. */
+- kvm_make_request(KVM_REQ_EVENT, vcpu);
+- }
++ /* Process a latched INIT or SMI, if any. */
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ }
+
+ kvm_mmu_reset_context(vcpu);
+@@ -6102,7 +6098,10 @@ static int inject_pending_event(struct k
+ }
+
+ /* try to inject new event if pending */
+- if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
++ if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
++ vcpu->arch.smi_pending = false;
++ process_smi(vcpu);
++ } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+ --vcpu->arch.nmi_pending;
+ vcpu->arch.nmi_injected = true;
+ kvm_x86_ops->set_nmi(vcpu);
+@@ -6312,11 +6311,6 @@ static void process_smi(struct kvm_vcpu
+ char buf[512];
+ u32 cr0;
+
+- if (is_smm(vcpu)) {
+- vcpu->arch.smi_pending = true;
+- return;
+- }
+-
+ trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
+ vcpu->arch.hflags |= HF_SMM_MASK;
+ memset(buf, 0, 512);
+@@ -6379,6 +6373,12 @@ static void process_smi(struct kvm_vcpu
+ kvm_mmu_reset_context(vcpu);
+ }
+
++static void process_smi_request(struct kvm_vcpu *vcpu)
++{
++ vcpu->arch.smi_pending = true;
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++}
++
+ void kvm_make_scan_ioapic_request(struct kvm *kvm)
+ {
+ kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
+@@ -6500,7 +6500,7 @@ static int vcpu_enter_guest(struct kvm_v
+ if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+ record_steal_time(vcpu);
+ if (kvm_check_request(KVM_REQ_SMI, vcpu))
+- process_smi(vcpu);
++ process_smi_request(vcpu);
+ if (kvm_check_request(KVM_REQ_NMI, vcpu))
+ process_nmi(vcpu);
+ if (kvm_check_request(KVM_REQ_PMU, vcpu))
+@@ -6573,8 +6573,18 @@ static int vcpu_enter_guest(struct kvm_v
+
+ if (inject_pending_event(vcpu, req_int_win) != 0)
+ req_immediate_exit = true;
+- /* enable NMI/IRQ window open exits if needed */
+ else {
++ /* Enable NMI/IRQ window open exits if needed.
++ *
++ * SMIs have two cases: 1) they can be nested, and
++ * then there is nothing to do here because RSM will
++ * cause a vmexit anyway; 2) or the SMI can be pending
++ * because inject_pending_event has completed the
++ * injection of an IRQ or NMI from the previous vmexit,
++ * and then we request an immediate exit to inject the SMI.
++ */
++ if (vcpu->arch.smi_pending && !is_smm(vcpu))
++ req_immediate_exit = true;
+ if (vcpu->arch.nmi_pending)
+ kvm_x86_ops->enable_nmi_window(vcpu);
+ if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+@@ -6625,8 +6635,10 @@ static int vcpu_enter_guest(struct kvm_v
+
+ kvm_load_guest_xcr0(vcpu);
+
+- if (req_immediate_exit)
++ if (req_immediate_exit) {
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ smp_send_reschedule(vcpu->cpu);
++ }
+
+ trace_kvm_entry(vcpu->vcpu_id);
+ wait_lapic_expire(vcpu);
+@@ -7427,6 +7439,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcp
+ {
+ vcpu->arch.hflags = 0;
+
++ vcpu->arch.smi_pending = 0;
+ atomic_set(&vcpu->arch.nmi_queued, 0);
+ vcpu->arch.nmi_pending = 0;
+ vcpu->arch.nmi_injected = false;
--- /dev/null
+From 9835f1b70bb3890d38308b9be4fb9d7451ba67f1 Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Wed, 15 Jun 2016 01:02:26 +0200
+Subject: mfd: qcom_rpm: Fix offset error for msm8660
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 9835f1b70bb3890d38308b9be4fb9d7451ba67f1 upstream.
+
+The RPM in MSM8660/APQ8060 has different offsets to the selector
+ACK and request context ACK registers. Make all these register
+offsets part of the per-SoC data and assign the right values.
+
+The bug was found by verifying backwards against the vendor tree in
+the out-of-tree files <mach/rpm-[8660|8064|8960]>: all were using
+offsets 3,11,15,23 and a select size of 4, except the MSM8660/APQ8060
+which was using offsets 3,11,19,27 and a select size of 7.
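+
+In the driver this becomes per-SoC data, e.g. for msm8660 (sketch of the
+hunk below):
+
+	.req_ctx_off = 3,
+	.req_sel_off = 11,
+	.ack_ctx_off = 19,
+	.ack_sel_off = 27,
+	.sel_size = 7,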
+
+All other platforms apart from msm8660 were affected by reading excess
+registers, since 7 was hardcoded as the number of select words. This patch
+also makes this part dynamic, so we only write/read as many select words
+as the platform actually uses.
+
+Symptoms of this bug when using msm8660: the first RPM transaction
+would work, but the next would stall or raise an error since the
+previous transaction was not properly ACKed as the ACK words were
+read at the wrong offset.
+
+Fixes: 58e214382bdd ("mfd: qcom-rpm: Driver for the Qualcomm RPM")
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Reviewed-by: Björn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mfd/qcom_rpm.c | 50 +++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 36 insertions(+), 14 deletions(-)
+
+--- a/drivers/mfd/qcom_rpm.c
++++ b/drivers/mfd/qcom_rpm.c
+@@ -34,7 +34,12 @@ struct qcom_rpm_resource {
+ struct qcom_rpm_data {
+ u32 version;
+ const struct qcom_rpm_resource *resource_table;
+- unsigned n_resources;
++ unsigned int n_resources;
++ unsigned int req_ctx_off;
++ unsigned int req_sel_off;
++ unsigned int ack_ctx_off;
++ unsigned int ack_sel_off;
++ unsigned int sel_size;
+ };
+
+ struct qcom_rpm {
+@@ -61,11 +66,7 @@ struct qcom_rpm {
+
+ #define RPM_REQUEST_TIMEOUT (5 * HZ)
+
+-#define RPM_REQUEST_CONTEXT 3
+-#define RPM_REQ_SELECT 11
+-#define RPM_ACK_CONTEXT 15
+-#define RPM_ACK_SELECTOR 23
+-#define RPM_SELECT_SIZE 7
++#define RPM_MAX_SEL_SIZE 7
+
+ #define RPM_NOTIFICATION BIT(30)
+ #define RPM_REJECTED BIT(31)
+@@ -157,6 +158,11 @@ static const struct qcom_rpm_data apq806
+ .version = 3,
+ .resource_table = apq8064_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
++ .req_ctx_off = 3,
++ .req_sel_off = 11,
++ .ack_ctx_off = 15,
++ .ack_sel_off = 23,
++ .sel_size = 4,
+ };
+
+ static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
+@@ -240,6 +246,11 @@ static const struct qcom_rpm_data msm866
+ .version = 2,
+ .resource_table = msm8660_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
++ .req_ctx_off = 3,
++ .req_sel_off = 11,
++ .ack_ctx_off = 19,
++ .ack_sel_off = 27,
++ .sel_size = 7,
+ };
+
+ static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
+@@ -322,6 +333,11 @@ static const struct qcom_rpm_data msm896
+ .version = 3,
+ .resource_table = msm8960_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
++ .req_ctx_off = 3,
++ .req_sel_off = 11,
++ .ack_ctx_off = 15,
++ .ack_sel_off = 23,
++ .sel_size = 4,
+ };
+
+ static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
+@@ -362,6 +378,11 @@ static const struct qcom_rpm_data ipq806
+ .version = 3,
+ .resource_table = ipq806x_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table),
++ .req_ctx_off = 3,
++ .req_sel_off = 11,
++ .ack_ctx_off = 15,
++ .ack_sel_off = 23,
++ .sel_size = 4,
+ };
+
+ static const struct of_device_id qcom_rpm_of_match[] = {
+@@ -380,7 +401,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ {
+ const struct qcom_rpm_resource *res;
+ const struct qcom_rpm_data *data = rpm->data;
+- u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
++ u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 };
+ int left;
+ int ret = 0;
+ int i;
+@@ -398,12 +419,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
+
+ bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
+- for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
++ for (i = 0; i < rpm->data->sel_size; i++) {
+ writel_relaxed(sel_mask[i],
+- RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
++ RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
+ }
+
+- writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
++ writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off));
+
+ reinit_completion(&rpm->ack);
+ regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
+@@ -426,10 +447,11 @@ static irqreturn_t qcom_rpm_ack_interrup
+ u32 ack;
+ int i;
+
+- ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
+- for (i = 0; i < RPM_SELECT_SIZE; i++)
+- writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
+- writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
++ ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
++ for (i = 0; i < rpm->data->sel_size; i++)
++ writel_relaxed(0,
++ RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
++ writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
+
+ if (ack & RPM_NOTIFICATION) {
+ dev_warn(rpm->dev, "ignoring notification!\n");
--- /dev/null
+From f37be01e6dc606f2fcc5e95c9933d948ce19bd35 Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Wed, 22 Jun 2016 08:27:17 +0200
+Subject: mfd: qcom_rpm: Parametrize also ack selector size
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit f37be01e6dc606f2fcc5e95c9933d948ce19bd35 upstream.
+
+The RPM has two sets of selectors (IPC bit fields): request and
+acknowledge. Apparently, some models use 4*32 bit words for the
+request selectors while others use 7*32 bit words, but all models use
+7*32 bit words for the acknowledge selectors.
+
+So on the models with 4*32 request select bits you can send 4*32
+different messages yet receive 7*32 different replies, meaning that
+7*32 bit words need to be read on the ACK interrupt. This is how the
+vendor code works.
+
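+As a minimal sketch of the approach (names follow the driver code in
+the diff below), the per-SoC description gains separate request and
+acknowledge selector sizes, and the ACK interrupt handler clears the
+full acknowledge range:
+
+	struct qcom_rpm_data {
+		/* ... */
+		unsigned int req_sel_size;	/* 4 or 7 words */
+		unsigned int ack_sel_size;	/* 7 words on all models */
+	};
+
+	/* in the ACK interrupt handler */
+	for (i = 0; i < rpm->data->ack_sel_size; i++)
+		writel_relaxed(0,
+			       RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
+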
+Reported-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mfd/qcom_rpm.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+--- a/drivers/mfd/qcom_rpm.c
++++ b/drivers/mfd/qcom_rpm.c
+@@ -39,7 +39,8 @@ struct qcom_rpm_data {
+ unsigned int req_sel_off;
+ unsigned int ack_ctx_off;
+ unsigned int ack_sel_off;
+- unsigned int sel_size;
++ unsigned int req_sel_size;
++ unsigned int ack_sel_size;
+ };
+
+ struct qcom_rpm {
+@@ -162,7 +163,8 @@ static const struct qcom_rpm_data apq806
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+- .sel_size = 4,
++ .req_sel_size = 4,
++ .ack_sel_size = 7,
+ };
+
+ static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
+@@ -250,7 +252,8 @@ static const struct qcom_rpm_data msm866
+ .req_sel_off = 11,
+ .ack_ctx_off = 19,
+ .ack_sel_off = 27,
+- .sel_size = 7,
++ .req_sel_size = 7,
++ .ack_sel_size = 7,
+ };
+
+ static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
+@@ -337,7 +340,8 @@ static const struct qcom_rpm_data msm896
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+- .sel_size = 4,
++ .req_sel_size = 4,
++ .ack_sel_size = 7,
+ };
+
+ static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
+@@ -382,7 +386,8 @@ static const struct qcom_rpm_data ipq806
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+- .sel_size = 4,
++ .req_sel_size = 4,
++ .ack_sel_size = 7,
+ };
+
+ static const struct of_device_id qcom_rpm_of_match[] = {
+@@ -419,7 +424,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
+
+ bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
+- for (i = 0; i < rpm->data->sel_size; i++) {
++ for (i = 0; i < rpm->data->req_sel_size; i++) {
+ writel_relaxed(sel_mask[i],
+ RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
+ }
+@@ -448,7 +453,7 @@ static irqreturn_t qcom_rpm_ack_interrup
+ int i;
+
+ ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
+- for (i = 0; i < rpm->data->sel_size; i++)
++ for (i = 0; i < rpm->data->ack_sel_size; i++)
+ writel_relaxed(0,
+ RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
+ writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
--- /dev/null
+From 5cada17426505b09a045cd9e6d7fb6db19b76ea1 Mon Sep 17 00:00:00 2001
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+Date: Mon, 25 Jul 2016 08:19:31 -0500
+Subject: objtool: Add 'fixdep' to objtool/.gitignore
+
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+
+commit 5cada17426505b09a045cd9e6d7fb6db19b76ea1 upstream.
+
+To fix:
+
+ Untracked files:
+ (use "git add <file>..." to include in what will be committed)
+
+ tools/objtool/fixdep
+
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/a4571f6893caf737d05524cfa3829c2abc1fb240.1469452729.git.jpoimboe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/objtool/.gitignore | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/objtool/.gitignore
++++ b/tools/objtool/.gitignore
+@@ -1,2 +1,3 @@
+ arch/x86/insn/inat-tables.c
+ objtool
++fixdep
--- /dev/null
+From 10e9e7bd598f9a66a11a22514c68c13c41fc821b Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@intel.com>
+Date: Thu, 11 Aug 2016 07:30:20 -0700
+Subject: perf/x86/intel/uncore: Fix uncore num_counters
+
+From: Kan Liang <kan.liang@intel.com>
+
+commit 10e9e7bd598f9a66a11a22514c68c13c41fc821b upstream.
+
+The num_counters values for some Haswell server and Broadwell server
+uncore boxes are not correct (too large, off by one).
+
+This issue was found by comparing the code with the document. Although
+there is no bug report from users yet, accessing non-existent counters
+is dangerous and the behavior is undefined: it may cause miscounting or
+even crashes.
+
+This patch makes them consistent with the uncore document.
+
+Reported-by: Lukasz Odzioba <lukasz.odzioba@intel.com>
+Signed-off-by: Kan Liang <kan.liang@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Link: http://lkml.kernel.org/r/1470925820-59847-1-git-send-email-kan.liang@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/uncore_snbep.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -2546,7 +2546,7 @@ void hswep_uncore_cpu_init(void)
+
+ static struct intel_uncore_type hswep_uncore_ha = {
+ .name = "ha",
+- .num_counters = 5,
++ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+@@ -2565,7 +2565,7 @@ static struct uncore_event_desc hswep_un
+
+ static struct intel_uncore_type hswep_uncore_imc = {
+ .name = "imc",
+- .num_counters = 5,
++ .num_counters = 4,
+ .num_boxes = 8,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+@@ -2611,7 +2611,7 @@ static struct intel_uncore_type hswep_un
+
+ static struct intel_uncore_type hswep_uncore_qpi = {
+ .name = "qpi",
+- .num_counters = 5,
++ .num_counters = 4,
+ .num_boxes = 3,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCI_PMON_CTR0,
+@@ -2693,7 +2693,7 @@ static struct event_constraint hswep_unc
+
+ static struct intel_uncore_type hswep_uncore_r3qpi = {
+ .name = "r3qpi",
+- .num_counters = 4,
++ .num_counters = 3,
+ .num_boxes = 3,
+ .perf_ctr_bits = 44,
+ .constraints = hswep_uncore_r3qpi_constraints,
+@@ -2892,7 +2892,7 @@ static struct intel_uncore_type bdx_unco
+
+ static struct intel_uncore_type bdx_uncore_imc = {
+ .name = "imc",
+- .num_counters = 5,
++ .num_counters = 4,
+ .num_boxes = 8,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
--- /dev/null
+From 0bd50d719b004110e791800450ad204399100a86 Mon Sep 17 00:00:00 2001
+From: Dan O'Donovan <dan@emutex.com>
+Date: Fri, 10 Jun 2016 13:23:34 +0100
+Subject: pinctrl: cherryview: prevent concurrent access to GPIO controllers
+
+From: Dan O'Donovan <dan@emutex.com>
+
+commit 0bd50d719b004110e791800450ad204399100a86 upstream.
+
+Due to a silicon issue on the Atom X5-Z8000 "Cherry Trail" processor
+series, a common lock must be used to prevent concurrent accesses
+across the 4 GPIO controllers managed by this driver.
+
+See Intel Atom Z8000 Processor Series Specification Update
+(Rev. 005), errata #CHT34, for further information.
+
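+A minimal sketch of the pattern applied by this patch (identifiers as
+in the diff below): the per-instance raw_spinlock_t is replaced by a
+single driver-global lock that every register access takes:
+
+	/* shared by all four GPIO communities */
+	static DEFINE_RAW_SPINLOCK(chv_lock);
+
+	raw_spin_lock_irqsave(&chv_lock, flags);
+	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+	raw_spin_unlock_irqrestore(&chv_lock, flags);
+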
+Signed-off-by: Dan O'Donovan <dan@emutex.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-cherryview.c | 80 +++++++++++++++--------------
+ 1 file changed, 44 insertions(+), 36 deletions(-)
+
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -160,7 +160,6 @@ struct chv_pin_context {
+ * @pctldev: Pointer to the pin controller device
+ * @chip: GPIO chip in this pin controller
+ * @regs: MMIO registers
+- * @lock: Lock to serialize register accesses
+ * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
+ * offset (in GPIO number space)
+ * @community: Community this pinctrl instance represents
+@@ -174,7 +173,6 @@ struct chv_pinctrl {
+ struct pinctrl_dev *pctldev;
+ struct gpio_chip chip;
+ void __iomem *regs;
+- raw_spinlock_t lock;
+ unsigned intr_lines[16];
+ const struct chv_community *community;
+ u32 saved_intmask;
+@@ -657,6 +655,17 @@ static const struct chv_community *chv_c
+ &southeast_community,
+ };
+
++/*
++ * Lock to serialize register accesses
++ *
++ * Due to a silicon issue, a shared lock must be used to prevent
++ * concurrent accesses across the 4 GPIO controllers.
++ *
++ * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005),
++ * errata #CHT34, for further information.
++ */
++static DEFINE_RAW_SPINLOCK(chv_lock);
++
+ static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
+ unsigned reg)
+ {
+@@ -718,13 +727,13 @@ static void chv_pin_dbg_show(struct pinc
+ u32 ctrl0, ctrl1;
+ bool locked;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
+ locked = chv_pad_locked(pctrl, offset);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
+ seq_puts(s, "GPIO ");
+@@ -787,14 +796,14 @@ static int chv_pinmux_set_mux(struct pin
+
+ grp = &pctrl->community->groups[group];
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ /* Check first that the pad is not locked */
+ for (i = 0; i < grp->npins; i++) {
+ if (chv_pad_locked(pctrl, grp->pins[i])) {
+ dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
+ grp->pins[i]);
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EBUSY;
+ }
+ }
+@@ -837,7 +846,7 @@ static int chv_pinmux_set_mux(struct pin
+ pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
+ }
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -851,13 +860,13 @@ static int chv_gpio_request_enable(struc
+ void __iomem *reg;
+ u32 value;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ if (chv_pad_locked(pctrl, offset)) {
+ value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ if (!(value & CHV_PADCTRL0_GPIOEN)) {
+ /* Locked so cannot enable */
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EBUSY;
+ }
+ } else {
+@@ -897,7 +906,7 @@ static int chv_gpio_request_enable(struc
+ chv_writel(value, reg);
+ }
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -911,13 +920,13 @@ static void chv_gpio_disable_free(struct
+ void __iomem *reg;
+ u32 value;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
+ value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
+ chv_writel(value, reg);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+
+ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
+@@ -929,7 +938,7 @@ static int chv_gpio_set_direction(struct
+ unsigned long flags;
+ u32 ctrl0;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
+ if (input)
+@@ -938,7 +947,7 @@ static int chv_gpio_set_direction(struct
+ ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
+ chv_writel(ctrl0, reg);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -963,10 +972,10 @@ static int chv_config_get(struct pinctrl
+ u16 arg = 0;
+ u32 term;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
+
+@@ -1040,7 +1049,7 @@ static int chv_config_set_pull(struct ch
+ unsigned long flags;
+ u32 ctrl0, pull;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ ctrl0 = readl(reg);
+
+ switch (param) {
+@@ -1063,7 +1072,7 @@ static int chv_config_set_pull(struct ch
+ pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ break;
+ default:
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EINVAL;
+ }
+
+@@ -1081,7 +1090,7 @@ static int chv_config_set_pull(struct ch
+ pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ break;
+ default:
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EINVAL;
+ }
+
+@@ -1089,12 +1098,12 @@ static int chv_config_set_pull(struct ch
+ break;
+
+ default:
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EINVAL;
+ }
+
+ chv_writel(ctrl0, reg);
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -1160,9 +1169,9 @@ static int chv_gpio_get(struct gpio_chip
+ unsigned long flags;
+ u32 ctrl0, cfg;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+@@ -1180,7 +1189,7 @@ static void chv_gpio_set(struct gpio_chi
+ void __iomem *reg;
+ u32 ctrl0;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
+ ctrl0 = readl(reg);
+@@ -1192,7 +1201,7 @@ static void chv_gpio_set(struct gpio_chi
+
+ chv_writel(ctrl0, reg);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+
+ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+@@ -1202,9 +1211,9 @@ static int chv_gpio_get_direction(struct
+ u32 ctrl0, direction;
+ unsigned long flags;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+@@ -1242,14 +1251,14 @@ static void chv_gpio_irq_ack(struct irq_
+ int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
+ u32 intr_line;
+
+- raw_spin_lock(&pctrl->lock);
++ raw_spin_lock(&chv_lock);
+
+ intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+ intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
+ chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
+
+- raw_spin_unlock(&pctrl->lock);
++ raw_spin_unlock(&chv_lock);
+ }
+
+ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+@@ -1260,7 +1269,7 @@ static void chv_gpio_irq_mask_unmask(str
+ u32 value, intr_line;
+ unsigned long flags;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+@@ -1273,7 +1282,7 @@ static void chv_gpio_irq_mask_unmask(str
+ value |= BIT(intr_line);
+ chv_writel(value, pctrl->regs + CHV_INTMASK);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+
+ static void chv_gpio_irq_mask(struct irq_data *d)
+@@ -1307,7 +1316,7 @@ static unsigned chv_gpio_irq_startup(str
+ unsigned long flags;
+ u32 intsel, value;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ intsel &= CHV_PADCTRL0_INTSEL_MASK;
+ intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+@@ -1322,7 +1331,7 @@ static unsigned chv_gpio_irq_startup(str
+ irq_set_handler_locked(d, handler);
+ pctrl->intr_lines[intsel] = offset;
+ }
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+
+ chv_gpio_irq_unmask(d);
+@@ -1338,7 +1347,7 @@ static int chv_gpio_irq_type(struct irq_
+ unsigned long flags;
+ u32 value;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ /*
+ * Pins which can be used as shared interrupt are configured in
+@@ -1387,7 +1396,7 @@ static int chv_gpio_irq_type(struct irq_
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -1499,7 +1508,6 @@ static int chv_pinctrl_probe(struct plat
+ if (i == ARRAY_SIZE(chv_communities))
+ return -ENODEV;
+
+- raw_spin_lock_init(&pctrl->lock);
+ pctrl->dev = &pdev->dev;
+
+ #ifdef CONFIG_PM_SLEEP
--- /dev/null
+From 0f5d050ceaa31b2229102211d60c149f920df3aa Mon Sep 17 00:00:00 2001
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Date: Tue, 12 Jul 2016 19:57:57 +0200
+Subject: s390/cio: allow to reset channel measurement block
+
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+
+commit 0f5d050ceaa31b2229102211d60c149f920df3aa upstream.
+
+Prior to commit 1bc6664bdfb949bc69a08113801e7d6acbf6bc3f, a call to
+enable_cmf() for a device for which channel measurement was already
+enabled resulted in a reset of the measurement data.
+
+What looked like bugs at the time (a second allocation was triggered
+but failed, reset was called regardless of previous failures, and
+errors were not reported to userspace) was actually behavior that at
+least one userspace tool depended on. Restore that behavior in a sane
+way.
+
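+A minimal sketch of the restored semantics (cmf_enabled() is the
+helper introduced by this patch): enable_cmf() now turns a second
+enable into a reset of the existing measurement data:
+
+	device_lock(&cdev->dev);
+	if (cmf_enabled(cdev)) {
+		cmbops->reset(cdev);	/* re-enabling means "reset" */
+		goto out_unlock;
+	}
+	/* ... otherwise allocate and enable as before ... */
+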
+Fixes: 1bc6664bdfb ("s390/cio: use device_lock during cmb activation")
+Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Reviewed-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/cio/cmf.c | 29 ++++++++++++++++++++---------
+ 1 file changed, 20 insertions(+), 9 deletions(-)
+
+--- a/drivers/s390/cio/cmf.c
++++ b/drivers/s390/cio/cmf.c
+@@ -753,6 +753,17 @@ static void reset_cmb(struct ccw_device
+ cmf_generic_reset(cdev);
+ }
+
++static int cmf_enabled(struct ccw_device *cdev)
++{
++ int enabled;
++
++ spin_lock_irq(cdev->ccwlock);
++ enabled = !!cdev->private->cmb;
++ spin_unlock_irq(cdev->ccwlock);
++
++ return enabled;
++}
++
+ static struct attribute_group cmf_attr_group;
+
+ static struct cmb_operations cmbops_basic = {
+@@ -1153,13 +1164,8 @@ static ssize_t cmb_enable_show(struct de
+ char *buf)
+ {
+ struct ccw_device *cdev = to_ccwdev(dev);
+- int enabled;
+
+- spin_lock_irq(cdev->ccwlock);
+- enabled = !!cdev->private->cmb;
+- spin_unlock_irq(cdev->ccwlock);
+-
+- return sprintf(buf, "%d\n", enabled);
++ return sprintf(buf, "%d\n", cmf_enabled(cdev));
+ }
+
+ static ssize_t cmb_enable_store(struct device *dev,
+@@ -1199,15 +1205,20 @@ int ccw_set_cmf(struct ccw_device *cdev,
+ * @cdev: The ccw device to be enabled
+ *
+ * Returns %0 for success or a negative error value.
+- *
++ * Note: If this is called on a device for which channel measurement is already
++ * enabled a reset of the measurement data is triggered.
+ * Context:
+ * non-atomic
+ */
+ int enable_cmf(struct ccw_device *cdev)
+ {
+- int ret;
++ int ret = 0;
+
+ device_lock(&cdev->dev);
++ if (cmf_enabled(cdev)) {
++ cmbops->reset(cdev);
++ goto out_unlock;
++ }
+ get_device(&cdev->dev);
+ ret = cmbops->alloc(cdev);
+ if (ret)
+@@ -1226,7 +1237,7 @@ int enable_cmf(struct ccw_device *cdev)
+ out:
+ if (ret)
+ put_device(&cdev->dev);
+-
++out_unlock:
+ device_unlock(&cdev->dev);
+ return ret;
+ }
--- /dev/null
+From c427c42cd612719e8fb8b5891cc9761e7770024e Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Date: Tue, 10 May 2016 13:51:54 +0200
+Subject: s390/mm: don't drop errors in get_guest_storage_key
+
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+
+commit c427c42cd612719e8fb8b5891cc9761e7770024e upstream.
+
+Commit 1e133ab296f3 ("s390/mm: split arch/s390/mm/pgtable.c") changed
+the return value of get_guest_storage_key to an unsigned char, resulting
+in -EFAULT getting interpreted as a valid storage key.
+
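+To see why the narrow return type is broken, consider this
+illustrative (not driver) snippet:
+
+	unsigned char key = get_guest_storage_key(mm, addr);
+	/* on error key == (unsigned char)-EFAULT == 0xf2, which looks
+	 * like a perfectly valid storage key; with an unsigned long
+	 * return value the caller can test for (long)key < 0 instead */
+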
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/pgtable.h | 2 +-
+ arch/s390/mm/pgtable.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -893,7 +893,7 @@ void ptep_zap_key(struct mm_struct *mm,
+ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
+ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned char key, bool nq);
+-unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
++unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
+
+ /*
+ * Certain architectures need to do special things when PTEs
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -543,7 +543,7 @@ int set_guest_storage_key(struct mm_stru
+ }
+ EXPORT_SYMBOL(set_guest_storage_key);
+
+-unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
++unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
+ {
+ unsigned char key;
+ spinlock_t *ptl;
--- /dev/null
+From f045402984404ddc11016358411e445192919047 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Date: Thu, 7 Jul 2016 10:44:10 +0200
+Subject: s390/mm: fix gmap tlb flush issues
+
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+
+commit f045402984404ddc11016358411e445192919047 upstream.
+
+__tlb_flush_asce() should never be used if multiple asce belong to a mm.
+
+As this function changes the mm logic that determines whether local or
+global tlb flushes are needed, we might end up flushing only the gmap
+asce on all CPUs while a follow-up mm asce flush only flushes on the
+local CPU, even though that asce ran on multiple CPUs.
+
+The missing tlb flushes will provoke strange faults in user space and
+even low-address protection exceptions in user space, crashing the
+kernel.
+
+Fixes: 1b948d6caec4 ("s390/mm,tlb: optimize TLB flushing for zEC12")
+Reported-by: Sascha Silbe <silbe@linux.vnet.ibm.com>
+Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/tlbflush.h | 3 ++-
+ arch/s390/mm/gmap.c | 4 ++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -81,7 +81,8 @@ static inline void __tlb_flush_full(stru
+ }
+
+ /*
+- * Flush TLB entries for a specific ASCE on all CPUs.
++ * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
++ * when more than one asce (e.g. gmap) ran on this mm.
+ */
+ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+ {
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -85,7 +85,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
+ static void gmap_flush_tlb(struct gmap *gmap)
+ {
+ if (MACHINE_HAS_IDTE)
+- __tlb_flush_asce(gmap->mm, gmap->asce);
++ __tlb_flush_idte(gmap->asce);
+ else
+ __tlb_flush_global();
+ }
+@@ -124,7 +124,7 @@ void gmap_free(struct gmap *gmap)
+
+ /* Flush tlb. */
+ if (MACHINE_HAS_IDTE)
+- __tlb_flush_asce(gmap->mm, gmap->asce);
++ __tlb_flush_idte(gmap->asce);
+ else
+ __tlb_flush_global();
+
--- /dev/null
+From e51e4d8a185de90424b03f30181b35f29c46a25a Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Date: Thu, 16 Jun 2016 08:27:35 +0200
+Subject: serial: samsung: Fix ERR pointer dereference on deferred probe
+
+From: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+
+commit e51e4d8a185de90424b03f30181b35f29c46a25a upstream.
+
+When clk_get() of the "uart" clock returns EPROBE_DEFER, the next re-probe
+finishes with success but uses invalid (ERR_PTR) values. This leads to
+dereferencing the ERR_PTR stored in ourport->clk:
+
+ 12c30000.serial: Controller clock not found
+ (...)
+ 12c30000.serial: ttySAC3 at MMIO 0x12c30000 (irq = 61, base_baud = 0) is a S3C6400/10
+ Unable to handle kernel paging request at virtual address fffffdfb
+
+ (clk_prepare) from [<c039f7d0>] (s3c24xx_serial_pm+0x20/0x128)
+ (s3c24xx_serial_pm) from [<c0395414>] (uart_change_pm+0x38/0x40)
+ (uart_change_pm) from [<c039689c>] (uart_add_one_port+0x31c/0x44c)
+ (uart_add_one_port) from [<c03a035c>] (s3c24xx_serial_probe+0x2a8/0x418)
+ (s3c24xx_serial_probe) from [<c03ee110>] (platform_drv_probe+0x50/0xb0)
+ (platform_drv_probe) from [<c03ecb44>] (driver_probe_device+0x1f4/0x2b0)
+ (driver_probe_device) from [<c03eb0c0>] (bus_for_each_drv+0x44/0x8c)
+ (bus_for_each_drv) from [<c03ec8c8>] (__device_attach+0x9c/0x100)
+ (__device_attach) from [<c03ebf54>] (bus_probe_device+0x84/0x8c)
+ (bus_probe_device) from [<c03ec388>] (deferred_probe_work_func+0x60/0x8c)
+ (deferred_probe_work_func) from [<c012fee4>] (process_one_work+0x120/0x328)
+ (process_one_work) from [<c0130150>] (worker_thread+0x2c/0x4ac)
+ (worker_thread) from [<c0135320>] (kthread+0xd8/0xf4)
+ (kthread) from [<c0107978>] (ret_from_fork+0x14/0x3c)
+
+The first unsuccessful clk_get() causes s3c24xx_serial_init_port() to
+exit with failure, but the s3c24xx_uart_port is left half-configured
+(e.g. port->mapbase is set, clk contains an ERR_PTR). On the next
+re-probe, s3c24xx_serial_init_port() exits early with success because
+port->mapbase is already configured, and the driver keeps using the
+stale values, including the ERR_PTR as the clock.
+
+Fix this by clearing port->mapbase on the error path so that each
+re-probe initializes all of the port settings.
+
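+A minimal sketch of the resulting error path (matching the diff
+below): every failure now funnels through one label that clears
+port->mapbase, so the next probe attempt starts from scratch:
+
+	ourport->clk = clk_get(&platdev->dev, "uart");
+	if (IS_ERR(ourport->clk)) {
+		ret = PTR_ERR(ourport->clk);	/* e.g. -EPROBE_DEFER */
+		goto err;
+	}
+	/* ... */
+err:
+	port->mapbase = 0;	/* force full re-init on re-probe */
+	return ret;
+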
+Fixes: 60e93575476f ("serial: samsung: enable clock before clearing pending interrupts during init")
+Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Reviewed-by: Javier Martinez Canillas <javier@osg.samsung.com>
+Tested-by: Javier Martinez Canillas <javier@osg.samsung.com>
+Tested-by: Kevin Hilman <khilman@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/samsung.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1684,7 +1684,7 @@ static int s3c24xx_serial_init_port(stru
+ return -ENODEV;
+
+ if (port->mapbase != 0)
+- return 0;
++ return -EINVAL;
+
+ /* setup info for port */
+ port->dev = &platdev->dev;
+@@ -1738,22 +1738,25 @@ static int s3c24xx_serial_init_port(stru
+ ourport->dma = devm_kzalloc(port->dev,
+ sizeof(*ourport->dma),
+ GFP_KERNEL);
+- if (!ourport->dma)
+- return -ENOMEM;
++ if (!ourport->dma) {
++ ret = -ENOMEM;
++ goto err;
++ }
+ }
+
+ ourport->clk = clk_get(&platdev->dev, "uart");
+ if (IS_ERR(ourport->clk)) {
+ pr_err("%s: Controller clock not found\n",
+ dev_name(&platdev->dev));
+- return PTR_ERR(ourport->clk);
++ ret = PTR_ERR(ourport->clk);
++ goto err;
+ }
+
+ ret = clk_prepare_enable(ourport->clk);
+ if (ret) {
+ pr_err("uart: clock failed to prepare+enable: %d\n", ret);
+ clk_put(ourport->clk);
+- return ret;
++ goto err;
+ }
+
+ /* Keep all interrupts masked and cleared */
+@@ -1769,7 +1772,12 @@ static int s3c24xx_serial_init_port(stru
+
+ /* reset the fifos (and setup the uart) */
+ s3c24xx_serial_resetport(port, cfg);
++
+ return 0;
++
++err:
++ port->mapbase = 0;
++ return ret;
+ }
+
+ /* Device driver serial port probe */
arm64-fix-incorrect-per-cpu-usage-for-boot-cpu.patch
arm64-only-select-arm64_module_plts-if-modules-y.patch
arm64-honor-nosmp-kernel-command-line-option.patch
+tty-serial-msm-don-t-read-off-end-of-tx-fifo.patch
+serial-samsung-fix-err-pointer-dereference-on-deferred-probe.patch
+tty-serial-atmel-fix-rs485-half-duplex-with-dma.patch
+gpio-pca953x-fix-nbank-calculation-for-pca9536.patch
+gpio-intel-mid-remove-potentially-harmful-code.patch
+gpio-intel-mid-switch-to-devm_gpiochip_add_data.patch
+pinctrl-cherryview-prevent-concurrent-access-to-gpio-controllers.patch
+arm64-dts-rockchip-fixes-the-gic400-2nd-region-size-for-rk3368.patch
+arm64-mm-avoid-fdt_check_header-before-the-fdt-is-fully-mapped.patch
+arm64-vmlinux.lds-make-__rela_offset-and-__dynsym_offset-absolute.patch
+arm64-kvm-set-cpsr-before-spsr-on-fault-injection.patch
+arm64-hibernate-avoid-potential-tlb-conflict.patch
+arm64-hibernate-handle-allocation-failures.patch
+kvm-ppc-book3s-hv-pull-out-tm-state-save-restore-into-separate-procedures.patch
+kvm-ppc-book3s-hv-save-restore-tm-state-in-h_cede.patch
+kvm-mtrr-fix-kvm_mtrr_check_gfn_range_consistency-page-fault.patch
+kvm-vmx-handle-pml-full-vmexit-that-occurs-during-event-delivery.patch
+kvm-nvmx-fix-memory-corruption-when-using-vmcs-shadowing.patch
+kvm-x86-avoid-simultaneous-queueing-of-both-irq-and-smi.patch
+s390-cio-allow-to-reset-channel-measurement-block.patch
+s390-mm-fix-gmap-tlb-flush-issues.patch
+s390-mm-don-t-drop-errors-in-get_guest_storage_key.patch
+intel_pstate-fix-msr_config_tdp_x-addressing-in-core_get_max_pstate.patch
+mfd-qcom_rpm-fix-offset-error-for-msm8660.patch
+mfd-qcom_rpm-parametrize-also-ack-selector-size.patch
+perf-x86-intel-uncore-fix-uncore-num_counters.patch
+objtool-add-fixdep-to-objtool-.gitignore.patch
--- /dev/null
+From 0058f0871efe7b01c6f2b3046c68196ab73e96da Mon Sep 17 00:00:00 2001
+From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Date: Sat, 28 May 2016 00:54:08 +0200
+Subject: tty/serial: atmel: fix RS485 half duplex with DMA
+
+From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+
+commit 0058f0871efe7b01c6f2b3046c68196ab73e96da upstream.
+
+When using DMA, half duplex doesn't work properly because rx is not stopped
+before starting tx. Ensure we call atmel_stop_rx() in the DMA case.
+
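+A minimal sketch of the fixed start_tx logic (condensed from the diff
+below): the receiver is now stopped before transmission starts for
+both the PDC and the DMA transmit paths:
+
+	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
+		if ((port->rs485.flags & SER_RS485_ENABLED) &&
+		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+			atmel_stop_rx(port);
+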
+Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/atmel_serial.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -482,19 +482,21 @@ static void atmel_start_tx(struct uart_p
+ {
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+- if (atmel_use_pdc_tx(port)) {
+- if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
+- /* The transmitter is already running. Yes, we
+- really need this.*/
+- return;
++ if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
++ & ATMEL_PDC_TXTEN))
++ /* The transmitter is already running. Yes, we
++ really need this.*/
++ return;
+
++ if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ atmel_stop_rx(port);
+
++ if (atmel_use_pdc_tx(port))
+ /* re-enable PDC transmit */
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
+- }
++
+ /* Enable interrupts */
+ atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+ }
--- /dev/null
+From 30acf549ca1e81859a67590ab9ecfce3d1050a0b Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Thu, 2 Jun 2016 17:48:28 -0700
+Subject: tty: serial: msm: Don't read off end of tx fifo
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit 30acf549ca1e81859a67590ab9ecfce3d1050a0b upstream.
+
+For dm uarts in pio mode, tx data is transferred to the fifo register 4
+bytes at a time, but care is not taken when these 4 bytes span the end
+of the xmit buffer, so the loop might read up to 3 bytes past the
+buffer and then skip the actual data at the beginning of the buffer.
+
+Fix this by, analogous to the DMA case, making sure the chunk doesn't
+wrap the xmit buffer.
+
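+The distinction between the two circ_buf helpers is what matters here;
+a short illustration (see include/linux/circ_buf.h):
+
+	/* CIRC_CNT(): all pending bytes, which may wrap past the end of
+	 * the ring; CIRC_CNT_TO_END(): pending bytes up to the end of
+	 * the ring only, so a linear read loop stays in bounds */
+	pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+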
+Fixes: 3a878c430fd6 ("tty: serial: msm: Add TX DMA support")
+Cc: Ivan Ivanov <iivanov.xz@gmail.com>
+Reported-by: Frank Rowand <frowand.list@gmail.com>
+Reported-by: Nicolas Dechesne <nicolas.dechesne@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Acked-by: Andy Gross <andy.gross@linaro.org>
+Tested-by: Frank Rowand <frank.rowand@am.sony.com>
+Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/msm_serial.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -726,7 +726,7 @@ static void msm_handle_tx(struct uart_po
+ return;
+ }
+
+- pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
++ pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ dma_min = 1; /* Always DMA */