--- /dev/null
+From ad1cfdf518976447e6b0d31517bad4e3ebbce6bb Mon Sep 17 00:00:00 2001
+From: Caesar Wang <wxt@rock-chips.com>
+Date: Wed, 18 May 2016 22:41:50 +0800
+Subject: arm64: dts: rockchip: fixes the gic400 2nd region size for rk3368
+
+From: Caesar Wang <wxt@rock-chips.com>
+
+commit ad1cfdf518976447e6b0d31517bad4e3ebbce6bb upstream.
+
+The 2nd region is the GIC CPU interface register base and size.
+
+As the GIC-400 documentation describes, the CPU interface register map
+is as below:
+
+-0x0000 GICC_CTLR
+.
+.
+.
+-0x00fc GICC_IIDR
+-0x1000 GICC_DIR
+
+Obviously, the region size should be greater than 0x1000.
+So we should make sure the mapping includes GICC_DIR, since the kernel
+will access it in some cases.
+
+Fixes: b790c2cab5ca ("arm64: dts: add Rockchip rk3368 core dtsi and board dts for the r88 board")
+Signed-off-by: Caesar Wang <wxt@rock-chips.com>
+Reviewed-by: Shawn Lin <shawn.lin@rock-chips.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+[added Fixes and stable-cc]
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+
+---
+ arch/arm64/boot/dts/rockchip/rk3368.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+@@ -517,7 +517,7 @@
+ #address-cells = <0>;
+
+ reg = <0x0 0xffb71000 0x0 0x1000>,
+- <0x0 0xffb72000 0x0 0x1000>,
++ <0x0 0xffb72000 0x0 0x2000>,
+ <0x0 0xffb74000 0x0 0x2000>,
+ <0x0 0xffb76000 0x0 0x2000>;
+ interrupts = <GIC_PPI 9
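+
+A short sketch of why the 4 KiB mapping is one register short: assuming the
+GICv2 deactivate offset used by the kernel (GIC_CPU_DEACTIVATE, 0x1000, as
+defined in include/linux/irqchip/arm-gic.h) and the rk3368 GICC base from
+the hunk above, the kernel's EOImode==1 path does roughly:
+
+	#define GIC_CPU_DEACTIVATE	0x1000
+
+	/* With reg size 0x1000, 0xffb72000 + 0x1000 lies outside the
+	 * mapped window; with size 0x2000 the write lands inside it.
+	 */
+	writel_relaxed(hwirq, cpu_base + GIC_CPU_DEACTIVATE);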
--- /dev/null
+From 04a848106193b134741672f7e4e444b50c70b631 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 1 Aug 2016 13:29:31 +0200
+Subject: arm64: mm: avoid fdt_check_header() before the FDT is fully mapped
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 04a848106193b134741672f7e4e444b50c70b631 upstream.
+
+As reported by Zijun, the fdt_check_header() call in __fixmap_remap_fdt()
+is not safe since it is not guaranteed that the FDT header is mapped
+completely. Due to the minimum alignment of 8 bytes, the only fields we
+can assume to be mapped are 'magic' and 'totalsize'.
+
+Since the OF layer is in charge of validating the FDT image, and we are
+only interested in making reasonably sure that the size field contains
+a meaningful value, replace the fdt_check_header() call with an explicit
+comparison of the magic field's value against the expected value.
+
+Reported-by: Zijun Hu <zijun_hu@htc.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -652,9 +652,9 @@ void *__init fixmap_remap_fdt(phys_addr_
+ /*
+ * Check whether the physical FDT address is set and meets the minimum
+ * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+- * at least 8 bytes so that we can always access the size field of the
+- * FDT header after mapping the first chunk, double check here if that
+- * is indeed the case.
++ * at least 8 bytes so that we can always access the magic and size
++ * fields of the FDT header after mapping the first chunk, double check
++ * here if that is indeed the case.
+ */
+ BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+@@ -682,7 +682,7 @@ void *__init fixmap_remap_fdt(phys_addr_
+ create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+ SWAPPER_BLOCK_SIZE, prot);
+
+- if (fdt_check_header(dt_virt) != 0)
++ if (fdt_magic(dt_virt) != FDT_MAGIC)
+ return NULL;
+
+ size = fdt_totalsize(dt_virt);
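+
+For reference, the reason only these two fields can be trusted here is that
+they are the first 8 bytes of the FDT header, which is all that the 8-byte
+minimum alignment guarantees to fall within the first mapped chunk. A
+sketch of the header layout (field names as in libfdt's struct fdt_header):
+
+	struct fdt_header {
+		uint32_t magic;		/* 0x0: 0xd00dfeed (FDT_MAGIC)  */
+		uint32_t totalsize;	/* 0x4: total size of the blob  */
+		/* anything below may still be unmapped at this point */
+		uint32_t off_dt_struct;
+		uint32_t off_dt_strings;
+		uint32_t off_mem_rsvmap;
+		uint32_t version;
+		uint32_t last_comp_version;
+		/* ... */
+	};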
--- /dev/null
+From 32b9ccbc3522811c0e483637b85ae25f5491296f Mon Sep 17 00:00:00 2001
+From: Loic Poulain <loic.poulain@intel.com>
+Date: Thu, 28 Apr 2016 18:48:25 +0200
+Subject: Bluetooth: hci_intel: Fix null gpio desc pointer dereference
+
+From: Loic Poulain <loic.poulain@intel.com>
+
+commit 32b9ccbc3522811c0e483637b85ae25f5491296f upstream.
+
+gpiod_get_optional can return either an ERR_PTR or a NULL pointer.
+The NULL case is not tested and is then dereferenced later in desc_to_gpio.
+Fix this by using the non-optional version, which returns an ERR_PTR in
+any error case (this is not an optional gpio).
+Use the same non-optional version for the host-wake gpio.
+
+Fixes: 765ea3abd116 ("Bluetooth: hci_intel: Retrieve host-wake IRQ")
+Signed-off-by: Loic Poulain <loic.poulain@intel.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bluetooth/hci_intel.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/bluetooth/hci_intel.c
++++ b/drivers/bluetooth/hci_intel.c
+@@ -1234,8 +1234,7 @@ static int intel_probe(struct platform_d
+
+ idev->pdev = pdev;
+
+- idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
+- GPIOD_OUT_LOW);
++ idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(idev->reset)) {
+ dev_err(&pdev->dev, "Unable to retrieve gpio\n");
+ return PTR_ERR(idev->reset);
+@@ -1247,8 +1246,7 @@ static int intel_probe(struct platform_d
+
+ dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
+
+- host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake",
+- GPIOD_IN);
++ host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
+ if (IS_ERR(host_wake)) {
+ dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
+ goto no_irq;
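+
+For context, the two getter flavours differ precisely in how a missing GPIO
+is reported, which is the case the old code mishandled. A sketch of the
+gpiod consumer contract:
+
+	/* Optional getter: NULL means "no such GPIO", ERR_PTR() means a
+	 * real error -- callers must handle both before using the
+	 * descriptor.
+	 */
+	desc = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+
+	/* Non-optional getter: never returns NULL; a missing GPIO is just
+	 * another ERR_PTR() case, so a single IS_ERR() check suffices.
+	 */
+	desc = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);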
--- /dev/null
+From 3dbd3212f81b2b410a34a922055e2da792864829 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 6 Jul 2016 12:50:12 +0300
+Subject: gpio: intel-mid: Remove potentially harmful code
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 3dbd3212f81b2b410a34a922055e2da792864829 upstream.
+
+The commit d56d6b3d7d69 ("gpio: langwell: add Intel Merrifield support")
+doesn't look at all like proper support for Intel Merrifield and I dare to
+say that it distorts the behaviour of the hardware.
+
+The register map is different on Intel Merrifield, i.e. only 6 out of 8
+registers have the same purpose and none of them has the same location in
+the address space. The current code is potentially harmful to existing
+hardware since it pokes registers at wrong offsets and may set some pin to
+be a GPIO output when the connected hardware doesn't expect that.
+
+Besides the above, GPIO and pinctrl on Intel Merrifield are located in
+different IP blocks. The functionality has been extended as well, i.e.
+support for level interrupts and special registers for wake-capable
+sources were added, and thus, in my opinion, it requires a completely
+separate driver.
+
+If someone is wondering, the existing gpio-intel-mid.c could be converted
+to an actual pinctrl driver (which in fact it already is), though I
+wouldn't volunteer to do that.
+
+Fixes: d56d6b3d7d69 ("gpio: langwell: add Intel Merrifield support")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-intel-mid.c | 19 -------------------
+ 1 file changed, 19 deletions(-)
+
+--- a/drivers/gpio/gpio-intel-mid.c
++++ b/drivers/gpio/gpio-intel-mid.c
+@@ -17,7 +17,6 @@
+ * Moorestown platform Langwell chip.
+ * Medfield platform Penwell chip.
+ * Clovertrail platform Cloverview chip.
+- * Merrifield platform Tangier chip.
+ */
+
+ #include <linux/module.h>
+@@ -64,10 +63,6 @@ enum GPIO_REG {
+ /* intel_mid gpio driver data */
+ struct intel_mid_gpio_ddata {
+ u16 ngpio; /* number of gpio pins */
+- u32 gplr_offset; /* offset of first GPLR register from base */
+- u32 flis_base; /* base address of FLIS registers */
+- u32 flis_len; /* length of FLIS registers */
+- u32 (*get_flis_offset)(int gpio);
+ u32 chip_irq_type; /* chip interrupt type */
+ };
+
+@@ -257,15 +252,6 @@ static const struct intel_mid_gpio_ddata
+ .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+ };
+
+-static const struct intel_mid_gpio_ddata gpio_tangier = {
+- .ngpio = 192,
+- .gplr_offset = 4,
+- .flis_base = 0xff0c0000,
+- .flis_len = 0x8000,
+- .get_flis_offset = NULL,
+- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+-};
+-
+ static const struct pci_device_id intel_gpio_ids[] = {
+ {
+ /* Lincroft */
+@@ -292,11 +278,6 @@ static const struct pci_device_id intel_
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
+ .driver_data = (kernel_ulong_t)&gpio_cloverview_core,
+ },
+- {
+- /* Tangier */
+- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
+- .driver_data = (kernel_ulong_t)&gpio_tangier,
+- },
+ { 0 }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
--- /dev/null
+From a246b8198f776a16d1d3a3bbfc2d437bad766b29 Mon Sep 17 00:00:00 2001
+From: Vignesh R <vigneshr@ti.com>
+Date: Thu, 9 Jun 2016 11:02:04 +0530
+Subject: gpio: pca953x: Fix NBANK calculation for PCA9536
+
+From: Vignesh R <vigneshr@ti.com>
+
+commit a246b8198f776a16d1d3a3bbfc2d437bad766b29 upstream.
+
+The NBANK() macro assumes that ngpio is a multiple of 8 (BANK_SZ) and
+hence results in 0 banks for the PCA9536, which has just 4 gpios. This is
+wrong, as the PCA9536 has 1 bank with 4 gpios. This results in an
+uninitialized PCA953X_INVERT register. Fix this by using the DIV_ROUND_UP
+macro in NBANK().
+
+Signed-off-by: Vignesh R <vigneshr@ti.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-pca953x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_i
+ #define MAX_BANK 5
+ #define BANK_SZ 8
+
+-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
++#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
+
+ struct pca953x_chip {
+ unsigned gpio_start;
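+
+The arithmetic behind the one-liner, spelled out (the kernel defines
+DIV_ROUND_UP(n, d) as ((n) + (d) - 1) / (d)):
+
+	/* PCA9536: chip->gpio_chip.ngpio == 4, BANK_SZ == 8              */
+	/* old:  4 / 8              == 0 banks  (INVERT never written)    */
+	/* new:  DIV_ROUND_UP(4, 8) == 1 bank   (all registers set up)    */
+	/* chips with a multiple of 8 GPIOs are unchanged, e.g. 16/8 == 2 */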
--- /dev/null
+From 5fc8f707a2aa40c767c3a338738b9b6fcd151ac1 Mon Sep 17 00:00:00 2001
+From: Jan Kiszka <jan.kiszka@siemens.com>
+Date: Fri, 8 Jul 2016 20:42:04 +0200
+Subject: intel_pstate: Fix MSR_CONFIG_TDP_x addressing in core_get_max_pstate()
+
+From: Jan Kiszka <jan.kiszka@siemens.com>
+
+commit 5fc8f707a2aa40c767c3a338738b9b6fcd151ac1 upstream.
+
+If MSR_CONFIG_TDP_CONTROL is locked (its lock bit is set), we currently
+try to address some bogus MSR like 0x80000648. Mask the value down to the
+relevant level bits 0 and 1 before using it as an offset.
+
+Found while running over the Jailhouse hypervisor which became upset
+about this strange MSR index.
+
+Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
+Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/intel_pstate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -662,7 +662,7 @@ static int core_get_max_pstate(void)
+ if (err)
+ goto skip_tar;
+
+- tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
++ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
+ err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+ if (err)
+ goto skip_tar;
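+
+A worked example of the offending arithmetic, assuming (per the Intel SDM)
+that MSR_CONFIG_TDP_NOMINAL is 0x648 and that in MSR_CONFIG_TDP_CONTROL
+bit 31 is the lock bit while bits 1:0 select the level:
+
+	u64 tdp_ctrl = 0x80000000;             /* lock bit set, level 0     */
+	u64 bogus = 0x648 + tdp_ctrl;          /* 0x80000648: no such MSR   */
+	u64 fixed = 0x648 + (tdp_ctrl & 0x3);  /* 0x648: CONFIG_TDP_NOMINAL */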
--- /dev/null
+From 30b072ce0356e8b141f4ca6da7220486fa3641d9 Mon Sep 17 00:00:00 2001
+From: Alexis Dambricourt <alexis.dambricourt@gmail.com>
+Date: Mon, 4 Jul 2016 21:05:15 +0200
+Subject: KVM: MTRR: fix kvm_mtrr_check_gfn_range_consistency page fault
+
+From: Alexis Dambricourt <alexis.dambricourt@gmail.com>
+
+commit 30b072ce0356e8b141f4ca6da7220486fa3641d9 upstream.
+
+The following #PF may occur:
+[ 1403.317041] BUG: unable to handle kernel paging request at 0000000200000068
+[ 1403.317045] IP: [<ffffffffc04c20b0>] __mtrr_lookup_var_next+0x10/0xa0 [kvm]
+
+[ 1403.317123] Call Trace:
+[ 1403.317134] [<ffffffffc04c2a65>] ? kvm_mtrr_check_gfn_range_consistency+0xc5/0x120 [kvm]
+[ 1403.317143] [<ffffffffc04ac11f>] ? tdp_page_fault+0x9f/0x2c0 [kvm]
+[ 1403.317152] [<ffffffffc0498128>] ? kvm_set_msr_common+0x858/0xc00 [kvm]
+[ 1403.317161] [<ffffffffc04b8883>] ? x86_emulate_insn+0x273/0xd30 [kvm]
+[ 1403.317171] [<ffffffffc04c04e4>] ? kvm_cpuid+0x34/0x190 [kvm]
+[ 1403.317180] [<ffffffffc04a5bb9>] ? kvm_mmu_page_fault+0x59/0xe0 [kvm]
+[ 1403.317183] [<ffffffffc0d729e1>] ? vmx_handle_exit+0x1d1/0x14a0 [kvm_intel]
+[ 1403.317185] [<ffffffffc0d75f3f>] ? atomic_switch_perf_msrs+0x6f/0xa0 [kvm_intel]
+[ 1403.317187] [<ffffffffc0d7621d>] ? vmx_vcpu_run+0x2ad/0x420 [kvm_intel]
+[ 1403.317196] [<ffffffffc04a0962>] ? kvm_arch_vcpu_ioctl_run+0x622/0x1550 [kvm]
+[ 1403.317204] [<ffffffffc049abb9>] ? kvm_arch_vcpu_load+0x59/0x210 [kvm]
+[ 1403.317206] [<ffffffff81036245>] ? __kernel_fpu_end+0x35/0x100
+[ 1403.317213] [<ffffffffc0487eb6>] ? kvm_vcpu_ioctl+0x316/0x5d0 [kvm]
+[ 1403.317215] [<ffffffff81088225>] ? do_sigtimedwait+0xd5/0x220
+[ 1403.317217] [<ffffffff811f84dd>] ? do_vfs_ioctl+0x9d/0x5c0
+[ 1403.317224] [<ffffffffc04928ae>] ? kvm_on_user_return+0x3e/0x70 [kvm]
+[ 1403.317225] [<ffffffff811f8a74>] ? SyS_ioctl+0x74/0x80
+[ 1403.317227] [<ffffffff815bf0b6>] ? entry_SYSCALL_64_fastpath+0x1e/0xa8
+[ 1403.317242] RIP [<ffffffffc04c20b0>] __mtrr_lookup_var_next+0x10/0xa0 [kvm]
+
+At mtrr_lookup_fixed_next(), when the condition
+'if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))' becomes true,
+mtrr_lookup_var_start() is called with iter->range holding garbage values
+left over from the fixed MTRR union field. Then list_prepare_entry() does
+not perform the list_entry() initialization, keeping a garbage pointer in
+iter->range which is accessed in the following __mtrr_lookup_var_next()
+call.
+
+Fixes: f571c0973e4b8c888e049b6842e4b4f93b5c609c
+Signed-off-by: Alexis Dambricourt <alexis@blade-group.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mtrr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/mtrr.c
++++ b/arch/x86/kvm/mtrr.c
+@@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct
+
+ iter->fixed = false;
+ iter->start_max = iter->start;
++ iter->range = NULL;
+ iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
+
+ __mtrr_lookup_var_next(iter);
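+
+Why a single NULL assignment is enough can be seen from the macro itself
+(roughly as defined in include/linux/list.h):
+
+	#define list_prepare_entry(pos, head, member) \
+		((pos) ? : list_entry(head, typeof(*(pos)), member))
+
+If pos is non-NULL it is returned untouched, so a stale iter->range taken
+from the fixed-MTRR side of the union survives; with iter->range set to
+NULL first, the macro falls back to list_entry(head, ...), i.e. a valid
+"just before the first element" cursor for the variable-range walk.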
--- /dev/null
+From 2f1fe81123f59271bddda673b60116bde9660385 Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Fri, 8 Jul 2016 15:36:06 -0700
+Subject: KVM: nVMX: Fix memory corruption when using VMCS shadowing
+
+From: Jim Mattson <jmattson@google.com>
+
+commit 2f1fe81123f59271bddda673b60116bde9660385 upstream.
+
+When freeing the nested resources of a vcpu, there is an assumption that
+the vcpu's vmcs01 is the current VMCS on the CPU that executes
+nested_release_vmcs12(). If this assumption is violated, the vcpu's
+vmcs01 may be made active on multiple CPUs at the same time, in
+violation of Intel's specification. Moreover, since the vcpu's vmcs01 is
+not VMCLEARed on every CPU on which it is active, it can linger in a
+CPU's VMCS cache after it has been freed and potentially
+repurposed. Subsequent eviction from the CPU's VMCS cache on a capacity
+miss can result in memory corruption.
+
+It is not sufficient for vmx_free_vcpu() to call vmx_load_vmcs01(). If
+the vcpu in question was last loaded on a different CPU, it must be
+migrated to the current CPU before calling vmx_load_vmcs01().
+
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 19 +++++++++++++++++--
+ virt/kvm/kvm_main.c | 2 ++
+ 2 files changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8737,6 +8737,22 @@ static void vmx_load_vmcs01(struct kvm_v
+ put_cpu();
+ }
+
++/*
++ * Ensure that the current vmcs of the logical processor is the
++ * vmcs01 of the vcpu before calling free_nested().
++ */
++static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
++{
++ struct vcpu_vmx *vmx = to_vmx(vcpu);
++ int r;
++
++ r = vcpu_load(vcpu);
++ BUG_ON(r);
++ vmx_load_vmcs01(vcpu);
++ free_nested(vmx);
++ vcpu_put(vcpu);
++}
++
+ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -8745,8 +8761,7 @@ static void vmx_free_vcpu(struct kvm_vcp
+ vmx_destroy_pml_buffer(vmx);
+ free_vpid(vmx->vpid);
+ leave_guest_mode(vcpu);
+- vmx_load_vmcs01(vcpu);
+- free_nested(vmx);
++ vmx_free_vcpu_nested(vcpu);
+ free_loaded_vmcs(vmx->loaded_vmcs);
+ kfree(vmx->guest_msrs);
+ kvm_vcpu_uninit(vcpu);
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -142,6 +142,7 @@ int vcpu_load(struct kvm_vcpu *vcpu)
+ put_cpu();
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(vcpu_load);
+
+ void vcpu_put(struct kvm_vcpu *vcpu)
+ {
+@@ -151,6 +152,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
+ preempt_enable();
+ mutex_unlock(&vcpu->mutex);
+ }
++EXPORT_SYMBOL_GPL(vcpu_put);
+
+ static void ack_flush(void *_completed)
+ {
--- /dev/null
+From f024ee098476a3e620232e4a78cfac505f121245 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Wed, 22 Jun 2016 14:21:59 +1000
+Subject: KVM: PPC: Book3S HV: Pull out TM state save/restore into separate procedures
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit f024ee098476a3e620232e4a78cfac505f121245 upstream.
+
+This moves the transactional memory state save and restore sequences
+out of the guest entry/exit paths into separate procedures. This is
+so that these sequences can be used in going into and out of nap
+in a subsequent patch.
+
+The only code changes here are (a) saving and restoring LR on the
+stack, since these new procedures get called with a bl instruction,
+(b) explicitly saving r1 into the PACA instead of assuming that
+HSTATE_HOST_R1(r13) is already set, and (c) removing an unnecessary
+and redundant setting of MSR[TM] that should have been removed by
+commit 9d4d0bdd9e0a ("KVM: PPC: Book3S HV: Add transactional memory
+support", 2013-09-24) but wasn't.
+
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 449 ++++++++++++++++----------------
+ 1 file changed, 237 insertions(+), 212 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -655,112 +655,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+- b skip_tm
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+-
+- /* Turn on TM/FP/VSX/VMX so we can restore them. */
+- mfmsr r5
+- li r6, MSR_TM >> 32
+- sldi r6, r6, 32
+- or r5, r5, r6
+- ori r5, r5, MSR_FP
+- oris r5, r5, (MSR_VEC | MSR_VSX)@h
+- mtmsrd r5
+-
+- /*
+- * The user may change these outside of a transaction, so they must
+- * always be context switched.
+- */
+- ld r5, VCPU_TFHAR(r4)
+- ld r6, VCPU_TFIAR(r4)
+- ld r7, VCPU_TEXASR(r4)
+- mtspr SPRN_TFHAR, r5
+- mtspr SPRN_TFIAR, r6
+- mtspr SPRN_TEXASR, r7
+-
+- ld r5, VCPU_MSR(r4)
+- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+- beq skip_tm /* TM not active in guest */
+-
+- /* Make sure the failure summary is set, otherwise we'll program check
+- * when we trechkpt. It's possible that this might have been not set
+- * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+- * host.
+- */
+- oris r7, r7, (TEXASR_FS)@h
+- mtspr SPRN_TEXASR, r7
+-
+- /*
+- * We need to load up the checkpointed state for the guest.
+- * We need to do this early as it will blow away any GPRs, VSRs and
+- * some SPRs.
+- */
+-
+- mr r31, r4
+- addi r3, r31, VCPU_FPRS_TM
+- bl load_fp_state
+- addi r3, r31, VCPU_VRS_TM
+- bl load_vr_state
+- mr r4, r31
+- lwz r7, VCPU_VRSAVE_TM(r4)
+- mtspr SPRN_VRSAVE, r7
+-
+- ld r5, VCPU_LR_TM(r4)
+- lwz r6, VCPU_CR_TM(r4)
+- ld r7, VCPU_CTR_TM(r4)
+- ld r8, VCPU_AMR_TM(r4)
+- ld r9, VCPU_TAR_TM(r4)
+- mtlr r5
+- mtcr r6
+- mtctr r7
+- mtspr SPRN_AMR, r8
+- mtspr SPRN_TAR, r9
+-
+- /*
+- * Load up PPR and DSCR values but don't put them in the actual SPRs
+- * till the last moment to avoid running with userspace PPR and DSCR for
+- * too long.
+- */
+- ld r29, VCPU_DSCR_TM(r4)
+- ld r30, VCPU_PPR_TM(r4)
+-
+- std r2, PACATMSCRATCH(r13) /* Save TOC */
+-
+- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+- li r5, 0
+- mtmsrd r5, 1
+-
+- /* Load GPRs r0-r28 */
+- reg = 0
+- .rept 29
+- ld reg, VCPU_GPRS_TM(reg)(r31)
+- reg = reg + 1
+- .endr
+-
+- mtspr SPRN_DSCR, r29
+- mtspr SPRN_PPR, r30
+-
+- /* Load final GPRs */
+- ld 29, VCPU_GPRS_TM(29)(r31)
+- ld 30, VCPU_GPRS_TM(30)(r31)
+- ld 31, VCPU_GPRS_TM(31)(r31)
+-
+- /* TM checkpointed state is now setup. All GPRs are now volatile. */
+- TRECHKPT
+-
+- /* Now let's get back the state we need. */
+- HMT_MEDIUM
+- GET_PACA(r13)
+- ld r29, HSTATE_DSCR(r13)
+- mtspr SPRN_DSCR, r29
+- ld r4, HSTATE_KVM_VCPU(r13)
+- ld r1, HSTATE_HOST_R1(r13)
+- ld r2, PACATMSCRATCH(r13)
+-
+- /* Set the MSR RI since we have our registers back. */
+- li r5, MSR_RI
+- mtmsrd r5, 1
+-skip_tm:
++ bl kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+
+ /* Load guest PMU registers */
+@@ -841,12 +737,6 @@ BEGIN_FTR_SECTION
+ /* Skip next section on POWER7 */
+ b 8f
+ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+- /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+- mfmsr r8
+- li r0, 1
+- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+- mtmsrd r8
+-
+ /* Load up POWER8-specific registers */
+ ld r5, VCPU_IAMR(r4)
+ lwz r6, VCPU_PSPB(r4)
+@@ -1436,106 +1326,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+- b 2f
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+- /* Turn on TM. */
+- mfmsr r8
+- li r0, 1
+- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+- mtmsrd r8
+-
+- ld r5, VCPU_MSR(r9)
+- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+- beq 1f /* TM not active in guest. */
+-
+- li r3, TM_CAUSE_KVM_RESCHED
+-
+- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+- li r5, 0
+- mtmsrd r5, 1
+-
+- /* All GPRs are volatile at this point. */
+- TRECLAIM(R3)
+-
+- /* Temporarily store r13 and r9 so we have some regs to play with */
+- SET_SCRATCH0(r13)
+- GET_PACA(r13)
+- std r9, PACATMSCRATCH(r13)
+- ld r9, HSTATE_KVM_VCPU(r13)
+-
+- /* Get a few more GPRs free. */
+- std r29, VCPU_GPRS_TM(29)(r9)
+- std r30, VCPU_GPRS_TM(30)(r9)
+- std r31, VCPU_GPRS_TM(31)(r9)
+-
+- /* Save away PPR and DSCR soon so don't run with user values. */
+- mfspr r31, SPRN_PPR
+- HMT_MEDIUM
+- mfspr r30, SPRN_DSCR
+- ld r29, HSTATE_DSCR(r13)
+- mtspr SPRN_DSCR, r29
+-
+- /* Save all but r9, r13 & r29-r31 */
+- reg = 0
+- .rept 29
+- .if (reg != 9) && (reg != 13)
+- std reg, VCPU_GPRS_TM(reg)(r9)
+- .endif
+- reg = reg + 1
+- .endr
+- /* ... now save r13 */
+- GET_SCRATCH0(r4)
+- std r4, VCPU_GPRS_TM(13)(r9)
+- /* ... and save r9 */
+- ld r4, PACATMSCRATCH(r13)
+- std r4, VCPU_GPRS_TM(9)(r9)
+-
+- /* Reload stack pointer and TOC. */
+- ld r1, HSTATE_HOST_R1(r13)
+- ld r2, PACATOC(r13)
+-
+- /* Set MSR RI now we have r1 and r13 back. */
+- li r5, MSR_RI
+- mtmsrd r5, 1
+-
+- /* Save away checkpinted SPRs. */
+- std r31, VCPU_PPR_TM(r9)
+- std r30, VCPU_DSCR_TM(r9)
+- mflr r5
+- mfcr r6
+- mfctr r7
+- mfspr r8, SPRN_AMR
+- mfspr r10, SPRN_TAR
+- std r5, VCPU_LR_TM(r9)
+- stw r6, VCPU_CR_TM(r9)
+- std r7, VCPU_CTR_TM(r9)
+- std r8, VCPU_AMR_TM(r9)
+- std r10, VCPU_TAR_TM(r9)
+-
+- /* Restore r12 as trap number. */
+- lwz r12, VCPU_TRAP(r9)
+-
+- /* Save FP/VSX. */
+- addi r3, r9, VCPU_FPRS_TM
+- bl store_fp_state
+- addi r3, r9, VCPU_VRS_TM
+- bl store_vr_state
+- mfspr r6, SPRN_VRSAVE
+- stw r6, VCPU_VRSAVE_TM(r9)
+-1:
+- /*
+- * We need to save these SPRs after the treclaim so that the software
+- * error code is recorded correctly in the TEXASR. Also the user may
+- * change these outside of a transaction, so they must always be
+- * context switched.
+- */
+- mfspr r5, SPRN_TFHAR
+- mfspr r6, SPRN_TFIAR
+- mfspr r7, SPRN_TEXASR
+- std r5, VCPU_TFHAR(r9)
+- std r6, VCPU_TFIAR(r9)
+- std r7, VCPU_TEXASR(r9)
+-2:
++ bl kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+
+ /* Increment yield count if they have a VPA */
+@@ -2629,6 +2421,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ mr r4,r31
+ blr
+
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++/*
++ * Save transactional state and TM-related registers.
++ * Called with r9 pointing to the vcpu struct.
++ * This can modify all checkpointed registers, but
++ * restores r1, r2 and r9 (vcpu pointer) before exit.
++ */
++kvmppc_save_tm:
++ mflr r0
++ std r0, PPC_LR_STKOFF(r1)
++
++ /* Turn on TM. */
++ mfmsr r8
++ li r0, 1
++ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
++ mtmsrd r8
++
++ ld r5, VCPU_MSR(r9)
++ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++ beq 1f /* TM not active in guest. */
++
++ std r1, HSTATE_HOST_R1(r13)
++ li r3, TM_CAUSE_KVM_RESCHED
++
++ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
++ li r5, 0
++ mtmsrd r5, 1
++
++ /* All GPRs are volatile at this point. */
++ TRECLAIM(R3)
++
++ /* Temporarily store r13 and r9 so we have some regs to play with */
++ SET_SCRATCH0(r13)
++ GET_PACA(r13)
++ std r9, PACATMSCRATCH(r13)
++ ld r9, HSTATE_KVM_VCPU(r13)
++
++ /* Get a few more GPRs free. */
++ std r29, VCPU_GPRS_TM(29)(r9)
++ std r30, VCPU_GPRS_TM(30)(r9)
++ std r31, VCPU_GPRS_TM(31)(r9)
++
++ /* Save away PPR and DSCR soon so don't run with user values. */
++ mfspr r31, SPRN_PPR
++ HMT_MEDIUM
++ mfspr r30, SPRN_DSCR
++ ld r29, HSTATE_DSCR(r13)
++ mtspr SPRN_DSCR, r29
++
++ /* Save all but r9, r13 & r29-r31 */
++ reg = 0
++ .rept 29
++ .if (reg != 9) && (reg != 13)
++ std reg, VCPU_GPRS_TM(reg)(r9)
++ .endif
++ reg = reg + 1
++ .endr
++ /* ... now save r13 */
++ GET_SCRATCH0(r4)
++ std r4, VCPU_GPRS_TM(13)(r9)
++ /* ... and save r9 */
++ ld r4, PACATMSCRATCH(r13)
++ std r4, VCPU_GPRS_TM(9)(r9)
++
++ /* Reload stack pointer and TOC. */
++ ld r1, HSTATE_HOST_R1(r13)
++ ld r2, PACATOC(r13)
++
++ /* Set MSR RI now we have r1 and r13 back. */
++ li r5, MSR_RI
++ mtmsrd r5, 1
++
++ /* Save away checkpinted SPRs. */
++ std r31, VCPU_PPR_TM(r9)
++ std r30, VCPU_DSCR_TM(r9)
++ mflr r5
++ mfcr r6
++ mfctr r7
++ mfspr r8, SPRN_AMR
++ mfspr r10, SPRN_TAR
++ std r5, VCPU_LR_TM(r9)
++ stw r6, VCPU_CR_TM(r9)
++ std r7, VCPU_CTR_TM(r9)
++ std r8, VCPU_AMR_TM(r9)
++ std r10, VCPU_TAR_TM(r9)
++
++ /* Restore r12 as trap number. */
++ lwz r12, VCPU_TRAP(r9)
++
++ /* Save FP/VSX. */
++ addi r3, r9, VCPU_FPRS_TM
++ bl store_fp_state
++ addi r3, r9, VCPU_VRS_TM
++ bl store_vr_state
++ mfspr r6, SPRN_VRSAVE
++ stw r6, VCPU_VRSAVE_TM(r9)
++1:
++ /*
++ * We need to save these SPRs after the treclaim so that the software
++ * error code is recorded correctly in the TEXASR. Also the user may
++ * change these outside of a transaction, so they must always be
++ * context switched.
++ */
++ mfspr r5, SPRN_TFHAR
++ mfspr r6, SPRN_TFIAR
++ mfspr r7, SPRN_TEXASR
++ std r5, VCPU_TFHAR(r9)
++ std r6, VCPU_TFIAR(r9)
++ std r7, VCPU_TEXASR(r9)
++
++ ld r0, PPC_LR_STKOFF(r1)
++ mtlr r0
++ blr
++
++/*
++ * Restore transactional state and TM-related registers.
++ * Called with r4 pointing to the vcpu struct.
++ * This potentially modifies all checkpointed registers.
++ * It restores r1, r2, r4 from the PACA.
++ */
++kvmppc_restore_tm:
++ mflr r0
++ std r0, PPC_LR_STKOFF(r1)
++
++ /* Turn on TM/FP/VSX/VMX so we can restore them. */
++ mfmsr r5
++ li r6, MSR_TM >> 32
++ sldi r6, r6, 32
++ or r5, r5, r6
++ ori r5, r5, MSR_FP
++ oris r5, r5, (MSR_VEC | MSR_VSX)@h
++ mtmsrd r5
++
++ /*
++ * The user may change these outside of a transaction, so they must
++ * always be context switched.
++ */
++ ld r5, VCPU_TFHAR(r4)
++ ld r6, VCPU_TFIAR(r4)
++ ld r7, VCPU_TEXASR(r4)
++ mtspr SPRN_TFHAR, r5
++ mtspr SPRN_TFIAR, r6
++ mtspr SPRN_TEXASR, r7
++
++ ld r5, VCPU_MSR(r4)
++ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++ beqlr /* TM not active in guest */
++ std r1, HSTATE_HOST_R1(r13)
++
++ /* Make sure the failure summary is set, otherwise we'll program check
++ * when we trechkpt. It's possible that this might have been not set
++ * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
++ * host.
++ */
++ oris r7, r7, (TEXASR_FS)@h
++ mtspr SPRN_TEXASR, r7
++
++ /*
++ * We need to load up the checkpointed state for the guest.
++ * We need to do this early as it will blow away any GPRs, VSRs and
++ * some SPRs.
++ */
++
++ mr r31, r4
++ addi r3, r31, VCPU_FPRS_TM
++ bl load_fp_state
++ addi r3, r31, VCPU_VRS_TM
++ bl load_vr_state
++ mr r4, r31
++ lwz r7, VCPU_VRSAVE_TM(r4)
++ mtspr SPRN_VRSAVE, r7
++
++ ld r5, VCPU_LR_TM(r4)
++ lwz r6, VCPU_CR_TM(r4)
++ ld r7, VCPU_CTR_TM(r4)
++ ld r8, VCPU_AMR_TM(r4)
++ ld r9, VCPU_TAR_TM(r4)
++ mtlr r5
++ mtcr r6
++ mtctr r7
++ mtspr SPRN_AMR, r8
++ mtspr SPRN_TAR, r9
++
++ /*
++ * Load up PPR and DSCR values but don't put them in the actual SPRs
++ * till the last moment to avoid running with userspace PPR and DSCR for
++ * too long.
++ */
++ ld r29, VCPU_DSCR_TM(r4)
++ ld r30, VCPU_PPR_TM(r4)
++
++ std r2, PACATMSCRATCH(r13) /* Save TOC */
++
++ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
++ li r5, 0
++ mtmsrd r5, 1
++
++ /* Load GPRs r0-r28 */
++ reg = 0
++ .rept 29
++ ld reg, VCPU_GPRS_TM(reg)(r31)
++ reg = reg + 1
++ .endr
++
++ mtspr SPRN_DSCR, r29
++ mtspr SPRN_PPR, r30
++
++ /* Load final GPRs */
++ ld 29, VCPU_GPRS_TM(29)(r31)
++ ld 30, VCPU_GPRS_TM(30)(r31)
++ ld 31, VCPU_GPRS_TM(31)(r31)
++
++ /* TM checkpointed state is now setup. All GPRs are now volatile. */
++ TRECHKPT
++
++ /* Now let's get back the state we need. */
++ HMT_MEDIUM
++ GET_PACA(r13)
++ ld r29, HSTATE_DSCR(r13)
++ mtspr SPRN_DSCR, r29
++ ld r4, HSTATE_KVM_VCPU(r13)
++ ld r1, HSTATE_HOST_R1(r13)
++ ld r2, PACATMSCRATCH(r13)
++
++ /* Set the MSR RI since we have our registers back. */
++ li r5, MSR_RI
++ mtmsrd r5, 1
++
++ ld r0, PPC_LR_STKOFF(r1)
++ mtlr r0
++ blr
++#endif
++
+ /*
+ * We come here if we get any exception or interrupt while we are
+ * executing host real mode code while in guest MMU context.
--- /dev/null
+From 93d17397e4e2182fdaad503e2f9da46202c0f1c3 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Wed, 22 Jun 2016 15:52:55 +1000
+Subject: KVM: PPC: Book3S HV: Save/restore TM state in H_CEDE
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit 93d17397e4e2182fdaad503e2f9da46202c0f1c3 upstream.
+
+It turns out that if the guest does a H_CEDE while the CPU is in
+a transactional state, and the H_CEDE does a nap, and the nap
+loses the architected state of the CPU (which it is allowed to do),
+then we lose the checkpointed state of the virtual CPU. In addition,
+the transactional-memory state recorded in the MSR gets reset back
+to non-transactional, and when we try to return to the guest, we take
+a TM bad thing type of program interrupt because we are trying to
+transition from non-transactional to transactional with a hrfid
+instruction, which is not permitted.
+
+The result of the program interrupt occurring at that point is that
+the host CPU will hang in an infinite loop with interrupts disabled.
+Thus this is a denial of service vulnerability in the host which can
+be triggered by any guest (and depending on the guest kernel, it can
+potentially be triggered by unprivileged userspace in the guest).
+
+This vulnerability has been assigned the ID CVE-2016-5412.
+
+To fix this, we save the TM state before napping and restore it
+on exit from the nap, when handling a H_CEDE in real mode. The
+case where H_CEDE exits to host virtual mode is already OK (as are
+other hcalls which exit to host virtual mode) because the exit
+path saves the TM state.
+
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2037,6 +2037,13 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu poi
+ /* save FP state */
+ bl kvmppc_save_fp
+
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++ ld r9, HSTATE_KVM_VCPU(r13)
++ bl kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ /*
+ * Set DEC to the smaller of DEC and HDEC, so that we wake
+ * no later than the end of our timeslice (HDEC interrupts
+@@ -2113,6 +2120,12 @@ kvm_end_cede:
+ bl kvmhv_accumulate_time
+ #endif
+
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++ bl kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ /* load up FP state */
+ bl kvmppc_load_fp
+
--- /dev/null
+From b244c9fc251e14a083a1cbf04bef10bd99303a76 Mon Sep 17 00:00:00 2001
+From: "Cao, Lei" <Lei.Cao@stratus.com>
+Date: Fri, 15 Jul 2016 13:54:04 +0000
+Subject: KVM: VMX: handle PML full VMEXIT that occurs during event delivery
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Cao, Lei <Lei.Cao@stratus.com>
+
+commit b244c9fc251e14a083a1cbf04bef10bd99303a76 upstream.
+
+With PML enabled, the guest will shut down if a PML full VMEXIT occurs
+during event delivery. According to Intel SDM 27.2.3, a PML full VMEXIT can
+occur while an event is being delivered through the IDT, so KVM should not
+exit to user space with an error. Instead, it should let
+EXIT_REASON_PML_FULL go through and the event will be re-injected on the
+next VMENTRY.
+
+Signed-off-by: Lei Cao <lei.cao@stratus.com>
+Fixes: 843e4330573c ("KVM: VMX: Add PML support in VMX")
+[Shortened the summary and Cc'd stable.]
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8124,6 +8124,7 @@ static int vmx_handle_exit(struct kvm_vc
+ if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
+ exit_reason != EXIT_REASON_EPT_VIOLATION &&
++ exit_reason != EXIT_REASON_PML_FULL &&
+ exit_reason != EXIT_REASON_TASK_SWITCH)) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
--- /dev/null
+From 9835f1b70bb3890d38308b9be4fb9d7451ba67f1 Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Wed, 15 Jun 2016 01:02:26 +0200
+Subject: mfd: qcom_rpm: Fix offset error for msm8660
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 9835f1b70bb3890d38308b9be4fb9d7451ba67f1 upstream.
+
+The RPM in MSM8660/APQ8060 has different offsets to the selector
+ACK and request context ACK registers. Make all these register
+offsets part of the per-SoC data and assign the right values.
+
+The bug was found by verifying backwards to the vendor tree in
+the out-of-tree files <mach/rpm-[8660|8064|8960]>: all were using
+offsets 3,11,15,23 and a select size of 4, except the MSM8660/APQ8060
+which was using offsets 3,11,19,27 and a select size of 7.
+
+All other platforms apart from msm8660 were affected by reading
+excess registers, since 7 was hardcoded as the number of select
+words, this patch makes also this part dynamic so we only write/read
+as many select words as the platform actually use.
+
+Symptoms of this bug when using msm8660: the first RPM transaction
+would work, but the next would stall or raise an error since the
+previous transaction was not properly ACKed as the ACK words were
+read at the wrong offset.
+
+Fixes: 58e214382bdd ("mfd: qcom-rpm: Driver for the Qualcomm RPM")
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Reviewed-by: Björn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mfd/qcom_rpm.c | 50 +++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 36 insertions(+), 14 deletions(-)
+
+--- a/drivers/mfd/qcom_rpm.c
++++ b/drivers/mfd/qcom_rpm.c
+@@ -34,7 +34,12 @@ struct qcom_rpm_resource {
+ struct qcom_rpm_data {
+ u32 version;
+ const struct qcom_rpm_resource *resource_table;
+- unsigned n_resources;
++ unsigned int n_resources;
++ unsigned int req_ctx_off;
++ unsigned int req_sel_off;
++ unsigned int ack_ctx_off;
++ unsigned int ack_sel_off;
++ unsigned int sel_size;
+ };
+
+ struct qcom_rpm {
+@@ -61,11 +66,7 @@ struct qcom_rpm {
+
+ #define RPM_REQUEST_TIMEOUT (5 * HZ)
+
+-#define RPM_REQUEST_CONTEXT 3
+-#define RPM_REQ_SELECT 11
+-#define RPM_ACK_CONTEXT 15
+-#define RPM_ACK_SELECTOR 23
+-#define RPM_SELECT_SIZE 7
++#define RPM_MAX_SEL_SIZE 7
+
+ #define RPM_NOTIFICATION BIT(30)
+ #define RPM_REJECTED BIT(31)
+@@ -157,6 +158,11 @@ static const struct qcom_rpm_data apq806
+ .version = 3,
+ .resource_table = apq8064_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
++ .req_ctx_off = 3,
++ .req_sel_off = 11,
++ .ack_ctx_off = 15,
++ .ack_sel_off = 23,
++ .sel_size = 4,
+ };
+
+ static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
+@@ -240,6 +246,11 @@ static const struct qcom_rpm_data msm866
+ .version = 2,
+ .resource_table = msm8660_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
++ .req_ctx_off = 3,
++ .req_sel_off = 11,
++ .ack_ctx_off = 19,
++ .ack_sel_off = 27,
++ .sel_size = 7,
+ };
+
+ static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
+@@ -322,6 +333,11 @@ static const struct qcom_rpm_data msm896
+ .version = 3,
+ .resource_table = msm8960_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
++ .req_ctx_off = 3,
++ .req_sel_off = 11,
++ .ack_ctx_off = 15,
++ .ack_sel_off = 23,
++ .sel_size = 4,
+ };
+
+ static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
+@@ -362,6 +378,11 @@ static const struct qcom_rpm_data ipq806
+ .version = 3,
+ .resource_table = ipq806x_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table),
++ .req_ctx_off = 3,
++ .req_sel_off = 11,
++ .ack_ctx_off = 15,
++ .ack_sel_off = 23,
++ .sel_size = 4,
+ };
+
+ static const struct of_device_id qcom_rpm_of_match[] = {
+@@ -380,7 +401,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ {
+ const struct qcom_rpm_resource *res;
+ const struct qcom_rpm_data *data = rpm->data;
+- u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
++ u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 };
+ int left;
+ int ret = 0;
+ int i;
+@@ -398,12 +419,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
+
+ bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
+- for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
++ for (i = 0; i < rpm->data->sel_size; i++) {
+ writel_relaxed(sel_mask[i],
+- RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
++ RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
+ }
+
+- writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
++ writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off));
+
+ reinit_completion(&rpm->ack);
+ regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
+@@ -426,10 +447,11 @@ static irqreturn_t qcom_rpm_ack_interrup
+ u32 ack;
+ int i;
+
+- ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
+- for (i = 0; i < RPM_SELECT_SIZE; i++)
+- writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
+- writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
++ ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
++ for (i = 0; i < rpm->data->sel_size; i++)
++ writel_relaxed(0,
++ RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
++ writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
+
+ if (ack & RPM_NOTIFICATION) {
+ dev_warn(rpm->dev, "ignoring notification!\n");
--- /dev/null
+From f37be01e6dc606f2fcc5e95c9933d948ce19bd35 Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Wed, 22 Jun 2016 08:27:17 +0200
+Subject: mfd: qcom_rpm: Parametrize also ack selector size
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit f37be01e6dc606f2fcc5e95c9933d948ce19bd35 upstream.
+
+The RPM has two sets of selectors (IPC bit fields): request and
+acknowledge. Apparently, some models use 4*32 bit words for the request
+selector and some use 7*32 bit words, but all use 7*32 words for the
+acknowledge bits.
+
+So, on the models with 4*32 request select bits, you can apparently send
+4*32 different messages yet get 7*32 different replies, so on an ACK
+interrupt 7*32 bit words need to be read. This is how the vendor code
+apparently works.
+
+Reported-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mfd/qcom_rpm.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+--- a/drivers/mfd/qcom_rpm.c
++++ b/drivers/mfd/qcom_rpm.c
+@@ -39,7 +39,8 @@ struct qcom_rpm_data {
+ unsigned int req_sel_off;
+ unsigned int ack_ctx_off;
+ unsigned int ack_sel_off;
+- unsigned int sel_size;
++ unsigned int req_sel_size;
++ unsigned int ack_sel_size;
+ };
+
+ struct qcom_rpm {
+@@ -162,7 +163,8 @@ static const struct qcom_rpm_data apq806
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+- .sel_size = 4,
++ .req_sel_size = 4,
++ .ack_sel_size = 7,
+ };
+
+ static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
+@@ -250,7 +252,8 @@ static const struct qcom_rpm_data msm866
+ .req_sel_off = 11,
+ .ack_ctx_off = 19,
+ .ack_sel_off = 27,
+- .sel_size = 7,
++ .req_sel_size = 7,
++ .ack_sel_size = 7,
+ };
+
+ static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
+@@ -337,7 +340,8 @@ static const struct qcom_rpm_data msm896
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+- .sel_size = 4,
++ .req_sel_size = 4,
++ .ack_sel_size = 7,
+ };
+
+ static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
+@@ -382,7 +386,8 @@ static const struct qcom_rpm_data ipq806
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+- .sel_size = 4,
++ .req_sel_size = 4,
++ .ack_sel_size = 7,
+ };
+
+ static const struct of_device_id qcom_rpm_of_match[] = {
+@@ -419,7 +424,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
+
+ bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
+- for (i = 0; i < rpm->data->sel_size; i++) {
++ for (i = 0; i < rpm->data->req_sel_size; i++) {
+ writel_relaxed(sel_mask[i],
+ RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
+ }
+@@ -448,7 +453,7 @@ static irqreturn_t qcom_rpm_ack_interrup
+ int i;
+
+ ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
+- for (i = 0; i < rpm->data->sel_size; i++)
++ for (i = 0; i < rpm->data->ack_sel_size; i++)
+ writel_relaxed(0,
+ RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
+ writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
--- /dev/null
+From 0bd50d719b004110e791800450ad204399100a86 Mon Sep 17 00:00:00 2001
+From: Dan O'Donovan <dan@emutex.com>
+Date: Fri, 10 Jun 2016 13:23:34 +0100
+Subject: pinctrl: cherryview: prevent concurrent access to GPIO controllers
+
+From: Dan O'Donovan <dan@emutex.com>
+
+commit 0bd50d719b004110e791800450ad204399100a86 upstream.
+
+Due to a silicon issue on the Atom X5-Z8000 "Cherry Trail" processor
+series, a common lock must be used to prevent concurrent accesses
+across the 4 GPIO controllers managed by this driver.
+
+See Intel Atom Z8000 Processor Series Specification Update
+(Rev. 005), errata #CHT34, for further information.
+
+Signed-off-by: Dan O'Donovan <dan@emutex.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-cherryview.c | 80 +++++++++++++++--------------
+ 1 file changed, 44 insertions(+), 36 deletions(-)
+
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -160,7 +160,6 @@ struct chv_pin_context {
+ * @pctldev: Pointer to the pin controller device
+ * @chip: GPIO chip in this pin controller
+ * @regs: MMIO registers
+- * @lock: Lock to serialize register accesses
+ * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
+ * offset (in GPIO number space)
+ * @community: Community this pinctrl instance represents
+@@ -174,7 +173,6 @@ struct chv_pinctrl {
+ struct pinctrl_dev *pctldev;
+ struct gpio_chip chip;
+ void __iomem *regs;
+- raw_spinlock_t lock;
+ unsigned intr_lines[16];
+ const struct chv_community *community;
+ u32 saved_intmask;
+@@ -659,6 +657,17 @@ static const struct chv_community *chv_c
+ &southeast_community,
+ };
+
++/*
++ * Lock to serialize register accesses
++ *
++ * Due to a silicon issue, a shared lock must be used to prevent
++ * concurrent accesses across the 4 GPIO controllers.
++ *
++ * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005),
++ * errata #CHT34, for further information.
++ */
++static DEFINE_RAW_SPINLOCK(chv_lock);
++
+ static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
+ unsigned reg)
+ {
+@@ -720,13 +729,13 @@ static void chv_pin_dbg_show(struct pinc
+ u32 ctrl0, ctrl1;
+ bool locked;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
+ locked = chv_pad_locked(pctrl, offset);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
+ seq_puts(s, "GPIO ");
+@@ -789,14 +798,14 @@ static int chv_pinmux_set_mux(struct pin
+
+ grp = &pctrl->community->groups[group];
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ /* Check first that the pad is not locked */
+ for (i = 0; i < grp->npins; i++) {
+ if (chv_pad_locked(pctrl, grp->pins[i])) {
+ dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
+ grp->pins[i]);
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EBUSY;
+ }
+ }
+@@ -839,7 +848,7 @@ static int chv_pinmux_set_mux(struct pin
+ pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
+ }
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -853,13 +862,13 @@ static int chv_gpio_request_enable(struc
+ void __iomem *reg;
+ u32 value;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ if (chv_pad_locked(pctrl, offset)) {
+ value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ if (!(value & CHV_PADCTRL0_GPIOEN)) {
+ /* Locked so cannot enable */
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EBUSY;
+ }
+ } else {
+@@ -899,7 +908,7 @@ static int chv_gpio_request_enable(struc
+ chv_writel(value, reg);
+ }
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -913,13 +922,13 @@ static void chv_gpio_disable_free(struct
+ void __iomem *reg;
+ u32 value;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
+ value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
+ chv_writel(value, reg);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+
+ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
+@@ -931,7 +940,7 @@ static int chv_gpio_set_direction(struct
+ unsigned long flags;
+ u32 ctrl0;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
+ if (input)
+@@ -940,7 +949,7 @@ static int chv_gpio_set_direction(struct
+ ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
+ chv_writel(ctrl0, reg);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -965,10 +974,10 @@ static int chv_config_get(struct pinctrl
+ u16 arg = 0;
+ u32 term;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
+
+@@ -1042,7 +1051,7 @@ static int chv_config_set_pull(struct ch
+ unsigned long flags;
+ u32 ctrl0, pull;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ ctrl0 = readl(reg);
+
+ switch (param) {
+@@ -1065,7 +1074,7 @@ static int chv_config_set_pull(struct ch
+ pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ break;
+ default:
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EINVAL;
+ }
+
+@@ -1083,7 +1092,7 @@ static int chv_config_set_pull(struct ch
+ pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ break;
+ default:
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EINVAL;
+ }
+
+@@ -1091,12 +1100,12 @@ static int chv_config_set_pull(struct ch
+ break;
+
+ default:
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ return -EINVAL;
+ }
+
+ chv_writel(ctrl0, reg);
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -1162,9 +1171,9 @@ static int chv_gpio_get(struct gpio_chip
+ unsigned long flags;
+ u32 ctrl0, cfg;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+@@ -1182,7 +1191,7 @@ static void chv_gpio_set(struct gpio_chi
+ void __iomem *reg;
+ u32 ctrl0;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
+ ctrl0 = readl(reg);
+@@ -1194,7 +1203,7 @@ static void chv_gpio_set(struct gpio_chi
+
+ chv_writel(ctrl0, reg);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+
+ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+@@ -1204,9 +1213,9 @@ static int chv_gpio_get_direction(struct
+ u32 ctrl0, direction;
+ unsigned long flags;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+@@ -1244,14 +1253,14 @@ static void chv_gpio_irq_ack(struct irq_
+ int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
+ u32 intr_line;
+
+- raw_spin_lock(&pctrl->lock);
++ raw_spin_lock(&chv_lock);
+
+ intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+ intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
+ chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
+
+- raw_spin_unlock(&pctrl->lock);
++ raw_spin_unlock(&chv_lock);
+ }
+
+ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+@@ -1262,7 +1271,7 @@ static void chv_gpio_irq_mask_unmask(str
+ u32 value, intr_line;
+ unsigned long flags;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+@@ -1275,7 +1284,7 @@ static void chv_gpio_irq_mask_unmask(str
+ value |= BIT(intr_line);
+ chv_writel(value, pctrl->regs + CHV_INTMASK);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+
+ static void chv_gpio_irq_mask(struct irq_data *d)
+@@ -1309,7 +1318,7 @@ static unsigned chv_gpio_irq_startup(str
+ unsigned long flags;
+ u32 intsel, value;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ intsel &= CHV_PADCTRL0_INTSEL_MASK;
+ intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+@@ -1324,7 +1333,7 @@ static unsigned chv_gpio_irq_startup(str
+ irq_set_handler_locked(d, handler);
+ pctrl->intr_lines[intsel] = offset;
+ }
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+
+ chv_gpio_irq_unmask(d);
+@@ -1340,7 +1349,7 @@ static int chv_gpio_irq_type(struct irq_
+ unsigned long flags;
+ u32 value;
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
++ raw_spin_lock_irqsave(&chv_lock, flags);
+
+ /*
+ * Pins which can be used as shared interrupt are configured in
+@@ -1389,7 +1398,7 @@ static int chv_gpio_irq_type(struct irq_
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+ }
+@@ -1501,7 +1510,6 @@ static int chv_pinctrl_probe(struct plat
+ if (i == ARRAY_SIZE(chv_communities))
+ return -ENODEV;
+
+- raw_spin_lock_init(&pctrl->lock);
+ pctrl->dev = &pdev->dev;
+
+ #ifdef CONFIG_PM_SLEEP
--- /dev/null
+From 0f5d050ceaa31b2229102211d60c149f920df3aa Mon Sep 17 00:00:00 2001
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Date: Tue, 12 Jul 2016 19:57:57 +0200
+Subject: s390/cio: allow to reset channel measurement block
+
+From: Sebastian Ott <sebott@linux.vnet.ibm.com>
+
+commit 0f5d050ceaa31b2229102211d60c149f920df3aa upstream.
+
+Prior to commit 1bc6664bdfb949bc69a08113801e7d6acbf6bc3f a call to
+enable_cmf for a device for which channel measurement was already
+enabled resulted in a reset of the measurement data.
+
+What looked like bugs at the time (a 2nd allocation was triggered
+but failed, reset was called regardless of previous failures, and
+errors were not reported to userspace) was actually something
+at least one userspace tool depended on. Restore that behavior in
+a sane way.
+
+Fixes: 1bc6664bdfb ("s390/cio: use device_lock during cmb activation")
+Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Reviewed-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/cio/cmf.c | 29 ++++++++++++++++++++---------
+ 1 file changed, 20 insertions(+), 9 deletions(-)
+
+--- a/drivers/s390/cio/cmf.c
++++ b/drivers/s390/cio/cmf.c
+@@ -753,6 +753,17 @@ static void reset_cmb(struct ccw_device
+ cmf_generic_reset(cdev);
+ }
+
++static int cmf_enabled(struct ccw_device *cdev)
++{
++ int enabled;
++
++ spin_lock_irq(cdev->ccwlock);
++ enabled = !!cdev->private->cmb;
++ spin_unlock_irq(cdev->ccwlock);
++
++ return enabled;
++}
++
+ static struct attribute_group cmf_attr_group;
+
+ static struct cmb_operations cmbops_basic = {
+@@ -1153,13 +1164,8 @@ static ssize_t cmb_enable_show(struct de
+ char *buf)
+ {
+ struct ccw_device *cdev = to_ccwdev(dev);
+- int enabled;
+
+- spin_lock_irq(cdev->ccwlock);
+- enabled = !!cdev->private->cmb;
+- spin_unlock_irq(cdev->ccwlock);
+-
+- return sprintf(buf, "%d\n", enabled);
++ return sprintf(buf, "%d\n", cmf_enabled(cdev));
+ }
+
+ static ssize_t cmb_enable_store(struct device *dev,
+@@ -1199,15 +1205,20 @@ int ccw_set_cmf(struct ccw_device *cdev,
+ * @cdev: The ccw device to be enabled
+ *
+ * Returns %0 for success or a negative error value.
+- *
++ * Note: If this is called on a device for which channel measurement is already
++ * enabled a reset of the measurement data is triggered.
+ * Context:
+ * non-atomic
+ */
+ int enable_cmf(struct ccw_device *cdev)
+ {
+- int ret;
++ int ret = 0;
+
+ device_lock(&cdev->dev);
++ if (cmf_enabled(cdev)) {
++ cmbops->reset(cdev);
++ goto out_unlock;
++ }
+ get_device(&cdev->dev);
+ ret = cmbops->alloc(cdev);
+ if (ret)
+@@ -1226,7 +1237,7 @@ int enable_cmf(struct ccw_device *cdev)
+ out:
+ if (ret)
+ put_device(&cdev->dev);
+-
++out_unlock:
+ device_unlock(&cdev->dev);
+ return ret;
+ }
--- /dev/null
+From e51e4d8a185de90424b03f30181b35f29c46a25a Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Date: Thu, 16 Jun 2016 08:27:35 +0200
+Subject: serial: samsung: Fix ERR pointer dereference on deferred probe
+
+From: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+
+commit e51e4d8a185de90424b03f30181b35f29c46a25a upstream.
+
+When clk_get() of the "uart" clock returns EPROBE_DEFER, the next re-probe
+finishes with success but uses invalid (ERR_PTR) values. This leads to
+dereferencing the ERR_PTR stored in ourport->clk:
+
+ 12c30000.serial: Controller clock not found
+ (...)
+ 12c30000.serial: ttySAC3 at MMIO 0x12c30000 (irq = 61, base_baud = 0) is a S3C6400/10
+ Unable to handle kernel paging request at virtual address fffffdfb
+
+ (clk_prepare) from [<c039f7d0>] (s3c24xx_serial_pm+0x20/0x128)
+ (s3c24xx_serial_pm) from [<c0395414>] (uart_change_pm+0x38/0x40)
+ (uart_change_pm) from [<c039689c>] (uart_add_one_port+0x31c/0x44c)
+ (uart_add_one_port) from [<c03a035c>] (s3c24xx_serial_probe+0x2a8/0x418)
+ (s3c24xx_serial_probe) from [<c03ee110>] (platform_drv_probe+0x50/0xb0)
+ (platform_drv_probe) from [<c03ecb44>] (driver_probe_device+0x1f4/0x2b0)
+ (driver_probe_device) from [<c03eb0c0>] (bus_for_each_drv+0x44/0x8c)
+ (bus_for_each_drv) from [<c03ec8c8>] (__device_attach+0x9c/0x100)
+ (__device_attach) from [<c03ebf54>] (bus_probe_device+0x84/0x8c)
+ (bus_probe_device) from [<c03ec388>] (deferred_probe_work_func+0x60/0x8c)
+ (deferred_probe_work_func) from [<c012fee4>] (process_one_work+0x120/0x328)
+ (process_one_work) from [<c0130150>] (worker_thread+0x2c/0x4ac)
+ (worker_thread) from [<c0135320>] (kthread+0xd8/0xf4)
+ (kthread) from [<c0107978>] (ret_from_fork+0x14/0x3c)
+
+The first unsuccessful clk_get() causes s3c24xx_serial_init_port() to
+exit with failure, but the s3c24xx_uart_port is left half-configured
+(e.g. port->mapbase is set, clk contains an ERR_PTR). On the next
+re-probe, s3c24xx_serial_init_port() exits early with success because
+port->mapbase is already configured, and the driver keeps using the old
+values, including the ERR_PTR as the clock.
+
+Fix this by clearing port->mapbase on the error path so that each
+re-probe initializes all of the port settings.
+
+Fixes: 60e93575476f ("serial: samsung: enable clock before clearing pending interrupts during init")
+Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Reviewed-by: Javier Martinez Canillas <javier@osg.samsung.com>
+Tested-by: Javier Martinez Canillas <javier@osg.samsung.com>
+Tested-by: Kevin Hilman <khilman@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
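+The core of the fix below is that the "already initialized" marker
+(port->mapbase) is cleared on every error path, so a deferred re-probe
+runs the full initialization again instead of returning early with
+stale ERR_PTR values. A minimal sketch of that pattern, using
+hypothetical names (example_port, example_init_port) rather than the
+real samsung structures:
+
+	#include <linux/clk.h>
+	#include <linux/device.h>
+	#include <linux/err.h>
+	#include <linux/errno.h>
+	#include <linux/types.h>
+
+	struct example_port {
+		resource_size_t mapbase;	/* non-zero: already set up */
+		struct clk *clk;
+	};
+
+	static int example_init_port(struct example_port *p,
+				     struct device *dev,
+				     resource_size_t base)
+	{
+		int ret;
+
+		if (p->mapbase != 0)		/* already initialized */
+			return -EINVAL;
+
+		p->mapbase = base;
+
+		p->clk = clk_get(dev, "uart");
+		if (IS_ERR(p->clk)) {
+			ret = PTR_ERR(p->clk);	/* may be -EPROBE_DEFER */
+			goto err;
+		}
+
+		return 0;
+
+	err:
+		/* Undo the marker so the next probe retries from scratch. */
+		p->mapbase = 0;
+		return ret;
+	}
+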
+ drivers/tty/serial/samsung.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1676,7 +1676,7 @@ static int s3c24xx_serial_init_port(stru
+ return -ENODEV;
+
+ if (port->mapbase != 0)
+- return 0;
++ return -EINVAL;
+
+ /* setup info for port */
+ port->dev = &platdev->dev;
+@@ -1730,22 +1730,25 @@ static int s3c24xx_serial_init_port(stru
+ ourport->dma = devm_kzalloc(port->dev,
+ sizeof(*ourport->dma),
+ GFP_KERNEL);
+- if (!ourport->dma)
+- return -ENOMEM;
++ if (!ourport->dma) {
++ ret = -ENOMEM;
++ goto err;
++ }
+ }
+
+ ourport->clk = clk_get(&platdev->dev, "uart");
+ if (IS_ERR(ourport->clk)) {
+ pr_err("%s: Controller clock not found\n",
+ dev_name(&platdev->dev));
+- return PTR_ERR(ourport->clk);
++ ret = PTR_ERR(ourport->clk);
++ goto err;
+ }
+
+ ret = clk_prepare_enable(ourport->clk);
+ if (ret) {
+ pr_err("uart: clock failed to prepare+enable: %d\n", ret);
+ clk_put(ourport->clk);
+- return ret;
++ goto err;
+ }
+
+ /* Keep all interrupts masked and cleared */
+@@ -1761,7 +1764,12 @@ static int s3c24xx_serial_init_port(stru
+
+ /* reset the fifos (and setup the uart) */
+ s3c24xx_serial_resetport(port, cfg);
++
+ return 0;
++
++err:
++ port->mapbase = 0;
++ return ret;
+ }
+
+ /* Device driver serial port probe */
arm64-kernel-save-and-restore-uao-and-addr_limit-on-exception-entry.patch
arm64-debug-unmask-pstate.d-earlier.patch
arm64-fix-incorrect-per-cpu-usage-for-boot-cpu.patch
+tty-serial-msm-don-t-read-off-end-of-tx-fifo.patch
+serial-samsung-fix-err-pointer-dereference-on-deferred-probe.patch
+tty-serial-atmel-fix-rs485-half-duplex-with-dma.patch
+gpio-pca953x-fix-nbank-calculation-for-pca9536.patch
+gpio-intel-mid-remove-potentially-harmful-code.patch
+bluetooth-hci_intel-fix-null-gpio-desc-pointer-dereference.patch
+pinctrl-cherryview-prevent-concurrent-access-to-gpio-controllers.patch
+arm64-dts-rockchip-fixes-the-gic400-2nd-region-size-for-rk3368.patch
+arm64-mm-avoid-fdt_check_header-before-the-fdt-is-fully-mapped.patch
+kvm-ppc-book3s-hv-pull-out-tm-state-save-restore-into-separate-procedures.patch
+kvm-ppc-book3s-hv-save-restore-tm-state-in-h_cede.patch
+kvm-mtrr-fix-kvm_mtrr_check_gfn_range_consistency-page-fault.patch
+kvm-vmx-handle-pml-full-vmexit-that-occurs-during-event-delivery.patch
+kvm-nvmx-fix-memory-corruption-when-using-vmcs-shadowing.patch
+s390-cio-allow-to-reset-channel-measurement-block.patch
+intel_pstate-fix-msr_config_tdp_x-addressing-in-core_get_max_pstate.patch
+mfd-qcom_rpm-fix-offset-error-for-msm8660.patch
+mfd-qcom_rpm-parametrize-also-ack-selector-size.patch
--- /dev/null
+From 0058f0871efe7b01c6f2b3046c68196ab73e96da Mon Sep 17 00:00:00 2001
+From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Date: Sat, 28 May 2016 00:54:08 +0200
+Subject: tty/serial: atmel: fix RS485 half duplex with DMA
+
+From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+
+commit 0058f0871efe7b01c6f2b3046c68196ab73e96da upstream.
+
+When using DMA, half duplex doesn't work properly because rx is not stopped
+before starting tx. Ensure we call atmel_stop_rx() in the DMA case.
+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
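+The change below makes the RS485 half-duplex handling cover the DMA
+transmit path as well: the receiver has to be stopped before the
+transmitter is started, whether the PDC or the DMA engine performs the
+transfer. A rough sketch of that ordering follows; example_port and the
+example_* helpers are assumptions for illustration, not the driver's
+API:
+
+	#include <linux/serial.h>	/* SER_RS485_* flags */
+	#include <linux/types.h>
+
+	struct example_port {
+		u32 rs485_flags;
+		bool use_pdc_tx;
+		bool use_dma_tx;
+	};
+
+	/* Trivial stubs standing in for the real hardware accessors: */
+	static void example_stop_rx(struct example_port *p) { }
+	static void example_enable_pdc_tx(struct example_port *p) { }
+	static void example_enable_tx_irq(struct example_port *p) { }
+
+	static void example_start_tx(struct example_port *p)
+	{
+		/* Half duplex: no reception while transmitting, for both
+		 * the PDC and the DMA transmit paths. */
+		if (p->use_pdc_tx || p->use_dma_tx) {
+			if ((p->rs485_flags & SER_RS485_ENABLED) &&
+			    !(p->rs485_flags & SER_RS485_RX_DURING_TX))
+				example_stop_rx(p);
+		}
+
+		if (p->use_pdc_tx)
+			example_enable_pdc_tx(p);	/* re-enable PDC TX */
+
+		example_enable_tx_irq(p);
+	}
+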
+ drivers/tty/serial/atmel_serial.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -485,19 +485,21 @@ static void atmel_start_tx(struct uart_p
+ {
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+- if (atmel_use_pdc_tx(port)) {
+- if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
+- /* The transmitter is already running. Yes, we
+- really need this.*/
+- return;
++ if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
++ & ATMEL_PDC_TXTEN))
++ /* The transmitter is already running. Yes, we
++ really need this.*/
++ return;
+
++ if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ atmel_stop_rx(port);
+
++ if (atmel_use_pdc_tx(port))
+ /* re-enable PDC transmit */
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
+- }
++
+ /* Enable interrupts */
+ atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+ }
--- /dev/null
+From 30acf549ca1e81859a67590ab9ecfce3d1050a0b Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Thu, 2 Jun 2016 17:48:28 -0700
+Subject: tty: serial: msm: Don't read off end of tx fifo
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit 30acf549ca1e81859a67590ab9ecfce3d1050a0b upstream.
+
+For dm uarts in pio mode, tx data is transferred to the fifo register 4
+bytes at a time, but care is not taken when these 4 bytes span the end
+of the xmit buffer, so the loop might read up to 3 bytes past the buffer
+and then skip the actual data at the beginning of the buffer.
+
+Fix this by, analogously to the DMA case, making sure the chunk doesn't
+wrap the xmit buffer.
+
+Fixes: 3a878c430fd6 ("tty: serial: msm: Add TX DMA support")
+Cc: Ivan Ivanov <iivanov.xz@gmail.com>
+Reported-by: Frank Rowand <frowand.list@gmail.com>
+Reported-by: Nicolas Dechesne <nicolas.dechesne@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Acked-by: Andy Gross <andy.gross@linaro.org>
+Tested-by: Frank Rowand <frank.rowand@am.sony.com>
+Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
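+The one-line change below switches the PIO path from CIRC_CNT() to
+CIRC_CNT_TO_END() so the 4-byte copies never cross the end of the
+circular transmit buffer. A worked example of the difference, taking
+UART_XMIT_SIZE as 4096 (the numbers are illustrative only):
+
+	#include <linux/circ_buf.h>
+
+	/* size = 4096, tail = 4094, head = 6: 8 pending bytes that wrap
+	 * around the end of the buffer. */
+	static void circ_cnt_example(void)
+	{
+		unsigned int total  = CIRC_CNT(6, 4094, 4096);        /* 8 */
+		unsigned int to_end = CIRC_CNT_TO_END(6, 4094, 4096); /* 2 */
+
+		/*
+		 * Copying 4 bytes at a time based on 'total' would read 2
+		 * bytes past the end of the buffer here (up to 3 in the
+		 * general case) and then skip the wrapped data; 'to_end'
+		 * limits the chunk to the contiguous part, and the rest is
+		 * sent by a later call, after the tail wraps back to 0.
+		 */
+		(void)total;
+		(void)to_end;
+	}
+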
+ drivers/tty/serial/msm_serial.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -726,7 +726,7 @@ static void msm_handle_tx(struct uart_po
+ return;
+ }
+
+- pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
++ pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ dma_min = 1; /* Always DMA */