--- /dev/null
+From 688bc577ac42ae3d07c889a1f0a72f0b23763d58 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Wed, 16 Sep 2015 16:18:59 +0100
+Subject: arm: KVM: Disable virtual timer even if the guest is not using it
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 688bc577ac42ae3d07c889a1f0a72f0b23763d58 upstream.
+
+When running a guest with the architected timer disabled (with QEMU and
+the kernel_irqchip=off option, for example), it is important to make
+sure the timer gets turned off. Otherwise, the guest may try to
+enable it anyway, leading to a screaming HW interrupt.
+
+The fix is to unconditionally turn off the virtual timer on guest
+exit.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/interrupts_head.S | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -515,8 +515,7 @@ ARM_BE8(rev r6, r6 )
+
+ mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+ str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
+- bic r2, #1 @ Clear ENABLE
+- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
++
+ isb
+
+ mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
+@@ -529,6 +528,9 @@ ARM_BE8(rev r6, r6 )
+ mcrr p15, 4, r2, r2, c14 @ CNTVOFF
+
+ 1:
++ mov r2, #0 @ Clear ENABLE
++ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
++
+ @ Allow physical timer/counter access for the host
+ mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
+ orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
--- /dev/null
+From ca09f02f122b2ecb0f5ddfc5fd47b29ed657d4fd Mon Sep 17 00:00:00 2001
+From: Marek Majtyka <marek.majtyka@tieto.com>
+Date: Wed, 16 Sep 2015 12:04:55 +0200
+Subject: arm: KVM: Fix incorrect device to IPA mapping
+
+From: Marek Majtyka <marek.majtyka@tieto.com>
+
+commit ca09f02f122b2ecb0f5ddfc5fd47b29ed657d4fd upstream.
+
+A critical bug has been found in device memory stage1 translation for
+VMs with more than 4GB of address space. Once vm_pgoff size is smaller
+than pa (which is true for LPAE case, u32 and u64 respectively) some
+more significant bits of pa may be lost as a shift operation is performed
+on u32 and later cast onto u64.
+
+Example: vm_pgoff(u32)=0x00210030, PAGE_SHIFT=12
+ expected pa(u64): 0x0000002010030000
+ produced pa(u64): 0x0000000010030000
+
+The fix is to change the order of operations (casting first onto phys_addr_t
+and then shifting).
+
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+[maz: fixed changelog and patch formatting]
+Signed-off-by: Marek Majtyka <marek.majtyka@tieto.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1792,8 +1792,10 @@ int kvm_arch_prepare_memory_region(struc
+ if (vma->vm_flags & VM_PFNMAP) {
+ gpa_t gpa = mem->guest_phys_addr +
+ (vm_start - mem->userspace_addr);
+- phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
+- vm_start - vma->vm_start;
++ phys_addr_t pa;
++
++ pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
++ pa += vm_start - vma->vm_start;
+
+ /* IO region dirty page logging not allowed */
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
--- /dev/null
+From 8453fecbecae26edb3f278627376caab05d9a88d Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Tue, 15 Sep 2015 14:41:54 +0800
+Subject: kvm: don't try to register to KVM_FAST_MMIO_BUS for non mmio eventfd
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit 8453fecbecae26edb3f278627376caab05d9a88d upstream.
+
+We only want zero length mmio eventfd to be registered on
+KVM_FAST_MMIO_BUS. So check this explicitly when arg->len is zero to
+make sure this.
+
+Cc: Gleb Natapov <gleb@kernel.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/eventfd.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -846,7 +846,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, st
+ /* When length is ignored, MMIO is also put on a separate bus, for
+ * faster lookups.
+ */
+- if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
++ if (!args->len && bus_idx == KVM_MMIO_BUS) {
+ ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
+ p->addr, 0, &p->dev);
+ if (ret < 0)
+@@ -901,7 +901,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm,
+ continue;
+
+ kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+- if (!p->length) {
++ if (!p->length && p->bus_idx == KVM_MMIO_BUS) {
+ kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
+ &p->dev);
+ }
--- /dev/null
+From 85da11ca587c8eb73993a1b503052391a73586f9 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Tue, 15 Sep 2015 14:41:55 +0800
+Subject: kvm: factor out core eventfd assign/deassign logic
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit 85da11ca587c8eb73993a1b503052391a73586f9 upstream.
+
+This patch factors out core eventfd assign/deassign logic and leaves
+the argument checking and bus index selection to callers.
+
+Cc: Gleb Natapov <gleb@kernel.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/eventfd.c | 85 +++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 50 insertions(+), 35 deletions(-)
+
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -771,40 +771,14 @@ static enum kvm_bus ioeventfd_bus_from_f
+ return KVM_MMIO_BUS;
+ }
+
+-static int
+-kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
++static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
++ enum kvm_bus bus_idx,
++ struct kvm_ioeventfd *args)
+ {
+- enum kvm_bus bus_idx;
+- struct _ioeventfd *p;
+- struct eventfd_ctx *eventfd;
+- int ret;
+
+- bus_idx = ioeventfd_bus_from_flags(args->flags);
+- /* must be natural-word sized, or 0 to ignore length */
+- switch (args->len) {
+- case 0:
+- case 1:
+- case 2:
+- case 4:
+- case 8:
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- /* check for range overflow */
+- if (args->addr + args->len < args->addr)
+- return -EINVAL;
+-
+- /* check for extra flags that we don't understand */
+- if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
+- return -EINVAL;
+-
+- /* ioeventfd with no length can't be combined with DATAMATCH */
+- if (!args->len &&
+- args->flags & (KVM_IOEVENTFD_FLAG_PIO |
+- KVM_IOEVENTFD_FLAG_DATAMATCH))
+- return -EINVAL;
++ struct eventfd_ctx *eventfd;
++ struct _ioeventfd *p;
++ int ret;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+@@ -873,14 +847,13 @@ fail:
+ }
+
+ static int
+-kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
++kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
++ struct kvm_ioeventfd *args)
+ {
+- enum kvm_bus bus_idx;
+ struct _ioeventfd *p, *tmp;
+ struct eventfd_ctx *eventfd;
+ int ret = -ENOENT;
+
+- bus_idx = ioeventfd_bus_from_flags(args->flags);
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+@@ -918,6 +891,48 @@ kvm_deassign_ioeventfd(struct kvm *kvm,
+ return ret;
+ }
+
++static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
++{
++ enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
++
++ return kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
++}
++
++static int
++kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
++{
++ enum kvm_bus bus_idx;
++
++ bus_idx = ioeventfd_bus_from_flags(args->flags);
++ /* must be natural-word sized, or 0 to ignore length */
++ switch (args->len) {
++ case 0:
++ case 1:
++ case 2:
++ case 4:
++ case 8:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* check for range overflow */
++ if (args->addr + args->len < args->addr)
++ return -EINVAL;
++
++ /* check for extra flags that we don't understand */
++ if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
++ return -EINVAL;
++
++ /* ioeventfd with no length can't be combined with DATAMATCH */
++ if (!args->len &&
++ args->flags & (KVM_IOEVENTFD_FLAG_PIO |
++ KVM_IOEVENTFD_FLAG_DATAMATCH))
++ return -EINVAL;
++
++ return kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
++}
++
+ int
+ kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ {
--- /dev/null
+From eefd6b06b17c5478e7c24bea6f64beaa2c431ca6 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Tue, 15 Sep 2015 14:41:56 +0800
+Subject: kvm: fix double free for fast mmio eventfd
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit eefd6b06b17c5478e7c24bea6f64beaa2c431ca6 upstream.
+
+We register wildcard mmio eventfd on two buses, once for KVM_MMIO_BUS
+and once on KVM_FAST_MMIO_BUS but with a single iodev
+instance. This will lead to an issue: kvm_io_bus_destroy() knows
+nothing about the devices on two buses pointing to a single dev. Which
+will lead to double free[1] during exit. Fix this by allocating two
+instances of iodevs then registering one on KVM_MMIO_BUS and another
+on KVM_FAST_MMIO_BUS.
+
+CPU: 1 PID: 2894 Comm: qemu-system-x86 Not tainted 3.19.0-26-generic #28-Ubuntu
+Hardware name: LENOVO 2356BG6/2356BG6, BIOS G7ET96WW (2.56 ) 09/12/2013
+task: ffff88009ae0c4b0 ti: ffff88020e7f0000 task.ti: ffff88020e7f0000
+RIP: 0010:[<ffffffffc07e25d8>] [<ffffffffc07e25d8>] ioeventfd_release+0x28/0x60 [kvm]
+RSP: 0018:ffff88020e7f3bc8 EFLAGS: 00010292
+RAX: dead000000200200 RBX: ffff8801ec19c900 RCX: 000000018200016d
+RDX: ffff8801ec19cf80 RSI: ffffea0008bf1d40 RDI: ffff8801ec19c900
+RBP: ffff88020e7f3bd8 R08: 000000002fc75a01 R09: 000000018200016d
+R10: ffffffffc07df6ae R11: ffff88022fc75a98 R12: ffff88021e7cc000
+R13: ffff88021e7cca48 R14: ffff88021e7cca50 R15: ffff8801ec19c880
+FS: 00007fc1ee3e6700(0000) GS:ffff88023e240000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8f389d8000 CR3: 000000023dc13000 CR4: 00000000001427e0
+Stack:
+ffff88021e7cc000 0000000000000000 ffff88020e7f3be8 ffffffffc07e2622
+ffff88020e7f3c38 ffffffffc07df69a ffff880232524160 ffff88020e792d80
+ 0000000000000000 ffff880219b78c00 0000000000000008 ffff8802321686a8
+Call Trace:
+[<ffffffffc07e2622>] ioeventfd_destructor+0x12/0x20 [kvm]
+[<ffffffffc07df69a>] kvm_put_kvm+0xca/0x210 [kvm]
+[<ffffffffc07df818>] kvm_vcpu_release+0x18/0x20 [kvm]
+[<ffffffff811f69f7>] __fput+0xe7/0x250
+[<ffffffff811f6bae>] ____fput+0xe/0x10
+[<ffffffff81093f04>] task_work_run+0xd4/0xf0
+[<ffffffff81079358>] do_exit+0x368/0xa50
+[<ffffffff81082c8f>] ? recalc_sigpending+0x1f/0x60
+[<ffffffff81079ad5>] do_group_exit+0x45/0xb0
+[<ffffffff81085c71>] get_signal+0x291/0x750
+[<ffffffff810144d8>] do_signal+0x28/0xab0
+[<ffffffff810f3a3b>] ? do_futex+0xdb/0x5d0
+[<ffffffff810b7028>] ? __wake_up_locked_key+0x18/0x20
+[<ffffffff810f3fa6>] ? SyS_futex+0x76/0x170
+[<ffffffff81014fc9>] do_notify_resume+0x69/0xb0
+[<ffffffff817cb9af>] int_signal+0x12/0x17
+Code: 5d c3 90 0f 1f 44 00 00 55 48 89 e5 53 48 89 fb 48 83 ec 08 48 8b 7f 20 e8 06 d6 a5 c0 48 8b 43 08 48 8b 13 48 89 df 48 89 42 08 <48> 89 10 48 b8 00 01 10 00 00
+ RIP [<ffffffffc07e25d8>] ioeventfd_release+0x28/0x60 [kvm]
+ RSP <ffff88020e7f3bc8>
+
+Cc: Gleb Natapov <gleb@kernel.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/eventfd.c | 43 +++++++++++++++++++++++++------------------
+ 1 file changed, 25 insertions(+), 18 deletions(-)
+
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -817,16 +817,6 @@ static int kvm_assign_ioeventfd_idx(stru
+ if (ret < 0)
+ goto unlock_fail;
+
+- /* When length is ignored, MMIO is also put on a separate bus, for
+- * faster lookups.
+- */
+- if (!args->len && bus_idx == KVM_MMIO_BUS) {
+- ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
+- p->addr, 0, &p->dev);
+- if (ret < 0)
+- goto register_fail;
+- }
+-
+ kvm->buses[bus_idx]->ioeventfd_count++;
+ list_add_tail(&p->list, &kvm->ioeventfds);
+
+@@ -834,8 +824,6 @@ static int kvm_assign_ioeventfd_idx(stru
+
+ return 0;
+
+-register_fail:
+- kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+ unlock_fail:
+ mutex_unlock(&kvm->slots_lock);
+
+@@ -874,10 +862,6 @@ kvm_deassign_ioeventfd_idx(struct kvm *k
+ continue;
+
+ kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+- if (!p->length && p->bus_idx == KVM_MMIO_BUS) {
+- kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
+- &p->dev);
+- }
+ kvm->buses[bus_idx]->ioeventfd_count--;
+ ioeventfd_release(p);
+ ret = 0;
+@@ -894,14 +878,19 @@ kvm_deassign_ioeventfd_idx(struct kvm *k
+ static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ {
+ enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
++ int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
++
++ if (!args->len && bus_idx == KVM_MMIO_BUS)
++ kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
+
+- return kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
++ return ret;
+ }
+
+ static int
+ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ {
+ enum kvm_bus bus_idx;
++ int ret;
+
+ bus_idx = ioeventfd_bus_from_flags(args->flags);
+ /* must be natural-word sized, or 0 to ignore length */
+@@ -930,7 +919,25 @@ kvm_assign_ioeventfd(struct kvm *kvm, st
+ KVM_IOEVENTFD_FLAG_DATAMATCH))
+ return -EINVAL;
+
+- return kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
++ ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
++ if (ret)
++ goto fail;
++
++ /* When length is ignored, MMIO is also put on a separate bus, for
++ * faster lookups.
++ */
++ if (!args->len && bus_idx == KVM_MMIO_BUS) {
++ ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
++ if (ret < 0)
++ goto fast_fail;
++ }
++
++ return 0;
++
++fast_fail:
++ kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
++fail:
++ return ret;
+ }
+
+ int
--- /dev/null
+From 8f4216c7d28976f7ec1b2bcbfa0a9f787133c45e Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Tue, 15 Sep 2015 14:41:57 +0800
+Subject: kvm: fix zero length mmio searching
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit 8f4216c7d28976f7ec1b2bcbfa0a9f787133c45e upstream.
+
+Currently, if we had a zero length mmio eventfd assigned on
+KVM_MMIO_BUS. It will never be found by kvm_io_bus_cmp() since it
+always compares the kvm_io_range() with the length that guest
+wrote. This will cause e.g for vhost, kick will be trapped by qemu
+userspace instead of vhost. Fixing this by using zero length if an
+iodevice is zero length.
+
+Cc: Gleb Natapov <gleb@kernel.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/kvm_main.c | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -3080,10 +3080,25 @@ static void kvm_io_bus_destroy(struct kv
+ static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
+ const struct kvm_io_range *r2)
+ {
+- if (r1->addr < r2->addr)
++ gpa_t addr1 = r1->addr;
++ gpa_t addr2 = r2->addr;
++
++ if (addr1 < addr2)
+ return -1;
+- if (r1->addr + r1->len > r2->addr + r2->len)
++
++ /* If r2->len == 0, match the exact address. If r2->len != 0,
++ * accept any overlapping write. Any order is acceptable for
++ * overlapping ranges, because kvm_io_bus_get_first_dev ensures
++ * we process all of them.
++ */
++ if (r2->len) {
++ addr1 += r1->len;
++ addr2 += r2->len;
++ }
++
++ if (addr1 > addr2)
+ return 1;
++
+ return 0;
+ }
+
--- /dev/null
+From 7e022e717f54897e396504306d0c9b61452adf4e Mon Sep 17 00:00:00 2001
+From: "Gautham R. Shenoy" <ego@linux.vnet.ibm.com>
+Date: Thu, 21 May 2015 13:57:04 +0530
+Subject: KVM: PPC: Book3S HV: Pass the correct trap argument to kvmhv_commence_exit
+
+From: "Gautham R. Shenoy" <ego@linux.vnet.ibm.com>
+
+commit 7e022e717f54897e396504306d0c9b61452adf4e upstream.
+
+In guest_exit_cont we call kvmhv_commence_exit which expects the trap
+number as the argument. However r3 doesn't contain the trap number at
+this point and as a result we would be calling the function with a
+spurious trap number.
+
+Fix this by copying r12 into r3 before calling kvmhv_commence_exit as
+r12 contains the trap number.
+
+Fixes: eddb60fb1443
+Signed-off-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1171,6 +1171,7 @@ mc_cont:
+ bl kvmhv_accumulate_time
+ #endif
+
++ mr r3, r12
+ /* Increment exit count, poke other threads to exit */
+ bl kvmhv_commence_exit
+ nop
--- /dev/null
+From 3eb4ee68254235e4f47bc0410538fcdaede39589 Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@redhat.com>
+Date: Fri, 18 Sep 2015 08:57:28 +0200
+Subject: KVM: PPC: Book3S: Take the kvm->srcu lock in kvmppc_h_logical_ci_load/store()
+
+From: Thomas Huth <thuth@redhat.com>
+
+commit 3eb4ee68254235e4f47bc0410538fcdaede39589 upstream.
+
+Access to the kvm->buses (like with the kvm_io_bus_read() and -write()
+functions) has to be protected via the kvm->srcu lock.
+The kvmppc_h_logical_ci_load() and -store() functions are missing
+this lock so far, so let's add it there, too.
+This fixes the problem that the kernel reports "suspicious RCU usage"
+when lock debugging is enabled.
+
+Fixes: 99342cf8044420eebdf9297ca03a14cb6a7085a1
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -827,12 +827,15 @@ int kvmppc_h_logical_ci_load(struct kvm_
+ unsigned long size = kvmppc_get_gpr(vcpu, 4);
+ unsigned long addr = kvmppc_get_gpr(vcpu, 5);
+ u64 buf;
++ int srcu_idx;
+ int ret;
+
+ if (!is_power_of_2(size) || (size > sizeof(buf)))
+ return H_TOO_HARD;
+
++ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
++ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ if (ret != 0)
+ return H_TOO_HARD;
+
+@@ -867,6 +870,7 @@ int kvmppc_h_logical_ci_store(struct kvm
+ unsigned long addr = kvmppc_get_gpr(vcpu, 5);
+ unsigned long val = kvmppc_get_gpr(vcpu, 6);
+ u64 buf;
++ int srcu_idx;
+ int ret;
+
+ switch (size) {
+@@ -890,7 +894,9 @@ int kvmppc_h_logical_ci_store(struct kvm
+ return H_TOO_HARD;
+ }
+
++ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
++ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ if (ret != 0)
+ return H_TOO_HARD;
+
--- /dev/null
+From ebae871a509d3c24b32ff67af2671dadffc58770 Mon Sep 17 00:00:00 2001
+From: Igor Mammedov <imammedo@redhat.com>
+Date: Fri, 18 Sep 2015 15:39:05 +0200
+Subject: kvm: svm: reset mmu on VCPU reset
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Igor Mammedov <imammedo@redhat.com>
+
+commit ebae871a509d3c24b32ff67af2671dadffc58770 upstream.
+
+When INIT/SIPI sequence is sent to VCPU which before that
+was in use by OS, VMRUN might fail with:
+
+ KVM: entry failed, hardware error 0xffffffff
+ EAX=00000000 EBX=00000000 ECX=00000000 EDX=000006d3
+ ESI=00000000 EDI=00000000 EBP=00000000 ESP=00000000
+ EIP=00000000 EFL=00000002 [-------] CPL=0 II=0 A20=1 SMM=0 HLT=0
+ ES =0000 00000000 0000ffff 00009300
+ CS =9a00 0009a000 0000ffff 00009a00
+ [...]
+ CR0=60000010 CR2=b6f3e000 CR3=01942000 CR4=000007e0
+ [...]
+ EFER=0000000000000000
+
+with corresponding SVM error:
+ KVM: FAILED VMRUN WITH VMCB:
+ [...]
+ cpl: 0 efer: 0000000000001000
+ cr0: 0000000080010010 cr2: 00007fd7fe85bf90
+ cr3: 0000000187d0c000 cr4: 0000000000000020
+ [...]
+
+What happens is that VCPU state right after offlinig:
+CR0: 0x80050033 EFER: 0xd01 CR4: 0x7e0
+ -> long mode with CR3 pointing to longmode page tables
+
+and when VCPU gets INIT/SIPI following transition happens
+CR0: 0 -> 0x60000010 EFER: 0x0 CR4: 0x7e0
+ -> paging disabled with stale CR3
+
+However SVM under the hood puts VCPU in Paged Real Mode*
+which effectively translates CR0 0x60000010 -> 80010010 after
+
+ svm_vcpu_reset()
+ -> init_vmcb()
+ -> kvm_set_cr0()
+ -> svm_set_cr0()
+
+but from kvm_set_cr0() perspective CR0: 0 -> 0x60000010
+only caching bits are changed and
+commit d81135a57aa6
+ ("KVM: x86: do not reset mmu if CR0.CD and CR0.NW are changed")'
+regressed svm_vcpu_reset() which relied on MMU being reset.
+
+As result VMRUN after svm_vcpu_reset() tries to run
+VCPU in Paged Real Mode with stale MMU context (longmode page tables),
+which causes some AMD CPUs** to bail out with VMEXIT_INVALID.
+
+Fix issue by unconditionally resetting MMU context
+at init_vmcb() time.
+
+ * AMD64 Architecture Programmer’s Manual,
+ Volume 2: System Programming, rev: 3.25
+ 15.19 Paged Real Mode
+ ** Opteron 1216
+
+Signed-off-by: Igor Mammedov <imammedo@redhat.com>
+Fixes: d81135a57aa6
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1260,6 +1260,7 @@ static void init_vmcb(struct vcpu_svm *s
+ * It also updates the guest-visible cr0 value.
+ */
+ (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
++ kvm_mmu_reset_context(&svm->vcpu);
+
+ save->cr4 = X86_CR4_PAE;
+ /* rdx = ?? */
--- /dev/null
+From 04bb92e4b4cf06a66889d37b892b78f926faa9d4 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Wed, 16 Sep 2015 19:31:11 +0800
+Subject: KVM: vmx: fix VPID is 0000H in non-root operation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+
+commit 04bb92e4b4cf06a66889d37b892b78f926faa9d4 upstream.
+
+Reference SDM 28.1:
+
+The current VPID is 0000H in the following situations:
+- Outside VMX operation. (This includes operation in system-management
+ mode under the default treatment of SMIs and SMM with VMX operation;
+ see Section 34.14.)
+- In VMX root operation.
+- In VMX non-root operation when the “enable VPID” VM-execution control
+ is 0.
+
+The VPID should never be 0000H in non-root operation when "enable VPID"
+VM-execution control is 1. However, commit 34a1cd60 ("kvm: x86: vmx:
+move some vmx setting from vmx_init() to hardware_setup()") removed the
+code which reserved 0000H for VMX root operation.
+
+This patch fixes it by again reserving 0000H for VMX root operation.
+
+Fixes: 34a1cd60d17f62c1f077c1478a6c2ca8c3d17af4
+Reported-by: Wincy Van <fanwenyi0529@gmail.com>
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6134,6 +6134,8 @@ static __init int hardware_setup(void)
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
+ vmx_msr_bitmap_longmode, PAGE_SIZE);
+
++ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
++
+ if (enable_apicv) {
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ vmx_disable_intercept_msr_read_x2apic(msr);
--- /dev/null
+From 3afb1121800128aae9f5722e50097fcf1a9d4d88 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 18 Sep 2015 17:33:04 +0200
+Subject: KVM: x86: trap AMD MSRs for the TSeg base and mask
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 3afb1121800128aae9f5722e50097fcf1a9d4d88 upstream.
+
+These have roughly the same purpose as the SMRR, which we do not need
+to implement in KVM. However, Linux accesses MSR_K8_TSEG_ADDR at
+boot, which causes problems when running a Xen dom0 under KVM.
+Just return 0, meaning that processor protection of SMRAM is not
+in effect.
+
+Reported-by: M A Young <m.a.young@durham.ac.uk>
+Acked-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/msr-index.h | 1 +
+ arch/x86/kvm/x86.c | 2 ++
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -311,6 +311,7 @@
+ /* C1E active bits in int pending message */
+ #define K8_INTP_C1E_ACTIVE_MASK 0x18000000
+ #define MSR_K8_TSEG_ADDR 0xc0010112
++#define MSR_K8_TSEG_MASK 0xc0010113
+ #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
+ #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
+ #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2388,6 +2388,8 @@ int kvm_get_msr_common(struct kvm_vcpu *
+ case MSR_IA32_LASTINTFROMIP:
+ case MSR_IA32_LASTINTTOIP:
+ case MSR_K8_SYSCFG:
++ case MSR_K8_TSEG_ADDR:
++ case MSR_K8_TSEG_MASK:
+ case MSR_K7_HWCR:
+ case MSR_VM_HSAVE_PA:
+ case MSR_K8_INT_PENDING_MSG:
--- /dev/null
+From 625422f60c55bbc368b8568ff925770b36bfc189 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 1 Oct 2015 13:28:15 +0200
+Subject: Revert "KVM: SVM: Sync g_pat with guest-written PAT value"
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 625422f60c55bbc368b8568ff925770b36bfc189 upstream.
+
+This reverts commit e098223b789b4a618dacd79e5e0dad4a9d5018d1,
+which has a dependency on other commits being reverted.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3255,16 +3255,6 @@ static int svm_set_msr(struct kvm_vcpu *
+ case MSR_VM_IGNNE:
+ vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
+ break;
+- case MSR_IA32_CR_PAT:
+- if (npt_enabled) {
+- if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+- return 1;
+- vcpu->arch.pat = data;
+- svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
+- mark_dirty(svm->vmcb, VMCB_NPT);
+- break;
+- }
+- /* fall through */
+ default:
+ return kvm_set_msr_common(vcpu, msr);
+ }
--- /dev/null
+From fc07e76ac7ffa3afd621a1c3858a503386a14281 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 1 Oct 2015 13:20:22 +0200
+Subject: Revert "KVM: SVM: use NPT page attributes"
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit fc07e76ac7ffa3afd621a1c3858a503386a14281 upstream.
+
+This reverts commit 3c2e7f7de3240216042b61073803b61b9b3cfb22.
+Initializing the mapping from MTRR to PAT values was reported to
+fail nondeterministically, and it also caused extremely slow boot
+(due to caching getting disabled---bug 103321) with assigned devices.
+
+Reported-by: Markus Trippelsdorf <markus@trippelsdorf.de>
+Reported-by: Sebastian Schuette <dracon@ewetel.net>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c | 101 ++---------------------------------------------------
+ 1 file changed, 5 insertions(+), 96 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -865,64 +865,6 @@ static void svm_disable_lbrv(struct vcpu
+ set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
+ }
+
+-#define MTRR_TYPE_UC_MINUS 7
+-#define MTRR2PROTVAL_INVALID 0xff
+-
+-static u8 mtrr2protval[8];
+-
+-static u8 fallback_mtrr_type(int mtrr)
+-{
+- /*
+- * WT and WP aren't always available in the host PAT. Treat
+- * them as UC and UC- respectively. Everything else should be
+- * there.
+- */
+- switch (mtrr)
+- {
+- case MTRR_TYPE_WRTHROUGH:
+- return MTRR_TYPE_UNCACHABLE;
+- case MTRR_TYPE_WRPROT:
+- return MTRR_TYPE_UC_MINUS;
+- default:
+- BUG();
+- }
+-}
+-
+-static void build_mtrr2protval(void)
+-{
+- int i;
+- u64 pat;
+-
+- for (i = 0; i < 8; i++)
+- mtrr2protval[i] = MTRR2PROTVAL_INVALID;
+-
+- /* Ignore the invalid MTRR types. */
+- mtrr2protval[2] = 0;
+- mtrr2protval[3] = 0;
+-
+- /*
+- * Use host PAT value to figure out the mapping from guest MTRR
+- * values to nested page table PAT/PCD/PWT values. We do not
+- * want to change the host PAT value every time we enter the
+- * guest.
+- */
+- rdmsrl(MSR_IA32_CR_PAT, pat);
+- for (i = 0; i < 8; i++) {
+- u8 mtrr = pat >> (8 * i);
+-
+- if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
+- mtrr2protval[mtrr] = __cm_idx2pte(i);
+- }
+-
+- for (i = 0; i < 8; i++) {
+- if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
+- u8 fallback = fallback_mtrr_type(i);
+- mtrr2protval[i] = mtrr2protval[fallback];
+- BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
+- }
+- }
+-}
+-
+ static __init int svm_hardware_setup(void)
+ {
+ int cpu;
+@@ -989,7 +931,6 @@ static __init int svm_hardware_setup(voi
+ } else
+ kvm_disable_tdp();
+
+- build_mtrr2protval();
+ return 0;
+
+ err:
+@@ -1144,42 +1085,6 @@ static u64 svm_compute_tsc_offset(struct
+ return target_tsc - tsc;
+ }
+
+-static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
+-{
+- struct kvm_vcpu *vcpu = &svm->vcpu;
+-
+- /* Unlike Intel, AMD takes the guest's CR0.CD into account.
+- *
+- * AMD does not have IPAT. To emulate it for the case of guests
+- * with no assigned devices, just set everything to WB. If guests
+- * have assigned devices, however, we cannot force WB for RAM
+- * pages only, so use the guest PAT directly.
+- */
+- if (!kvm_arch_has_assigned_device(vcpu->kvm))
+- *g_pat = 0x0606060606060606;
+- else
+- *g_pat = vcpu->arch.pat;
+-}
+-
+-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+-{
+- u8 mtrr;
+-
+- /*
+- * 1. MMIO: always map as UC
+- * 2. No passthrough: always map as WB, and force guest PAT to WB as well
+- * 3. Passthrough: can't guarantee the result, try to trust guest.
+- */
+- if (is_mmio)
+- return _PAGE_NOCACHE;
+-
+- if (!kvm_arch_has_assigned_device(vcpu->kvm))
+- return 0;
+-
+- mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
+- return mtrr2protval[mtrr];
+-}
+-
+ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
+ {
+ struct vmcb_control_area *control = &svm->vmcb->control;
+@@ -1276,7 +1181,6 @@ static void init_vmcb(struct vcpu_svm *s
+ clr_cr_intercept(svm, INTERCEPT_CR3_READ);
+ clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
+ save->g_pat = svm->vcpu.arch.pat;
+- svm_set_guest_pat(svm, &save->g_pat);
+ save->cr3 = 0;
+ save->cr4 = 0;
+ }
+@@ -4195,6 +4099,11 @@ static bool svm_has_high_real_mode_segba
+ return true;
+ }
+
++static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
++{
++ return 0;
++}
++
+ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
+ {
+ }
--- /dev/null
+From 606decd67049217684e3cb5a54104d51ddd4ef35 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 1 Oct 2015 13:12:47 +0200
+Subject: Revert "KVM: x86: apply guest MTRR virtualization on host reserved pages"
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 606decd67049217684e3cb5a54104d51ddd4ef35 upstream.
+
+This reverts commit fd717f11015f673487ffc826e59b2bad69d20fe5.
+It was reported to cause Machine Check Exceptions (bug 104091).
+
+Reported-by: harn-solo@gmx.de
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c | 7 +++++--
+ arch/x86/kvm/vmx.c | 11 ++++++++---
+ 2 files changed, 13 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1166,11 +1166,14 @@ static u64 svm_get_mt_mask(struct kvm_vc
+ u8 mtrr;
+
+ /*
+- * 1. MMIO: trust guest MTRR, so same as item 3.
++ * 1. MMIO: always map as UC
+ * 2. No passthrough: always map as WB, and force guest PAT to WB as well
+ * 3. Passthrough: can't guarantee the result, try to trust guest.
+ */
+- if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
++ if (is_mmio)
++ return _PAGE_NOCACHE;
++
++ if (!kvm_arch_has_assigned_device(vcpu->kvm))
+ return 0;
+
+ mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8634,17 +8634,22 @@ static u64 vmx_get_mt_mask(struct kvm_vc
+ u64 ipat = 0;
+
+ /* For VT-d and EPT combination
+- * 1. MMIO: guest may want to apply WC, trust it.
++ * 1. MMIO: always map as UC
+ * 2. EPT with VT-d:
+ * a. VT-d without snooping control feature: can't guarantee the
+- * result, try to trust guest. So the same as item 1.
++ * result, try to trust guest.
+ * b. VT-d with snooping control feature: snooping control feature of
+ * VT-d engine can guarantee the cache correctness. Just set it
+ * to WB to keep consistent with host. So the same as item 3.
+ * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
+ * consistent with host MTRR
+ */
+- if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
++ if (is_mmio) {
++ cache = MTRR_TYPE_UNCACHABLE;
++ goto exit;
++ }
++
++ if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+ ipat = VMX_EPT_IPAT_BIT;
+ cache = MTRR_TYPE_WRBACK;
+ goto exit;
--- /dev/null
+From 2619d7e9c92d524cb155ec89fd72875321512e5b Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Wed, 9 Sep 2015 16:07:30 -0700
+Subject: time: Fix timekeeping_freqadjust()'s incorrect use of abs() instead of abs64()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: John Stultz <john.stultz@linaro.org>
+
+commit 2619d7e9c92d524cb155ec89fd72875321512e5b upstream.
+
+The internal clocksteering done for fine-grained error
+correction uses a logarithmic approximation, so any time
+adjtimex() adjusts the clock steering, timekeeping_freqadjust()
+quickly approximates the correct clock frequency over a series
+of ticks.
+
+Unfortunately, the logic in timekeeping_freqadjust(), introduced
+in commit:
+
+ dc491596f639 ("timekeeping: Rework frequency adjustments to work better w/ nohz")
+
+used the abs() function with a s64 error value to calculate the
+size of the approximated adjustment to be made.
+
+Per include/linux/kernel.h:
+
+ "abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()".
+
+Thus on 32-bit platforms, this resulted in the clocksteering to
+take a quite dampened random walk trying to converge on the
+proper frequency, which caused the adjustments to be made much
+slower than intended (most easily observed when large
+adjustments are made).
+
+This patch fixes the issue by using abs64() instead.
+
+Reported-by: Nuno Gonçalves <nunojpg@gmail.com>
+Tested-by: Nuno Goncalves <nunojpg@gmail.com>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Miroslav Lichvar <mlichvar@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Richard Cochran <richardcochran@gmail.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1441840051-20244-1-git-send-email-john.stultz@linaro.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/timekeeping.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1607,7 +1607,7 @@ static __always_inline void timekeeping_
+ negative = (tick_error < 0);
+
+ /* Sort out the magnitude of the correction */
+- tick_error = abs(tick_error);
++ tick_error = abs64(tick_error);
+ for (adj = 0; tick_error > interval; adj++)
+ tick_error >>= 1;
+