git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.0-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 2 May 2015 12:22:59 +0000 (14:22 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 2 May 2015 12:22:59 +0000 (14:22 +0200)
added patches:
kvm-arm-arm64-check-irq-number-on-userland-injection.patch
kvm-s390-fix-get_all_floating_irqs.patch
kvm-s390-fix-handling-of-write-errors-in-the-tpi-handler.patch
kvm-s390-reinjection-of-irqs-can-fail-in-the-tpi-handler.patch
kvm-s390-zero-out-current-vmdb-of-stsi-before-including-level3-data.patch
kvm-use-slowpath-for-cross-page-cached-accesses.patch
kvm-vmx-preserve-host-cr4.mce-value-while-in-guest-mode.patch
mips-asm-asm-eva-introduce-kernel-load-store-variants.patch
mips-hibernate-flush-tlb-entries-earlier.patch
mips-kvm-handle-msa-disabled-exceptions-from-guest.patch
mips-loongson-3-add-irqf_no_suspend-to-cascade-irqaction.patch
mips-lose_fpu-disable-fpu-when-msa-enabled.patch
mips-malta-detect-and-fix-bad-memsize-values.patch
mips-unaligned-fix-regular-load-store-instruction-emulation-for-eva.patch
mips-unaligned-prevent-eva-instructions-on-kernel-unaligned-accesses.patch
mips-unaligned-surround-load-store-macros-in-do-while-statements.patch
s390-hibernate-fix-save-and-restore-of-kernel-text-section.patch

18 files changed:
queue-4.0/kvm-arm-arm64-check-irq-number-on-userland-injection.patch [new file with mode: 0644]
queue-4.0/kvm-s390-fix-get_all_floating_irqs.patch [new file with mode: 0644]
queue-4.0/kvm-s390-fix-handling-of-write-errors-in-the-tpi-handler.patch [new file with mode: 0644]
queue-4.0/kvm-s390-reinjection-of-irqs-can-fail-in-the-tpi-handler.patch [new file with mode: 0644]
queue-4.0/kvm-s390-zero-out-current-vmdb-of-stsi-before-including-level3-data.patch [new file with mode: 0644]
queue-4.0/kvm-use-slowpath-for-cross-page-cached-accesses.patch [new file with mode: 0644]
queue-4.0/kvm-vmx-preserve-host-cr4.mce-value-while-in-guest-mode.patch [new file with mode: 0644]
queue-4.0/mips-asm-asm-eva-introduce-kernel-load-store-variants.patch [new file with mode: 0644]
queue-4.0/mips-hibernate-flush-tlb-entries-earlier.patch [new file with mode: 0644]
queue-4.0/mips-kvm-handle-msa-disabled-exceptions-from-guest.patch [new file with mode: 0644]
queue-4.0/mips-loongson-3-add-irqf_no_suspend-to-cascade-irqaction.patch [new file with mode: 0644]
queue-4.0/mips-lose_fpu-disable-fpu-when-msa-enabled.patch [new file with mode: 0644]
queue-4.0/mips-malta-detect-and-fix-bad-memsize-values.patch [new file with mode: 0644]
queue-4.0/mips-unaligned-fix-regular-load-store-instruction-emulation-for-eva.patch [new file with mode: 0644]
queue-4.0/mips-unaligned-prevent-eva-instructions-on-kernel-unaligned-accesses.patch [new file with mode: 0644]
queue-4.0/mips-unaligned-surround-load-store-macros-in-do-while-statements.patch [new file with mode: 0644]
queue-4.0/s390-hibernate-fix-save-and-restore-of-kernel-text-section.patch [new file with mode: 0644]
queue-4.0/series

diff --git a/queue-4.0/kvm-arm-arm64-check-irq-number-on-userland-injection.patch b/queue-4.0/kvm-arm-arm64-check-irq-number-on-userland-injection.patch
new file mode 100644 (file)
index 0000000..ae7843d
--- /dev/null
@@ -0,0 +1,113 @@
+From fd1d0ddf2ae92fb3df42ed476939861806c5d785 Mon Sep 17 00:00:00 2001
+From: Andre Przywara <andre.przywara@arm.com>
+Date: Fri, 10 Apr 2015 16:17:59 +0100
+Subject: KVM: arm/arm64: check IRQ number on userland injection
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+commit fd1d0ddf2ae92fb3df42ed476939861806c5d785 upstream.
+
+When userland injects a SPI via the KVM_IRQ_LINE ioctl we currently
+only check it against a fixed limit, which historically is set
+to 127. With the new dynamic IRQ allocation the effective limit may
+actually be smaller (64).
+So if a malicious or buggy userland now injects a SPI in that
+range, we write beyond the memory of our VGIC bitmaps and bytemaps.
+I could trigger a host kernel NULL pointer dereference with current
+mainline by injecting some bogus IRQ number from a hacked kvmtool:
+-----------------
+....
+DEBUG: kvm_vgic_inject_irq(kvm, cpu=0, irq=114, level=1)
+DEBUG: vgic_update_irq_pending(kvm, cpu=0, irq=114, level=1)
+DEBUG: IRQ #114 still in the game, writing to bytemap now...
+Unable to handle kernel NULL pointer dereference at virtual address 00000000
+pgd = ffffffc07652e000
+[00000000] *pgd=00000000f658b003, *pud=00000000f658b003, *pmd=0000000000000000
+Internal error: Oops: 96000006 [#1] PREEMPT SMP
+Modules linked in:
+CPU: 1 PID: 1053 Comm: lkvm-msi-irqinj Not tainted 4.0.0-rc7+ #3027
+Hardware name: FVP Base (DT)
+task: ffffffc0774e9680 ti: ffffffc0765a8000 task.ti: ffffffc0765a8000
+PC is at kvm_vgic_inject_irq+0x234/0x310
+LR is at kvm_vgic_inject_irq+0x30c/0x310
+pc : [<ffffffc0000ae0a8>] lr : [<ffffffc0000ae180>] pstate: 80000145
+.....
+
+This patch fixes the problem by checking the SPI number against the
+actual runtime limit. It also removes the former legacy hard limit of
+127 from the ioctl code.
+
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+[maz: wrap KVM_ARM_IRQ_GIC_MAX with #ifndef __KERNEL__,
+as suggested by Christopher Covington]
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/uapi/asm/kvm.h   |    8 +++++++-
+ arch/arm/kvm/arm.c                |    3 +--
+ arch/arm64/include/uapi/asm/kvm.h |    8 +++++++-
+ virt/kvm/arm/vgic.c               |    3 +++
+ 4 files changed, 18 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/uapi/asm/kvm.h
++++ b/arch/arm/include/uapi/asm/kvm.h
+@@ -195,8 +195,14 @@ struct kvm_arch_memory_slot {
+ #define KVM_ARM_IRQ_CPU_IRQ           0
+ #define KVM_ARM_IRQ_CPU_FIQ           1
+-/* Highest supported SPI, from VGIC_NR_IRQS */
++/*
++ * This used to hold the highest supported SPI, but it is now obsolete
++ * and only here to provide source code level compatibility with older
++ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
++ */
++#ifndef __KERNEL__
+ #define KVM_ARM_IRQ_GIC_MAX           127
++#endif
+ /* PSCI interface */
+ #define KVM_PSCI_FN_BASE              0x95c1ba5e
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -651,8 +651,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kv
+               if (!irqchip_in_kernel(kvm))
+                       return -ENXIO;
+-              if (irq_num < VGIC_NR_PRIVATE_IRQS ||
+-                  irq_num > KVM_ARM_IRQ_GIC_MAX)
++              if (irq_num < VGIC_NR_PRIVATE_IRQS)
+                       return -EINVAL;
+               return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
+--- a/arch/arm64/include/uapi/asm/kvm.h
++++ b/arch/arm64/include/uapi/asm/kvm.h
+@@ -188,8 +188,14 @@ struct kvm_arch_memory_slot {
+ #define KVM_ARM_IRQ_CPU_IRQ           0
+ #define KVM_ARM_IRQ_CPU_FIQ           1
+-/* Highest supported SPI, from VGIC_NR_IRQS */
++/*
++ * This used to hold the highest supported SPI, but it is now obsolete
++ * and only here to provide source code level compatibility with older
++ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
++ */
++#ifndef __KERNEL__
+ #define KVM_ARM_IRQ_GIC_MAX           127
++#endif
+ /* PSCI interface */
+ #define KVM_PSCI_FN_BASE              0x95c1ba5e
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1371,6 +1371,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm,
+                       goto out;
+       }
++      if (irq_num >= kvm->arch.vgic.nr_irqs)
++              return -EINVAL;
++
+       vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+       if (vcpu_id >= 0) {
+               /* kick the specified vcpu */
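
The heart of the fix is the new range check in kvm_vgic_inject_irq(): the
ceiling is the per-VM nr_irqs chosen at VGIC init time, not a compile-time
constant. A minimal user-space sketch of that pattern (the struct and the
function below are illustrative stand-ins, not the kernel's types):

#include <errno.h>
#include <stdio.h>

struct vgic {
    unsigned int nr_irqs;   /* chosen at VGIC init, may be well below 127 */
};

/* hypothetical stand-in for the range check added to kvm_vgic_inject_irq() */
static int inject_spi(const struct vgic *vgic, unsigned int irq_num)
{
    if (irq_num >= vgic->nr_irqs)   /* dynamic limit, not a constant */
        return -EINVAL;
    printf("IRQ %u accepted\n", irq_num);
    return 0;
}

int main(void)
{
    struct vgic vgic = { .nr_irqs = 64 };

    inject_spi(&vgic, 50);                  /* within the configured range */
    if (inject_spi(&vgic, 114) == -EINVAL)  /* the bogus IRQ from the oops above */
        printf("IRQ 114 rejected instead of overflowing the bitmaps\n");
    return 0;
}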
diff --git a/queue-4.0/kvm-s390-fix-get_all_floating_irqs.patch b/queue-4.0/kvm-s390-fix-get_all_floating_irqs.patch
new file mode 100644 (file)
index 0000000..10cfcfc
--- /dev/null
@@ -0,0 +1,150 @@
+From 94aa033efcac47b09db22cb561e135baf37b7887 Mon Sep 17 00:00:00 2001
+From: Jens Freimann <jfrei@linux.vnet.ibm.com>
+Date: Mon, 16 Mar 2015 12:17:13 +0100
+Subject: KVM: s390: fix get_all_floating_irqs
+
+From: Jens Freimann <jfrei@linux.vnet.ibm.com>
+
+commit 94aa033efcac47b09db22cb561e135baf37b7887 upstream.
+
+This fixes a bug introduced with commit c05c4186bbe4 ("KVM: s390:
+add floating irq controller").
+
+get_all_floating_irqs() does copy_to_user() while holding
+a spin lock. Let's fix this by filling a temporary buffer
+first and copying it to userspace after giving up the lock.
+
+Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Signed-off-by: Jens Freimann <jfrei@linux.vnet.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/virtual/kvm/devices/s390_flic.txt |    3 +
+ arch/s390/kvm/interrupt.c                       |   58 +++++++++++++-----------
+ 2 files changed, 35 insertions(+), 26 deletions(-)
+
+--- a/Documentation/virtual/kvm/devices/s390_flic.txt
++++ b/Documentation/virtual/kvm/devices/s390_flic.txt
+@@ -27,6 +27,9 @@ Groups:
+     Copies all floating interrupts into a buffer provided by userspace.
+     When the buffer is too small it returns -ENOMEM, which is the indication
+     for userspace to try again with a bigger buffer.
++    -ENOBUFS is returned when the allocation of a kernelspace buffer has
++    failed.
++    -EFAULT is returned when copying data to userspace failed.
+     All interrupts remain pending, i.e. are not deleted from the list of
+     currently pending interrupts.
+     attr->addr contains the userspace address of the buffer into which all
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -17,6 +17,7 @@
+ #include <linux/signal.h>
+ #include <linux/slab.h>
+ #include <linux/bitmap.h>
++#include <linux/vmalloc.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/uaccess.h>
+ #include <asm/sclp.h>
+@@ -1455,61 +1456,66 @@ void kvm_s390_clear_float_irqs(struct kv
+       spin_unlock(&fi->lock);
+ }
+-static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
+-                                 u8 *addr)
++static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
++                     struct kvm_s390_irq *irq)
+ {
+-      struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
+-      struct kvm_s390_irq irq = {0};
+-
+-      irq.type = inti->type;
++      irq->type = inti->type;
+       switch (inti->type) {
+       case KVM_S390_INT_PFAULT_INIT:
+       case KVM_S390_INT_PFAULT_DONE:
+       case KVM_S390_INT_VIRTIO:
+       case KVM_S390_INT_SERVICE:
+-              irq.u.ext = inti->ext;
++              irq->u.ext = inti->ext;
+               break;
+       case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+-              irq.u.io = inti->io;
++              irq->u.io = inti->io;
+               break;
+       case KVM_S390_MCHK:
+-              irq.u.mchk = inti->mchk;
++              irq->u.mchk = inti->mchk;
+               break;
+-      default:
+-              return -EINVAL;
+       }
+-
+-      if (copy_to_user(uptr, &irq, sizeof(irq)))
+-              return -EFAULT;
+-
+-      return 0;
+ }
+-static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
++static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
+ {
+       struct kvm_s390_interrupt_info *inti;
+       struct kvm_s390_float_interrupt *fi;
++      struct kvm_s390_irq *buf;
++      int max_irqs;
+       int ret = 0;
+       int n = 0;
++      if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
++              return -EINVAL;
++
++      /*
++       * We are already using -ENOMEM to signal
++       * userspace it may retry with a bigger buffer,
++       * so we need to use something else for this case
++       */
++      buf = vzalloc(len);
++      if (!buf)
++              return -ENOBUFS;
++
++      max_irqs = len / sizeof(struct kvm_s390_irq);
++
+       fi = &kvm->arch.float_int;
+       spin_lock(&fi->lock);
+-
+       list_for_each_entry(inti, &fi->list, list) {
+-              if (len < sizeof(struct kvm_s390_irq)) {
++              if (n == max_irqs) {
+                       /* signal userspace to try again */
+                       ret = -ENOMEM;
+                       break;
+               }
+-              ret = copy_irq_to_user(inti, buf);
+-              if (ret)
+-                      break;
+-              buf += sizeof(struct kvm_s390_irq);
+-              len -= sizeof(struct kvm_s390_irq);
++              inti_to_irq(inti, &buf[n]);
+               n++;
+       }
+-
+       spin_unlock(&fi->lock);
++      if (!ret && n > 0) {
++              if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
++                      ret = -EFAULT;
++      }
++      vfree(buf);
+       return ret < 0 ? ret : n;
+ }
+@@ -1520,7 +1526,7 @@ static int flic_get_attr(struct kvm_devi
+       switch (attr->group) {
+       case KVM_DEV_FLIC_GET_ALL_IRQS:
+-              r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
++              r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
+                                         attr->attr);
+               break;
+       default:
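
The bug class here is generic: copy_to_user() can fault and sleep, so it must
never run under a spinlock. The fix snapshots the list into a temporary buffer
while locked and copies it out afterwards. A compressed user-space model of the
fixed shape, with a pthread mutex standing in for the spinlock and memcpy() for
copy_to_user() (all names are hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct irq { int type; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct irq pending[16];
static int n_pending = 3;

/* fixed shape: snapshot under the lock, "copy to user" only after unlocking */
static int get_all_irqs(struct irq *usrbuf, int max_irqs)
{
    struct irq *buf = calloc(max_irqs, sizeof(*buf));
    int n = 0;

    if (!buf)
        return -1;                      /* the kernel returns -ENOBUFS here */
    pthread_mutex_lock(&lock);
    for (; n < n_pending && n < max_irqs; n++)
        buf[n] = pending[n];            /* no faultable call inside the lock */
    pthread_mutex_unlock(&lock);
    memcpy(usrbuf, buf, n * sizeof(*buf));  /* stands in for copy_to_user() */
    free(buf);
    return n;                           /* number of interrupts copied */
}

int main(void)
{
    struct irq out[16];

    printf("copied %d irqs\n", get_all_irqs(out, 16));
    return 0;
}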
diff --git a/queue-4.0/kvm-s390-fix-handling-of-write-errors-in-the-tpi-handler.patch b/queue-4.0/kvm-s390-fix-handling-of-write-errors-in-the-tpi-handler.patch
new file mode 100644 (file)
index 0000000..e874e92
--- /dev/null
@@ -0,0 +1,102 @@
+From 261520dcfcba93ca5dfe671b88ffab038cd940c8 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Date: Wed, 4 Feb 2015 15:53:42 +0100
+Subject: KVM: s390: fix handling of write errors in the tpi handler
+
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+
+commit 261520dcfcba93ca5dfe671b88ffab038cd940c8 upstream.
+
+If the I/O interrupt could not be written to the guest provided
+area (e.g. access exception), a program exception was injected into the
+guest but "inti" wasn't freed, therefore resulting in a memory leak.
+
+In addition, the I/O interrupt wasn't reinjected. Therefore the dequeued
+interrupt was lost.
+
+This patch fixes the problem while cleaning up the function and making the
+cc and rc logic easier to handle.
+
+Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/priv.c |   40 +++++++++++++++++++++++-----------------
+ 1 file changed, 23 insertions(+), 17 deletions(-)
+
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *v
+       struct kvm_s390_interrupt_info *inti;
+       unsigned long len;
+       u32 tpi_data[3];
+-      int cc, rc;
++      int rc;
+       u64 addr;
+-      rc = 0;
+       addr = kvm_s390_get_base_disp_s(vcpu);
+       if (addr & 3)
+               return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+-      cc = 0;
++
+       inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
+-      if (!inti)
+-              goto no_interrupt;
+-      cc = 1;
++      if (!inti) {
++              kvm_s390_set_psw_cc(vcpu, 0);
++              return 0;
++      }
++
+       tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
+       tpi_data[1] = inti->io.io_int_parm;
+       tpi_data[2] = inti->io.io_int_word;
+@@ -251,30 +252,35 @@ static int handle_tpi(struct kvm_vcpu *v
+                */
+               len = sizeof(tpi_data) - 4;
+               rc = write_guest(vcpu, addr, &tpi_data, len);
+-              if (rc)
+-                      return kvm_s390_inject_prog_cond(vcpu, rc);
++              if (rc) {
++                      rc = kvm_s390_inject_prog_cond(vcpu, rc);
++                      goto reinject_interrupt;
++              }
+       } else {
+               /*
+                * Store the three-word I/O interruption code into
+                * the appropriate lowcore area.
+                */
+               len = sizeof(tpi_data);
+-              if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
++              if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
++                      /* failed writes to the low core are not recoverable */
+                       rc = -EFAULT;
++                      goto reinject_interrupt;
++              }
+       }
++
++      /* irq was successfully handed to the guest */
++      kfree(inti);
++      kvm_s390_set_psw_cc(vcpu, 1);
++      return 0;
++reinject_interrupt:
+       /*
+        * If we encounter a problem storing the interruption code, the
+        * instruction is suppressed from the guest's view: reinject the
+        * interrupt.
+        */
+-      if (!rc)
+-              kfree(inti);
+-      else
+-              kvm_s390_reinject_io_int(vcpu->kvm, inti);
+-no_interrupt:
+-      /* Set condition code and we're done. */
+-      if (!rc)
+-              kvm_s390_set_psw_cc(vcpu, cc);
++      kvm_s390_reinject_io_int(vcpu->kvm, inti);
++      /* don't set the cc, a pgm irq was injected or we drop to user space */
+       return rc ? -EFAULT : 0;
+ }
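
After the rewrite, handle_tpi() has exactly two outcomes for a dequeued
interrupt: it is delivered to the guest and freed, or it is reinjected; no path
leaks it. A small stand-alone C model of that control flow (the names and the
failure condition are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct irq_info { int data; };

static int write_to_guest(const struct irq_info *inti) { return inti->data < 0; }
static void reinject(struct irq_info *inti) { printf("requeued %d\n", inti->data); }

static int handle_tpi(struct irq_info *inti)
{
    if (!inti)
        return 0;                   /* nothing pending: cc 0, done */
    if (write_to_guest(inti))
        goto reinject_interrupt;    /* store failed: don't leak, don't lose */
    free(inti);                     /* delivered to the guest: drop our copy */
    return 0;                       /* cc 1 */
reinject_interrupt:
    reinject(inti);                 /* ownership goes back to the pending list */
    return -1;
}

int main(void)
{
    struct irq_info *inti = malloc(sizeof(*inti));

    if (!inti)
        return 1;
    inti->data = -5;                /* force the failure path */
    return handle_tpi(inti) ? 0 : 1;
}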
diff --git a/queue-4.0/kvm-s390-reinjection-of-irqs-can-fail-in-the-tpi-handler.patch b/queue-4.0/kvm-s390-reinjection-of-irqs-can-fail-in-the-tpi-handler.patch
new file mode 100644 (file)
index 0000000..48e145a
--- /dev/null
@@ -0,0 +1,70 @@
+From 15462e37ca848abac7477dece65f8af25febd744 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Date: Wed, 4 Feb 2015 15:59:11 +0100
+Subject: KVM: s390: reinjection of irqs can fail in the tpi handler
+
+From: David Hildenbrand <dahi@linux.vnet.ibm.com>
+
+commit 15462e37ca848abac7477dece65f8af25febd744 upstream.
+
+The reinjection of an I/O interrupt can fail if the list is at the limit
+and between the dequeue and the reinjection, another I/O interrupt is
+injected (e.g. if user space floods kvm with I/O interrupts).
+
+This patch avoids this memory leak and returns -EFAULT in this special
+case. This error is not recoverable, so let's fail hard. This can later
+be avoided by not dequeuing the interrupt but working directly on the
+locked list.
+
+Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/interrupt.c |    4 ++--
+ arch/s390/kvm/kvm-s390.h  |    4 ++--
+ arch/s390/kvm/priv.c      |    5 ++++-
+ 3 files changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -1332,10 +1332,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
+       return rc;
+ }
+-void kvm_s390_reinject_io_int(struct kvm *kvm,
++int kvm_s390_reinject_io_int(struct kvm *kvm,
+                             struct kvm_s390_interrupt_info *inti)
+ {
+-      __inject_vm(kvm, inti);
++      return __inject_vm(kvm, inti);
+ }
+ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(st
+ int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+                                                   u64 cr6, u64 schid);
+-void kvm_s390_reinject_io_int(struct kvm *kvm,
+-                            struct kvm_s390_interrupt_info *inti);
++int kvm_s390_reinject_io_int(struct kvm *kvm,
++                           struct kvm_s390_interrupt_info *inti);
+ int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
+ /* implemented in intercept.c */
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -279,7 +279,10 @@ reinject_interrupt:
+        * instruction is suppressed from the guest's view: reinject the
+        * interrupt.
+        */
+-      kvm_s390_reinject_io_int(vcpu->kvm, inti);
++      if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
++              kfree(inti);
++              rc = -EFAULT;
++      }
+       /* don't set the cc, a pgm irq was injected or we drop to user space */
+       return rc ? -EFAULT : 0;
+ }
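
This follow-up converts the reinjection helper from void to int so that its one
real failure, the pending list filling up again between dequeue and requeue,
reaches the caller. A sketch of that error-propagation shape, under the stated
assumption that the list-full condition is faked with a counter (nothing here
is the kernel API):

#include <stdio.h>
#include <stdlib.h>

#define LIST_LIMIT 2
static int list_len = LIST_LIMIT;   /* userspace refilled the list meanwhile */

/* was void: now reports when the pending list is already at its limit */
static int reinject(const int *inti)
{
    (void)inti;
    if (list_len >= LIST_LIMIT)
        return -1;
    list_len++;
    return 0;
}

static int handle_failed_store(int *inti)
{
    if (reinject(inti)) {
        free(inti);                 /* cannot requeue: at least do not leak */
        return -1;                  /* the kernel fails hard with -EFAULT */
    }
    return 0;
}

int main(void)
{
    int *inti = malloc(sizeof(*inti));

    if (!inti)
        return 1;
    if (handle_failed_store(inti))
        printf("reinjection failed, interrupt freed\n");
    return 0;
}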
diff --git a/queue-4.0/kvm-s390-zero-out-current-vmdb-of-stsi-before-including-level3-data.patch b/queue-4.0/kvm-s390-zero-out-current-vmdb-of-stsi-before-including-level3-data.patch
new file mode 100644 (file)
index 0000000..75f55b1
--- /dev/null
@@ -0,0 +1,31 @@
+From b75f4c9afac2604feb971441116c07a24ecca1ec Mon Sep 17 00:00:00 2001
+From: Ekaterina Tumanova <tumanova@linux.vnet.ibm.com>
+Date: Tue, 3 Mar 2015 09:54:41 +0100
+Subject: KVM: s390: Zero out current VMDB of STSI before including level3 data.
+
+From: Ekaterina Tumanova <tumanova@linux.vnet.ibm.com>
+
+commit b75f4c9afac2604feb971441116c07a24ecca1ec upstream.
+
+s390 documentation requires words 0 and 10-15 to be reserved and stored as
+zeros. As we fill out all other fields, we can memset the full structure.
+
+Signed-off-by: Ekaterina Tumanova <tumanova@linux.vnet.ibm.com>
+Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/priv.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -476,6 +476,7 @@ static void handle_stsi_3_2_2(struct kvm
+       for (n = mem->count - 1; n > 0 ; n--)
+               memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
++      memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
+       mem->vm[0].cpus_total = cpus;
+       mem->vm[0].cpus_configured = cpus;
+       mem->vm[0].cpus_standby = 0;
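
The one-line memset() works because every architecturally defined field of the
entry is assigned immediately afterwards, so zeroing the whole structure is the
cheapest way to satisfy the rule that reserved words read back as zero. The
idiom, sketched with a made-up layout (not the real VMDB):

#include <stdio.h>
#include <string.h>

struct vm_block {                    /* illustrative layout only */
    unsigned int word0_reserved;     /* must be stored as zero */
    unsigned int cpus_total;
    unsigned int cpus_configured;
    unsigned int words10_15[6];      /* must be stored as zero */
};

static void fill_level3(struct vm_block *vm, unsigned int cpus)
{
    memset(vm, 0, sizeof(*vm));      /* reserved words zeroed in one go */
    vm->cpus_total = cpus;           /* then set only the defined fields */
    vm->cpus_configured = cpus;
}

int main(void)
{
    struct vm_block vm;

    fill_level3(&vm, 4);
    printf("reserved word 0 = %u\n", vm.word0_reserved);   /* prints 0 */
    return 0;
}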
diff --git a/queue-4.0/kvm-use-slowpath-for-cross-page-cached-accesses.patch b/queue-4.0/kvm-use-slowpath-for-cross-page-cached-accesses.patch
new file mode 100644 (file)
index 0000000..a007291
--- /dev/null
@@ -0,0 +1,43 @@
+From ca3f0874723fad81d0c701b63ae3a17a408d5f25 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+Date: Wed, 8 Apr 2015 14:16:48 +0200
+Subject: KVM: use slowpath for cross page cached accesses
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+
+commit ca3f0874723fad81d0c701b63ae3a17a408d5f25 upstream.
+
+kvm_write_guest_cached() does not mark all written pages as dirty and
+code comments in kvm_gfn_to_hva_cache_init() talk about NULL memslot
+with cross page accesses.  Fix both problems the easy way.
+
+The check is '<= 1' so that a 'len = 0' cache gives the same result
+anywhere in the page (nr_pages_needed is 0 on a page boundary).
+
+Fixes: 8f964525a121 ("KVM: Allow cross page reads and writes from cached translations.")
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Message-Id: <20150408121648.GA3519@potion.brq.redhat.com>
+Reviewed-by: Wanpeng Li <wanpeng.li@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/kvm_main.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1653,8 +1653,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm
+       ghc->generation = slots->generation;
+       ghc->len = len;
+       ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+-      ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+-      if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
++      ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
++      if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
+               ghc->hva += offset;
+       } else {
+               /*
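
The new nr_pages_needed <= 1 test is plain offset arithmetic: a cached access
may take the fast path only if [gpa, gpa + len) fits in a single page. A
self-contained check of that arithmetic, including the len = 0 boundary case
the commit message calls out (PAGE_SIZE and the helper are local stand-ins,
not kvm_main.c code):

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* stand-in; the kernel derives this from PAGE_SHIFT */

/* pages needed to cover [gpa, gpa + len); 0 for len == 0 on a page boundary */
static unsigned long pages_needed(unsigned long gpa, unsigned long len)
{
    unsigned long start_gfn = gpa / PAGE_SIZE;
    unsigned long end_gfn = (gpa + len - 1) / PAGE_SIZE;

    return end_gfn - start_gfn + 1;
}

int main(void)
{
    printf("%lu\n", pages_needed(4092, 8));   /* 2: crosses a boundary, slow path */
    printf("%lu\n", pages_needed(4096, 8));   /* 1: one page, fast path */
    printf("%lu\n", pages_needed(4096, 0));   /* 0: empty cache on a boundary */
    return 0;
}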
diff --git a/queue-4.0/kvm-vmx-preserve-host-cr4.mce-value-while-in-guest-mode.patch b/queue-4.0/kvm-vmx-preserve-host-cr4.mce-value-while-in-guest-mode.patch
new file mode 100644 (file)
index 0000000..6c2ea86
--- /dev/null
@@ -0,0 +1,53 @@
+From 085e68eeafbf76e21848ad5bafaecec88a11dd64 Mon Sep 17 00:00:00 2001
+From: Ben Serebrin <serebrin@google.com>
+Date: Thu, 16 Apr 2015 11:58:05 -0700
+Subject: KVM: VMX: Preserve host CR4.MCE value while in guest mode.
+
+From: Ben Serebrin <serebrin@google.com>
+
+commit 085e68eeafbf76e21848ad5bafaecec88a11dd64 upstream.
+
+The host's decision to enable machine check exceptions should remain
+in force during non-root mode.  KVM was writing 0 to cr4 on VCPU reset
+and passing a slightly-modified 0 to the vmcs.guest_cr4 value.
+
+Tested: Built.
+On earlier version, tested by injecting machine check
+while a guest is spinning.
+
+Before the change, if guest CR4.MCE==0, then the machine check is
+escalated to Catastrophic Error (CATERR) and the machine dies.
+If guest CR4.MCE==1, then the machine check causes VMEXIT and is
+handled normally by host Linux. After the change, injecting a machine
+check causes normal Linux machine check handling.
+
+Signed-off-by: Ben Serebrin <serebrin@google.com>
+Reviewed-by: Venkatesh Srinivas <venkateshs@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c |   12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3621,8 +3621,16 @@ static void vmx_set_cr3(struct kvm_vcpu
+ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ {
+-      unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
+-                  KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
++      /*
++       * Pass through host's Machine Check Enable value to hw_cr4, which
++       * is in force while we are in guest mode.  Do not let guests control
++       * this bit, even if host CR4.MCE == 0.
++       */
++      unsigned long hw_cr4 =
++              (cr4_read_shadow() & X86_CR4_MCE) |
++              (cr4 & ~X86_CR4_MCE) |
++              (to_vmx(vcpu)->rmode.vm86_active ?
++               KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+       if (cr4 & X86_CR4_VMXE) {
+               /*
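
The replacement expression is three masked terms OR'd together: the host's MCE
bit, everything else the guest requested, and the mode-dependent always-on
bits. The bit logic is easy to verify in isolation (the constants below are
illustrative; the real values live in the x86 headers):

#include <assert.h>

#define X86_CR4_MCE      (1UL << 6)
#define CR4_ALWAYS_ON    (1UL << 13)   /* stand-in for KVM_*_VM_CR4_ALWAYS_ON */

static unsigned long hw_cr4(unsigned long host_cr4, unsigned long guest_cr4)
{
    return (host_cr4 & X86_CR4_MCE) |     /* host decides MCE, always */
           (guest_cr4 & ~X86_CR4_MCE) |   /* guest controls the rest */
           CR4_ALWAYS_ON;
}

int main(void)
{
    /* host MCE on, guest tries to clear it: stays on in hardware */
    assert(hw_cr4(X86_CR4_MCE, 0) & X86_CR4_MCE);
    /* host MCE off, guest tries to set it: stays off in hardware */
    assert(!(hw_cr4(0, X86_CR4_MCE) & X86_CR4_MCE));
    return 0;
}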
diff --git a/queue-4.0/mips-asm-asm-eva-introduce-kernel-load-store-variants.patch b/queue-4.0/mips-asm-asm-eva-introduce-kernel-load-store-variants.patch
new file mode 100644 (file)
index 0000000..2feddac
--- /dev/null
@@ -0,0 +1,195 @@
+From 60cd7e08e453bc6828ac4b539f949e4acd80f143 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Mon, 9 Mar 2015 14:54:49 +0000
+Subject: MIPS: asm: asm-eva: Introduce kernel load/store variants
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 60cd7e08e453bc6828ac4b539f949e4acd80f143 upstream.
+
+Introduce new macros for kernel load/store variants which will be
+used to perform regular kernel space load/store operations in EVA
+mode.
+
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9500/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/asm-eva.h |  137 +++++++++++++++++++++++++++-------------
+ 1 file changed, 93 insertions(+), 44 deletions(-)
+
+--- a/arch/mips/include/asm/asm-eva.h
++++ b/arch/mips/include/asm/asm-eva.h
+@@ -11,6 +11,36 @@
+ #define __ASM_ASM_EVA_H
+ #ifndef __ASSEMBLY__
++
++/* Kernel variants */
++
++#define kernel_cache(op, base)                "cache " op ", " base "\n"
++#define kernel_ll(reg, addr)          "ll " reg ", " addr "\n"
++#define kernel_sc(reg, addr)          "sc " reg ", " addr "\n"
++#define kernel_lw(reg, addr)          "lw " reg ", " addr "\n"
++#define kernel_lwl(reg, addr)         "lwl " reg ", " addr "\n"
++#define kernel_lwr(reg, addr)         "lwr " reg ", " addr "\n"
++#define kernel_lh(reg, addr)          "lh " reg ", " addr "\n"
++#define kernel_lb(reg, addr)          "lb " reg ", " addr "\n"
++#define kernel_lbu(reg, addr)         "lbu " reg ", " addr "\n"
++#define kernel_sw(reg, addr)          "sw " reg ", " addr "\n"
++#define kernel_swl(reg, addr)         "swl " reg ", " addr "\n"
++#define kernel_swr(reg, addr)         "swr " reg ", " addr "\n"
++#define kernel_sh(reg, addr)          "sh " reg ", " addr "\n"
++#define kernel_sb(reg, addr)          "sb " reg ", " addr "\n"
++
++#ifdef CONFIG_32BIT
++/*
++ * No 'sd' or 'ld' instructions in 32-bit but the code will
++ * do the correct thing
++ */
++#define kernel_sd(reg, addr)          user_sw(reg, addr)
++#define kernel_ld(reg, addr)          user_lw(reg, addr)
++#else
++#define kernel_sd(reg, addr)          "sd " reg", " addr "\n"
++#define kernel_ld(reg, addr)          "ld " reg", " addr "\n"
++#endif /* CONFIG_32BIT */
++
+ #ifdef CONFIG_EVA
+ #define __BUILD_EVA_INSN(insn, reg, addr)                             \
+@@ -41,37 +71,60 @@
+ #else
+-#define user_cache(op, base)          "cache " op ", " base "\n"
+-#define user_ll(reg, addr)            "ll " reg ", " addr "\n"
+-#define user_sc(reg, addr)            "sc " reg ", " addr "\n"
+-#define user_lw(reg, addr)            "lw " reg ", " addr "\n"
+-#define user_lwl(reg, addr)           "lwl " reg ", " addr "\n"
+-#define user_lwr(reg, addr)           "lwr " reg ", " addr "\n"
+-#define user_lh(reg, addr)            "lh " reg ", " addr "\n"
+-#define user_lb(reg, addr)            "lb " reg ", " addr "\n"
+-#define user_lbu(reg, addr)           "lbu " reg ", " addr "\n"
+-#define user_sw(reg, addr)            "sw " reg ", " addr "\n"
+-#define user_swl(reg, addr)           "swl " reg ", " addr "\n"
+-#define user_swr(reg, addr)           "swr " reg ", " addr "\n"
+-#define user_sh(reg, addr)            "sh " reg ", " addr "\n"
+-#define user_sb(reg, addr)            "sb " reg ", " addr "\n"
++#define user_cache(op, base)          kernel_cache(op, base)
++#define user_ll(reg, addr)            kernel_ll(reg, addr)
++#define user_sc(reg, addr)            kernel_sc(reg, addr)
++#define user_lw(reg, addr)            kernel_lw(reg, addr)
++#define user_lwl(reg, addr)           kernel_lwl(reg, addr)
++#define user_lwr(reg, addr)           kernel_lwr(reg, addr)
++#define user_lh(reg, addr)            kernel_lh(reg, addr)
++#define user_lb(reg, addr)            kernel_lb(reg, addr)
++#define user_lbu(reg, addr)           kernel_lbu(reg, addr)
++#define user_sw(reg, addr)            kernel_sw(reg, addr)
++#define user_swl(reg, addr)           kernel_swl(reg, addr)
++#define user_swr(reg, addr)           kernel_swr(reg, addr)
++#define user_sh(reg, addr)            kernel_sh(reg, addr)
++#define user_sb(reg, addr)            kernel_sb(reg, addr)
+ #ifdef CONFIG_32BIT
+-/*
+- * No 'sd' or 'ld' instructions in 32-bit but the code will
+- * do the correct thing
+- */
+-#define user_sd(reg, addr)            user_sw(reg, addr)
+-#define user_ld(reg, addr)            user_lw(reg, addr)
++#define user_sd(reg, addr)            kernel_sw(reg, addr)
++#define user_ld(reg, addr)            kernel_lw(reg, addr)
+ #else
+-#define user_sd(reg, addr)            "sd " reg", " addr "\n"
+-#define user_ld(reg, addr)            "ld " reg", " addr "\n"
++#define user_sd(reg, addr)            kernel_sd(reg, addr)
++#define user_ld(reg, addr)            kernel_ld(reg, addr)
+ #endif /* CONFIG_32BIT */
+ #endif /* CONFIG_EVA */
+ #else /* __ASSEMBLY__ */
++#define kernel_cache(op, base)                cache op, base
++#define kernel_ll(reg, addr)          ll reg, addr
++#define kernel_sc(reg, addr)          sc reg, addr
++#define kernel_lw(reg, addr)          lw reg, addr
++#define kernel_lwl(reg, addr)         lwl reg, addr
++#define kernel_lwr(reg, addr)         lwr reg, addr
++#define kernel_lh(reg, addr)          lh reg, addr
++#define kernel_lb(reg, addr)          lb reg, addr
++#define kernel_lbu(reg, addr)         lbu reg, addr
++#define kernel_sw(reg, addr)          sw reg, addr
++#define kernel_swl(reg, addr)         swl reg, addr
++#define kernel_swr(reg, addr)         swr reg, addr
++#define kernel_sh(reg, addr)          sh reg, addr
++#define kernel_sb(reg, addr)          sb reg, addr
++
++#ifdef CONFIG_32BIT
++/*
++ * No 'sd' or 'ld' instructions in 32-bit but the code will
++ * do the correct thing
++ */
++#define kernel_sd(reg, addr)          user_sw(reg, addr)
++#define kernel_ld(reg, addr)          user_lw(reg, addr)
++#else
++#define kernel_sd(reg, addr)          sd reg, addr
++#define kernel_ld(reg, addr)          ld reg, addr
++#endif /* CONFIG_32BIT */
++
+ #ifdef CONFIG_EVA
+ #define __BUILD_EVA_INSN(insn, reg, addr)                     \
+@@ -101,31 +154,27 @@
+ #define user_sd(reg, addr)            user_sw(reg, addr)
+ #else
+-#define user_cache(op, base)          cache op, base
+-#define user_ll(reg, addr)            ll reg, addr
+-#define user_sc(reg, addr)            sc reg, addr
+-#define user_lw(reg, addr)            lw reg, addr
+-#define user_lwl(reg, addr)           lwl reg, addr
+-#define user_lwr(reg, addr)           lwr reg, addr
+-#define user_lh(reg, addr)            lh reg, addr
+-#define user_lb(reg, addr)            lb reg, addr
+-#define user_lbu(reg, addr)           lbu reg, addr
+-#define user_sw(reg, addr)            sw reg, addr
+-#define user_swl(reg, addr)           swl reg, addr
+-#define user_swr(reg, addr)           swr reg, addr
+-#define user_sh(reg, addr)            sh reg, addr
+-#define user_sb(reg, addr)            sb reg, addr
++#define user_cache(op, base)          kernel_cache(op, base)
++#define user_ll(reg, addr)            kernel_ll(reg, addr)
++#define user_sc(reg, addr)            kernel_sc(reg, addr)
++#define user_lw(reg, addr)            kernel_lw(reg, addr)
++#define user_lwl(reg, addr)           kernel_lwl(reg, addr)
++#define user_lwr(reg, addr)           kernel_lwr(reg, addr)
++#define user_lh(reg, addr)            kernel_lh(reg, addr)
++#define user_lb(reg, addr)            kernel_lb(reg, addr)
++#define user_lbu(reg, addr)           kernel_lbu(reg, addr)
++#define user_sw(reg, addr)            kernel_sw(reg, addr)
++#define user_swl(reg, addr)           kernel_swl(reg, addr)
++#define user_swr(reg, addr)           kernel_swr(reg, addr)
++#define user_sh(reg, addr)            kernel_sh(reg, addr)
++#define user_sb(reg, addr)            kernel_sb(reg, addr)
+ #ifdef CONFIG_32BIT
+-/*
+- * No 'sd' or 'ld' instructions in 32-bit but the code will
+- * do the correct thing
+- */
+-#define user_sd(reg, addr)            user_sw(reg, addr)
+-#define user_ld(reg, addr)            user_lw(reg, addr)
++#define user_sd(reg, addr)            kernel_sw(reg, addr)
++#define user_ld(reg, addr)            kernel_lw(reg, addr)
+ #else
+-#define user_sd(reg, addr)            sd reg, addr
+-#define user_ld(reg, addr)            ld reg, addr
++#define user_sd(reg, addr)            kernel_sd(reg, addr)
++#define user_ld(reg, addr)            kernel_ld(reg, addr)
+ #endif /* CONFIG_32BIT */
+ #endif /* CONFIG_EVA */
diff --git a/queue-4.0/mips-hibernate-flush-tlb-entries-earlier.patch b/queue-4.0/mips-hibernate-flush-tlb-entries-earlier.patch
new file mode 100644 (file)
index 0000000..764694c
--- /dev/null
@@ -0,0 +1,45 @@
+From a843d00d038b11267279e3b5388222320f9ddc1d Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Sun, 29 Mar 2015 10:54:05 +0800
+Subject: MIPS: Hibernate: flush TLB entries earlier
+
+From: Huacai Chen <chenhc@lemote.com>
+
+commit a843d00d038b11267279e3b5388222320f9ddc1d upstream.
+
+We found that TLB mismatch not only happens after kernel resume, but
+also happens during snapshot restore. So move it to the beginning of
+swsusp_arch_suspend().
+
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Cc: Steven J. Hill <Steven.Hill@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Patchwork: https://patchwork.linux-mips.org/patch/9621/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/power/hibernate.S |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
+ END(swsusp_arch_suspend)
+ LEAF(swsusp_arch_resume)
++      /* Avoid TLB mismatch during and after kernel resume */
++      jal local_flush_tlb_all
+       PTR_L t0, restore_pblist
+ 0:
+       PTR_L t1, PBE_ADDRESS(t0)   /* source */
+@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
+       bne t1, t3, 1b
+       PTR_L t0, PBE_NEXT(t0)
+       bnez t0, 0b
+-      jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+       PTR_LA t0, saved_regs
+       PTR_L ra, PT_R31(t0)
+       PTR_L sp, PT_R29(t0)
diff --git a/queue-4.0/mips-kvm-handle-msa-disabled-exceptions-from-guest.patch b/queue-4.0/mips-kvm-handle-msa-disabled-exceptions-from-guest.patch
new file mode 100644 (file)
index 0000000..2762999
--- /dev/null
@@ -0,0 +1,129 @@
+From 98119ad53376885819d93dfb8737b6a9a61ca0ba Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 6 Feb 2015 11:11:56 +0000
+Subject: MIPS: KVM: Handle MSA Disabled exceptions from guest
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 98119ad53376885819d93dfb8737b6a9a61ca0ba upstream.
+
+Guest user mode can generate a guest MSA Disabled exception on an MSA
+capable core by simply trying to execute an MSA instruction. Since this
+exception is unknown to KVM it will be passed on to the guest kernel.
+However guest Linux kernels prior to v3.15 do not set up an exception
+handler for the MSA Disabled exception as they don't support any MSA
+capable cores. This results in a guest OS panic.
+
+Since an older processor ID may be being emulated, and MSA support is
+not advertised to the guest, the correct behaviour is to generate a
+Reserved Instruction exception in the guest kernel so it can send the
+guest process an illegal instruction signal (SIGILL), as would happen
+with a non-MSA-capable core.
+
+Fix this as minimally as reasonably possible by preventing
+kvm_mips_check_privilege() from relaying MSA Disabled exceptions from
+guest user mode to the guest kernel, and handling the MSA Disabled
+exception by emulating a Reserved Instruction exception in the guest,
+via a new handle_msa_disabled() KVM callback.
+
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Gleb Natapov <gleb@kernel.org>
+Cc: linux-mips@linux-mips.org
+Cc: kvm@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/kvm_host.h |    2 ++
+ arch/mips/kvm/emulate.c          |    1 +
+ arch/mips/kvm/mips.c             |    4 ++++
+ arch/mips/kvm/trap_emul.c        |   28 ++++++++++++++++++++++++++++
+ 4 files changed, 35 insertions(+)
+
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -322,6 +322,7 @@ enum mips_mmu_types {
+ #define T_TRAP                        13      /* Trap instruction */
+ #define T_VCEI                        14      /* Virtual coherency exception */
+ #define T_FPE                 15      /* Floating point exception */
++#define T_MSADIS              21      /* MSA disabled exception */
+ #define T_WATCH                       23      /* Watch address reference */
+ #define T_VCED                        31      /* Virtual coherency data */
+@@ -578,6 +579,7 @@ struct kvm_mips_callbacks {
+       int (*handle_syscall)(struct kvm_vcpu *vcpu);
+       int (*handle_res_inst)(struct kvm_vcpu *vcpu);
+       int (*handle_break)(struct kvm_vcpu *vcpu);
++      int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
+       int (*vm_init)(struct kvm *kvm);
+       int (*vcpu_init)(struct kvm_vcpu *vcpu);
+       int (*vcpu_setup)(struct kvm_vcpu *vcpu);
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_pri
+               case T_SYSCALL:
+               case T_BREAK:
+               case T_RES_INST:
++              case T_MSADIS:
+                       break;
+               case T_COP_UNUSABLE:
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run
+               ret = kvm_mips_callbacks->handle_break(vcpu);
+               break;
++      case T_MSADIS:
++              ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
++              break;
++
+       default:
+               kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+                       exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+--- a/arch/mips/kvm/trap_emul.c
++++ b/arch/mips/kvm/trap_emul.c
+@@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(st
+       return ret;
+ }
++static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
++{
++      struct kvm_run *run = vcpu->run;
++      uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
++      unsigned long cause = vcpu->arch.host_cp0_cause;
++      enum emulation_result er = EMULATE_DONE;
++      int ret = RESUME_GUEST;
++
++      /* No MSA supported in guest, guest reserved instruction exception */
++      er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
++
++      switch (er) {
++      case EMULATE_DONE:
++              ret = RESUME_GUEST;
++              break;
++
++      case EMULATE_FAIL:
++              run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++              ret = RESUME_HOST;
++              break;
++
++      default:
++              BUG();
++      }
++      return ret;
++}
++
+ static int kvm_trap_emul_vm_init(struct kvm *kvm)
+ {
+       return 0;
+@@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_tra
+       .handle_syscall = kvm_trap_emul_handle_syscall,
+       .handle_res_inst = kvm_trap_emul_handle_res_inst,
+       .handle_break = kvm_trap_emul_handle_break,
++      .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
+       .vm_init = kvm_trap_emul_vm_init,
+       .vcpu_init = kvm_trap_emul_vcpu_init,
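
Structurally, the fix is one new entry in the per-implementation callback table
plus one new dispatch case, which is the standard way this exit handler grows.
A reduced stand-alone model of that dispatch (the exception codes and handlers
are simplified stand-ins for the kernel's):

#include <stdio.h>

enum { T_RES_INST = 10, T_MSADIS = 21 };

struct callbacks {
    int (*handle_res_inst)(void);
    int (*handle_msa_disabled)(void);
};

static int emulate_ri(void) { printf("RI injected into guest\n"); return 0; }

/* MSA Disabled reuses the Reserved Instruction emulation, as the patch does */
static const struct callbacks cb = {
    .handle_res_inst = emulate_ri,
    .handle_msa_disabled = emulate_ri,
};

static int handle_exit(int exccode)
{
    switch (exccode) {
    case T_RES_INST:  return cb.handle_res_inst();
    case T_MSADIS:    return cb.handle_msa_disabled();
    default:
        fprintf(stderr, "Exception Code: %d, not yet handled\n", exccode);
        return -1;
    }
}

int main(void) { return handle_exit(T_MSADIS); }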
diff --git a/queue-4.0/mips-loongson-3-add-irqf_no_suspend-to-cascade-irqaction.patch b/queue-4.0/mips-loongson-3-add-irqf_no_suspend-to-cascade-irqaction.patch
new file mode 100644 (file)
index 0000000..c17e49b
--- /dev/null
@@ -0,0 +1,37 @@
+From 0add9c2f1cff9f3f1f2eb7e9babefa872a9d14b9 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Thu, 12 Mar 2015 11:51:06 +0800
+Subject: MIPS: Loongson-3: Add IRQF_NO_SUSPEND to Cascade irqaction
+
+From: Huacai Chen <chenhc@lemote.com>
+
+commit 0add9c2f1cff9f3f1f2eb7e9babefa872a9d14b9 upstream.
+
+The HPET irq is routed to the i8259 and then to the MIPS CPU irq
+(cascade). After commit a3e6c1eff5 (MIPS: IRQ: Fix disable_irq on CPU
+IRQs), HPET interrupts are lost during suspend unless IRQF_NO_SUSPEND
+is set in cascade_irqaction. The result is that the machine cannot be
+woken up.
+
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Cc: Steven J. Hill <Steven.Hill@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Patchwork: https://patchwork.linux-mips.org/patch/9528/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/loongson/loongson-3/irq.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/loongson/loongson-3/irq.c
++++ b/arch/mips/loongson/loongson-3/irq.c
+@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pend
+ static struct irqaction cascade_irqaction = {
+       .handler = no_action,
++      .flags = IRQF_NO_SUSPEND,
+       .name = "cascade",
+ };
diff --git a/queue-4.0/mips-lose_fpu-disable-fpu-when-msa-enabled.patch b/queue-4.0/mips-lose_fpu-disable-fpu-when-msa-enabled.patch
new file mode 100644 (file)
index 0000000..e84dd16
--- /dev/null
@@ -0,0 +1,45 @@
+From acaf6a97d623af123314c2f8ce4cf7254f6b2fc1 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Wed, 25 Feb 2015 13:08:05 +0000
+Subject: MIPS: lose_fpu(): Disable FPU when MSA enabled
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit acaf6a97d623af123314c2f8ce4cf7254f6b2fc1 upstream.
+
+The lose_fpu() function only disables the FPU in CP0_Status.CU1 if the
+FPU is in use and MSA isn't enabled.
+
+This isn't necessarily a problem because KSTK_STATUS(current), the
+version of CP0_Status stored on the kernel stack on entry from user
+mode, does always get updated and gets restored when returning to user
+mode, but I don't think it was intended, and it is inconsistent with the
+case of only the FPU being in use. Sometimes leaving the FPU enabled may
+also mask kernel bugs where FPU operations are executed when the FPU
+might not be enabled.
+
+So let's disable the FPU in the MSA case too.
+
+Fixes: 33c771ba5c5d ("MIPS: save/disable MSA in lose_fpu")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9323/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/fpu.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/include/asm/fpu.h
++++ b/arch/mips/include/asm/fpu.h
+@@ -170,6 +170,7 @@ static inline void lose_fpu(int save)
+               }
+               disable_msa();
+               clear_thread_flag(TIF_USEDMSA);
++              __disable_fpu();
+       } else if (is_fpu_owner()) {
+               if (save)
+                       _save_fp(current);
diff --git a/queue-4.0/mips-malta-detect-and-fix-bad-memsize-values.patch b/queue-4.0/mips-malta-detect-and-fix-bad-memsize-values.patch
new file mode 100644 (file)
index 0000000..bced60a
--- /dev/null
@@ -0,0 +1,43 @@
+From f7f8aea4b97c4d48e42f02cb37026bee445f239f Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Fri, 27 Feb 2015 07:51:32 +0000
+Subject: MIPS: Malta: Detect and fix bad memsize values
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit f7f8aea4b97c4d48e42f02cb37026bee445f239f upstream.
+
+memsize denotes the amount of RAM we can access from kseg{0,1} and
+that should be up to 256M. In case the bootloader reports a value
+higher than that (perhaps reporting all the available RAM) it's best
+if we fix it ourselves and just warn the user about that. This is
+usually a problem with the bootloader and/or its environment.
+
+[ralf@linux-mips.org: Remove useless parens as suggested by Sergei.
+Reformat long pr_warn statement to fit into 80 column limit.]
+
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9362/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mti-malta/malta-memory.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/mips/mti-malta/malta-memory.c
++++ b/arch/mips/mti-malta/malta-memory.c
+@@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int e
+               pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
+               physical_memsize = 0x02000000;
+       } else {
++              if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
++                      pr_warn("Unsupported memsize value (0x%lx) detected! "
++                              "Using 0x10000000 (256M) instead\n",
++                              memsize);
++                      memsize = 256 << 20;
++              }
+               /* If ememsize is set, then set physical_memsize to that */
+               physical_memsize = ememsize ? : memsize;
+       }
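
The added hunk is a clamp-and-warn guard: rather than refusing to boot on a bad
bootloader value, it fixes the value up and tells the user. The same guard as a
tiny stand-alone program (only the 256 MB limit comes from the patch; the rest
is illustrative):

#include <stdio.h>

int main(void)
{
    unsigned long memsize = 1UL << 30;          /* bootloader claims 1 GB */
    const unsigned long limit = 256UL << 20;    /* kseg0/1-reachable RAM */

    if (memsize > limit) {
        fprintf(stderr,
            "Unsupported memsize value (0x%lx) detected! Using 0x10000000 (256M) instead\n",
            memsize);
        memsize = limit;                        /* clamp, then keep booting */
    }
    printf("physical_memsize = 0x%lx\n", memsize);
    return 0;
}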
diff --git a/queue-4.0/mips-unaligned-fix-regular-load-store-instruction-emulation-for-eva.patch b/queue-4.0/mips-unaligned-fix-regular-load-store-instruction-emulation-for-eva.patch
new file mode 100644 (file)
index 0000000..1e009b3
--- /dev/null
@@ -0,0 +1,115 @@
+From 6eae35485b26f9e51ab896eb8a936bed9908fdf6 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Mon, 9 Mar 2015 14:54:52 +0000
+Subject: MIPS: unaligned: Fix regular load/store instruction emulation for EVA
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 6eae35485b26f9e51ab896eb8a936bed9908fdf6 upstream.
+
+When emulating a regular lh/lw/lhu/sh/sw we need to use the appropriate
+instruction if we are in EVA mode. This is necessary for userspace
+applications which trigger alignment exceptions. In such a case, the
+userspace load/store instruction needs to be emulated with the correct
+eva/non-eva instruction by the kernel emulator.
+
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Fixes: c1771216ab48 ("MIPS: kernel: unaligned: Handle unaligned accesses for EVA")
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9503/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/unaligned.c |   52 ++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 47 insertions(+), 5 deletions(-)
+
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -1023,7 +1023,15 @@ static void emulate_load_store_insn(stru
+               if (!access_ok(VERIFY_READ, addr, 2))
+                       goto sigbus;
+-              LoadHW(addr, value, res);
++              if (config_enabled(CONFIG_EVA)) {
++                      if (segment_eq(get_fs(), get_ds()))
++                              LoadHW(addr, value, res);
++                      else
++                              LoadHWE(addr, value, res);
++              } else {
++                      LoadHW(addr, value, res);
++              }
++
+               if (res)
+                       goto fault;
+               compute_return_epc(regs);
+@@ -1034,7 +1042,15 @@ static void emulate_load_store_insn(stru
+               if (!access_ok(VERIFY_READ, addr, 4))
+                       goto sigbus;
+-              LoadW(addr, value, res);
++              if (config_enabled(CONFIG_EVA)) {
++                      if (segment_eq(get_fs(), get_ds()))
++                              LoadW(addr, value, res);
++                      else
++                              LoadWE(addr, value, res);
++              } else {
++                      LoadW(addr, value, res);
++              }
++
+               if (res)
+                       goto fault;
+               compute_return_epc(regs);
+@@ -1045,7 +1061,15 @@ static void emulate_load_store_insn(stru
+               if (!access_ok(VERIFY_READ, addr, 2))
+                       goto sigbus;
+-              LoadHWU(addr, value, res);
++              if (config_enabled(CONFIG_EVA)) {
++                      if (segment_eq(get_fs(), get_ds()))
++                              LoadHWU(addr, value, res);
++                      else
++                              LoadHWUE(addr, value, res);
++              } else {
++                      LoadHWU(addr, value, res);
++              }
++
+               if (res)
+                       goto fault;
+               compute_return_epc(regs);
+@@ -1104,7 +1128,16 @@ static void emulate_load_store_insn(stru
+               compute_return_epc(regs);
+               value = regs->regs[insn.i_format.rt];
+-              StoreHW(addr, value, res);
++
++              if (config_enabled(CONFIG_EVA)) {
++                      if (segment_eq(get_fs(), get_ds()))
++                              StoreHW(addr, value, res);
++                      else
++                              StoreHWE(addr, value, res);
++              } else {
++                      StoreHW(addr, value, res);
++              }
++
+               if (res)
+                       goto fault;
+               break;
+@@ -1115,7 +1148,16 @@ static void emulate_load_store_insn(stru
+               compute_return_epc(regs);
+               value = regs->regs[insn.i_format.rt];
+-              StoreW(addr, value, res);
++
++              if (config_enabled(CONFIG_EVA)) {
++                      if (segment_eq(get_fs(), get_ds()))
++                              StoreW(addr, value, res);
++                      else
++                              StoreWE(addr, value, res);
++              } else {
++                      StoreW(addr, value, res);
++              }
++
+               if (res)
+                       goto fault;
+               break;
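
Every hunk above makes the same runtime decision: on an EVA kernel, use the EVA
load/store variant only when the faulting access came from the user address
space, i.e. when segment_eq(get_fs(), get_ds()) is false. A condensed model of
that selection, with the predicate and both "instructions" replaced by
stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_EVA 1   /* pretend the kernel was built with EVA support */

static bool kernel_segment;   /* models segment_eq(get_fs(), get_ds()) */

static void LoadHW(void)  { puts("lh   (kernel address space)"); }
static void LoadHWE(void) { puts("lhe  (EVA user address space)"); }

static void emulate_lh(void)
{
#if CONFIG_EVA
    if (kernel_segment)
        LoadHW();    /* kernel buffers: never use EVA instructions */
    else
        LoadHWE();   /* user access: must translate through EVA */
#else
    LoadHW();
#endif
}

int main(void)
{
    kernel_segment = true;  emulate_lh();
    kernel_segment = false; emulate_lh();
    return 0;
}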
diff --git a/queue-4.0/mips-unaligned-prevent-eva-instructions-on-kernel-unaligned-accesses.patch b/queue-4.0/mips-unaligned-prevent-eva-instructions-on-kernel-unaligned-accesses.patch
new file mode 100644 (file)
index 0000000..1be2355
--- /dev/null
@@ -0,0 +1,479 @@
+From eeb538950367e3966cbf0237ab1a1dc30e059818 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Mon, 9 Mar 2015 14:54:50 +0000
+Subject: MIPS: unaligned: Prevent EVA instructions on kernel unaligned accesses
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit eeb538950367e3966cbf0237ab1a1dc30e059818 upstream.
+
+Commit c1771216ab48 ("MIPS: kernel: unaligned: Handle unaligned
+accesses for EVA") allowed unaligned accesses to be emulated for
+EVA. However, when emulating regular load/store unaligned accesses,
+we need to use the appropriate "address space" instructions for that.
+Previously, an unaligned load/store instruction in kernel space would
+have used the corresponding EVA instructions to emulate it, which led
+to segmentation faults because of the address translation that happens
+with EVA instructions. This is now fixed by using the EVA instruction
+only when emulating EVA unaligned accesses.
+
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Fixes: c1771216ab48 ("MIPS: kernel: unaligned: Handle unaligned accesses for EVA")
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9501/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/unaligned.c |  172 +++++++++++++++++++++++--------------------
+ 1 file changed, 94 insertions(+), 78 deletions(-)
+
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -109,10 +109,10 @@ static u32 unaligned_action;
+ extern void show_registers(struct pt_regs *regs);
+ #ifdef __BIG_ENDIAN
+-#define     LoadHW(addr, value, res)  \
++#define     _LoadHW(addr, value, res, type)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+-                      "1:\t"user_lb("%0", "0(%2)")"\n"    \
+-                      "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
++                      "1:\t"type##_lb("%0", "0(%2)")"\n"  \
++                      "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+@@ -130,10 +130,10 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #ifndef CONFIG_CPU_MIPSR6
+-#define     LoadW(addr, value, res)   \
++#define     _LoadW(addr, value, res, type)   \
+               __asm__ __volatile__ (                      \
+-                      "1:\t"user_lwl("%0", "(%2)")"\n"    \
+-                      "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
++                      "1:\t"type##_lwl("%0", "(%2)")"\n"   \
++                      "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+@@ -149,18 +149,18 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #else
+ /* MIPSR6 has no lwl instruction */
+-#define     LoadW(addr, value, res) \
++#define     _LoadW(addr, value, res, type) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+-                      "1:"user_lb("%0", "0(%2)")"\n\t"    \
+-                      "2:"user_lbu("$1", "1(%2)")"\n\t"   \
++                      "1:"type##_lb("%0", "0(%2)")"\n\t"  \
++                      "2:"type##_lbu("$1", "1(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+-                      "3:"user_lbu("$1", "2(%2)")"\n\t"   \
++                      "3:"type##_lbu("$1", "2(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+-                      "4:"user_lbu("$1", "3(%2)")"\n\t"   \
++                      "4:"type##_lbu("$1", "3(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+@@ -181,11 +181,11 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #endif /* CONFIG_CPU_MIPSR6 */
+-#define     LoadHWU(addr, value, res) \
++#define     _LoadHWU(addr, value, res, type) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+-                      "1:\t"user_lbu("%0", "0(%2)")"\n"   \
+-                      "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
++                      "1:\t"type##_lbu("%0", "0(%2)")"\n" \
++                      "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+@@ -204,10 +204,10 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #ifndef CONFIG_CPU_MIPSR6
+-#define     LoadWU(addr, value, res)  \
++#define     _LoadWU(addr, value, res, type)  \
+               __asm__ __volatile__ (                      \
+-                      "1:\t"user_lwl("%0", "(%2)")"\n"    \
+-                      "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
++                      "1:\t"type##_lwl("%0", "(%2)")"\n"  \
++                      "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+                       "dsll\t%0, %0, 32\n\t"              \
+                       "dsrl\t%0, %0, 32\n\t"              \
+                       "li\t%1, 0\n"                       \
+@@ -224,7 +224,7 @@ extern void show_registers(struct pt_reg
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+-#define     LoadDW(addr, value, res)  \
++#define     _LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, (%2)\n"               \
+                       "2:\tldr\t%0, 7(%2)\n\t"            \
+@@ -243,18 +243,18 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #else
+ /* MIPSR6 has not lwl and ldl instructions */
+-#define           LoadWU(addr, value, res) \
++#define           _LoadWU(addr, value, res, type) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+-                      "1:"user_lbu("%0", "0(%2)")"\n\t"   \
+-                      "2:"user_lbu("$1", "1(%2)")"\n\t"   \
++                      "1:"type##_lbu("%0", "0(%2)")"\n\t" \
++                      "2:"type##_lbu("$1", "1(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+-                      "3:"user_lbu("$1", "2(%2)")"\n\t"   \
++                      "3:"type##_lbu("$1", "2(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+-                      "4:"user_lbu("$1", "3(%2)")"\n\t"   \
++                      "4:"type##_lbu("$1", "3(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+@@ -274,7 +274,7 @@ extern void show_registers(struct pt_reg
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+-#define     LoadDW(addr, value, res)  \
++#define     _LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -323,12 +323,12 @@ extern void show_registers(struct pt_reg
+ #endif /* CONFIG_CPU_MIPSR6 */
+-#define     StoreHW(addr, value, res) \
++#define     _StoreHW(addr, value, res, type) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+-                      "1:\t"user_sb("%1", "1(%2)")"\n"    \
++                      "1:\t"type##_sb("%1", "1(%2)")"\n"  \
+                       "srl\t$1, %1, 0x8\n"                \
+-                      "2:\t"user_sb("$1", "0(%2)")"\n"    \
++                      "2:\t"type##_sb("$1", "0(%2)")"\n"  \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+@@ -345,10 +345,10 @@ extern void show_registers(struct pt_reg
+                       : "r" (value), "r" (addr), "i" (-EFAULT));
+ #ifndef CONFIG_CPU_MIPSR6
+-#define     StoreW(addr, value, res)  \
++#define     _StoreW(addr, value, res, type)  \
+               __asm__ __volatile__ (                      \
+-                      "1:\t"user_swl("%1", "(%2)")"\n"    \
+-                      "2:\t"user_swr("%1", "3(%2)")"\n\t" \
++                      "1:\t"type##_swl("%1", "(%2)")"\n"  \
++                      "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+@@ -363,7 +363,7 @@ extern void show_registers(struct pt_reg
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+-#define     StoreDW(addr, value, res) \
++#define     _StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1,(%2)\n"                \
+                       "2:\tsdr\t%1, 7(%2)\n\t"            \
+@@ -382,17 +382,17 @@ extern void show_registers(struct pt_reg
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+ #else
+ /* MIPSR6 has no swl and sdl instructions */
+-#define     StoreW(addr, value, res)  \
++#define     _StoreW(addr, value, res, type)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+-                      "1:"user_sb("%1", "3(%2)")"\n\t"    \
++                      "1:"type##_sb("%1", "3(%2)")"\n\t"  \
+                       "srl\t$1, %1, 0x8\n\t"              \
+-                      "2:"user_sb("$1", "2(%2)")"\n\t"    \
++                      "2:"type##_sb("$1", "2(%2)")"\n\t"  \
+                       "srl\t$1, $1,  0x8\n\t"             \
+-                      "3:"user_sb("$1", "1(%2)")"\n\t"    \
++                      "3:"type##_sb("$1", "1(%2)")"\n\t"  \
+                       "srl\t$1, $1, 0x8\n\t"              \
+-                      "4:"user_sb("$1", "0(%2)")"\n\t"    \
++                      "4:"type##_sb("$1", "0(%2)")"\n\t"  \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+@@ -456,10 +456,10 @@ extern void show_registers(struct pt_reg
+ #else /* __BIG_ENDIAN */
+-#define     LoadHW(addr, value, res)  \
++#define     _LoadHW(addr, value, res, type)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+-                      "1:\t"user_lb("%0", "1(%2)")"\n"    \
+-                      "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
++                      "1:\t"type##_lb("%0", "1(%2)")"\n"  \
++                      "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+@@ -477,10 +477,10 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #ifndef CONFIG_CPU_MIPSR6
+-#define     LoadW(addr, value, res)   \
++#define     _LoadW(addr, value, res, type)   \
+               __asm__ __volatile__ (                      \
+-                      "1:\t"user_lwl("%0", "3(%2)")"\n"   \
+-                      "2:\t"user_lwr("%0", "(%2)")"\n\t"  \
++                      "1:\t"type##_lwl("%0", "3(%2)")"\n" \
++                      "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+@@ -496,18 +496,18 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #else
+ /* MIPSR6 has no lwl instruction */
+-#define     LoadW(addr, value, res) \
++#define     _LoadW(addr, value, res, type) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+-                      "1:"user_lb("%0", "3(%2)")"\n\t"    \
+-                      "2:"user_lbu("$1", "2(%2)")"\n\t"   \
++                      "1:"type##_lb("%0", "3(%2)")"\n\t"  \
++                      "2:"type##_lbu("$1", "2(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+-                      "3:"user_lbu("$1", "1(%2)")"\n\t"   \
++                      "3:"type##_lbu("$1", "1(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+-                      "4:"user_lbu("$1", "0(%2)")"\n\t"   \
++                      "4:"type##_lbu("$1", "0(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+@@ -529,11 +529,11 @@ extern void show_registers(struct pt_reg
+ #endif /* CONFIG_CPU_MIPSR6 */
+-#define     LoadHWU(addr, value, res) \
++#define     _LoadHWU(addr, value, res, type) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+-                      "1:\t"user_lbu("%0", "1(%2)")"\n"   \
+-                      "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
++                      "1:\t"type##_lbu("%0", "1(%2)")"\n" \
++                      "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+@@ -552,10 +552,10 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #ifndef CONFIG_CPU_MIPSR6
+-#define     LoadWU(addr, value, res)  \
++#define     _LoadWU(addr, value, res, type)  \
+               __asm__ __volatile__ (                      \
+-                      "1:\t"user_lwl("%0", "3(%2)")"\n"   \
+-                      "2:\t"user_lwr("%0", "(%2)")"\n\t"  \
++                      "1:\t"type##_lwl("%0", "3(%2)")"\n" \
++                      "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+                       "dsll\t%0, %0, 32\n\t"              \
+                       "dsrl\t%0, %0, 32\n\t"              \
+                       "li\t%1, 0\n"                       \
+@@ -572,7 +572,7 @@ extern void show_registers(struct pt_reg
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+-#define     LoadDW(addr, value, res)  \
++#define     _LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, 7(%2)\n"              \
+                       "2:\tldr\t%0, (%2)\n\t"             \
+@@ -591,18 +591,18 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #else
+ /* MIPSR6 has not lwl and ldl instructions */
+-#define           LoadWU(addr, value, res) \
++#define           _LoadWU(addr, value, res, type) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+-                      "1:"user_lbu("%0", "3(%2)")"\n\t"   \
+-                      "2:"user_lbu("$1", "2(%2)")"\n\t"   \
++                      "1:"type##_lbu("%0", "3(%2)")"\n\t" \
++                      "2:"type##_lbu("$1", "2(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+-                      "3:"user_lbu("$1", "1(%2)")"\n\t"   \
++                      "3:"type##_lbu("$1", "1(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+-                      "4:"user_lbu("$1", "0(%2)")"\n\t"   \
++                      "4:"type##_lbu("$1", "0(%2)")"\n\t" \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+@@ -622,7 +622,7 @@ extern void show_registers(struct pt_reg
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+-#define     LoadDW(addr, value, res)  \
++#define     _LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -670,12 +670,12 @@ extern void show_registers(struct pt_reg
+                       : "r" (addr), "i" (-EFAULT));
+ #endif /* CONFIG_CPU_MIPSR6 */
+-#define     StoreHW(addr, value, res) \
++#define     _StoreHW(addr, value, res, type) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+-                      "1:\t"user_sb("%1", "0(%2)")"\n"    \
++                      "1:\t"type##_sb("%1", "0(%2)")"\n"  \
+                       "srl\t$1,%1, 0x8\n"                 \
+-                      "2:\t"user_sb("$1", "1(%2)")"\n"    \
++                      "2:\t"type##_sb("$1", "1(%2)")"\n"  \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+@@ -691,10 +691,10 @@ extern void show_registers(struct pt_reg
+                       : "=r" (res)                        \
+                       : "r" (value), "r" (addr), "i" (-EFAULT));
+ #ifndef CONFIG_CPU_MIPSR6
+-#define     StoreW(addr, value, res)  \
++#define     _StoreW(addr, value, res, type)  \
+               __asm__ __volatile__ (                      \
+-                      "1:\t"user_swl("%1", "3(%2)")"\n"   \
+-                      "2:\t"user_swr("%1", "(%2)")"\n\t"  \
++                      "1:\t"type##_swl("%1", "3(%2)")"\n" \
++                      "2:\t"type##_swr("%1", "(%2)")"\n\t"\
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+@@ -709,7 +709,7 @@ extern void show_registers(struct pt_reg
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+-#define     StoreDW(addr, value, res) \
++#define     _StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1, 7(%2)\n"              \
+                       "2:\tsdr\t%1, (%2)\n\t"             \
+@@ -728,17 +728,17 @@ extern void show_registers(struct pt_reg
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+ #else
+ /* MIPSR6 has no swl and sdl instructions */
+-#define     StoreW(addr, value, res)  \
++#define     _StoreW(addr, value, res, type)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+-                      "1:"user_sb("%1", "0(%2)")"\n\t"    \
++                      "1:"type##_sb("%1", "0(%2)")"\n\t"  \
+                       "srl\t$1, %1, 0x8\n\t"              \
+-                      "2:"user_sb("$1", "1(%2)")"\n\t"    \
++                      "2:"type##_sb("$1", "1(%2)")"\n\t"  \
+                       "srl\t$1, $1,  0x8\n\t"             \
+-                      "3:"user_sb("$1", "2(%2)")"\n\t"    \
++                      "3:"type##_sb("$1", "2(%2)")"\n\t"  \
+                       "srl\t$1, $1, 0x8\n\t"              \
+-                      "4:"user_sb("$1", "3(%2)")"\n\t"    \
++                      "4:"type##_sb("$1", "3(%2)")"\n\t"  \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+@@ -757,7 +757,7 @@ extern void show_registers(struct pt_reg
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+-#define     StoreDW(addr, value, res) \
++#define     _StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -801,6 +801,22 @@ extern void show_registers(struct pt_reg
+ #endif /* CONFIG_CPU_MIPSR6 */
+ #endif
++#define LoadHWU(addr, value, res)     _LoadHWU(addr, value, res, kernel)
++#define LoadHWUE(addr, value, res)    _LoadHWU(addr, value, res, user)
++#define LoadWU(addr, value, res)      _LoadWU(addr, value, res, kernel)
++#define LoadWUE(addr, value, res)     _LoadWU(addr, value, res, user)
++#define LoadHW(addr, value, res)      _LoadHW(addr, value, res, kernel)
++#define LoadHWE(addr, value, res)     _LoadHW(addr, value, res, user)
++#define LoadW(addr, value, res)               _LoadW(addr, value, res, kernel)
++#define LoadWE(addr, value, res)      _LoadW(addr, value, res, user)
++#define LoadDW(addr, value, res)      _LoadDW(addr, value, res)
++
++#define StoreHW(addr, value, res)     _StoreHW(addr, value, res, kernel)
++#define StoreHWE(addr, value, res)    _StoreHW(addr, value, res, user)
++#define StoreW(addr, value, res)      _StoreW(addr, value, res, kernel)
++#define StoreWE(addr, value, res)     _StoreW(addr, value, res, user)
++#define StoreDW(addr, value, res)     _StoreDW(addr, value, res)
++
+ static void emulate_load_store_insn(struct pt_regs *regs,
+       void __user *addr, unsigned int __user *pc)
+ {
+@@ -872,7 +888,7 @@ static void emulate_load_store_insn(stru
+                               set_fs(seg);
+                               goto sigbus;
+                       }
+-                      LoadHW(addr, value, res);
++                      LoadHWE(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
+@@ -885,7 +901,7 @@ static void emulate_load_store_insn(stru
+                               set_fs(seg);
+                               goto sigbus;
+                       }
+-                              LoadW(addr, value, res);
++                              LoadWE(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
+@@ -898,7 +914,7 @@ static void emulate_load_store_insn(stru
+                               set_fs(seg);
+                               goto sigbus;
+                       }
+-                      LoadHWU(addr, value, res);
++                      LoadHWUE(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
+@@ -913,7 +929,7 @@ static void emulate_load_store_insn(stru
+                       }
+                       compute_return_epc(regs);
+                       value = regs->regs[insn.spec3_format.rt];
+-                      StoreHW(addr, value, res);
++                      StoreHWE(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
+@@ -926,7 +942,7 @@ static void emulate_load_store_insn(stru
+                       }
+                       compute_return_epc(regs);
+                       value = regs->regs[insn.spec3_format.rt];
+-                      StoreW(addr, value, res);
++                      StoreWE(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
diff --git a/queue-4.0/mips-unaligned-surround-load-store-macros-in-do-while-statements.patch b/queue-4.0/mips-unaligned-surround-load-store-macros-in-do-while-statements.patch
new file mode 100644 (file)
index 0000000..0dc42d5
--- /dev/null
@@ -0,0 +1,400 @@
+From 3563c32d6532ece53c9dd8905a8e41983ef9952f Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Mon, 9 Mar 2015 14:54:51 +0000
+Subject: MIPS: unaligned: Surround load/store macros in do {} while statements
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 3563c32d6532ece53c9dd8905a8e41983ef9952f upstream.
+
+It's best to surround such complex macros with do {} while (0)
+statements so that they expand to a single logical block when used
+inside other control statements.
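+
+The hazard avoided is the classic multi-statement-macro problem in
+unbraced conditionals. A minimal, self-contained illustration
+(hypothetical helpers, not from this patch):
+
+    #include <stdio.h>
+
+    static void stmt_a(void) { puts("a"); }
+    static void stmt_b(void) { puts("b"); }
+
+    /* Without the wrapper, only stmt_a() would be guarded by the if
+     * below, and the expansion's trailing semicolon would detach an
+     * else clause entirely. */
+    #define EMIT_TWO() do { stmt_a(); stmt_b(); } while (0)
+
+    int main(void)
+    {
+        if (0)
+            EMIT_TWO();     /* one statement: neither helper runs */
+        else
+            puts("else still binds to the if");
+        return 0;
+    }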
+
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9502/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/unaligned.c |  116 +++++++++++++++++++++++++++++++++----------
+ 1 file changed, 90 insertions(+), 26 deletions(-)
+
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -110,6 +110,7 @@ extern void show_registers(struct pt_reg
+ #ifdef __BIG_ENDIAN
+ #define     _LoadHW(addr, value, res, type)  \
++do {                                                        \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\t"type##_lb("%0", "0(%2)")"\n"  \
+                       "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
+@@ -127,10 +128,12 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
+ #ifndef CONFIG_CPU_MIPSR6
+ #define     _LoadW(addr, value, res, type)   \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\t"type##_lwl("%0", "(%2)")"\n"   \
+                       "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+@@ -146,10 +149,13 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
++
+ #else
+ /* MIPSR6 has no lwl instruction */
+ #define     _LoadW(addr, value, res, type) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+@@ -178,10 +184,13 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+ #define     _LoadHWU(addr, value, res, type) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\t"type##_lbu("%0", "0(%2)")"\n" \
+@@ -201,10 +210,12 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
+ #ifndef CONFIG_CPU_MIPSR6
+ #define     _LoadWU(addr, value, res, type)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\t"type##_lwl("%0", "(%2)")"\n"  \
+                       "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+@@ -222,9 +233,11 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
+ #define     _LoadDW(addr, value, res)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, (%2)\n"               \
+                       "2:\tldr\t%0, 7(%2)\n\t"            \
+@@ -240,10 +253,13 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
++
+ #else
+ /* MIPSR6 has not lwl and ldl instructions */
+ #define           _LoadWU(addr, value, res, type) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -272,9 +288,11 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
+ #define     _LoadDW(addr, value, res)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -319,11 +337,14 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+ #define     _StoreHW(addr, value, res, type) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\t"type##_sb("%1", "1(%2)")"\n"  \
+@@ -342,10 +363,12 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+-                      : "r" (value), "r" (addr), "i" (-EFAULT));
++                      : "r" (value), "r" (addr), "i" (-EFAULT));\
++} while(0)
+ #ifndef CONFIG_CPU_MIPSR6
+ #define     _StoreW(addr, value, res, type)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\t"type##_swl("%1", "(%2)")"\n"  \
+                       "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
+@@ -361,9 +384,11 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+-              : "r" (value), "r" (addr), "i" (-EFAULT));
++              : "r" (value), "r" (addr), "i" (-EFAULT));  \
++} while(0)
+ #define     _StoreDW(addr, value, res) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1,(%2)\n"                \
+                       "2:\tsdr\t%1, 7(%2)\n\t"            \
+@@ -379,10 +404,13 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+-              : "r" (value), "r" (addr), "i" (-EFAULT));
++              : "r" (value), "r" (addr), "i" (-EFAULT));  \
++} while(0)
++
+ #else
+ /* MIPSR6 has no swl and sdl instructions */
+ #define     _StoreW(addr, value, res, type)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -409,9 +437,11 @@ extern void show_registers(struct pt_reg
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+-              : "memory");
++              : "memory");                                \
++} while(0)
+ #define     StoreDW(addr, value, res) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -451,12 +481,15 @@ extern void show_registers(struct pt_reg
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+-              : "memory");
++              : "memory");                                \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+ #else /* __BIG_ENDIAN */
+ #define     _LoadHW(addr, value, res, type)  \
++do {                                                        \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\t"type##_lb("%0", "1(%2)")"\n"  \
+                       "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
+@@ -474,10 +507,12 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
+ #ifndef CONFIG_CPU_MIPSR6
+ #define     _LoadW(addr, value, res, type)   \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+                       "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+@@ -493,10 +528,13 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
++
+ #else
+ /* MIPSR6 has no lwl instruction */
+ #define     _LoadW(addr, value, res, type) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+@@ -525,11 +563,14 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+ #define     _LoadHWU(addr, value, res, type) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\t"type##_lbu("%0", "1(%2)")"\n" \
+@@ -549,10 +590,12 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
+ #ifndef CONFIG_CPU_MIPSR6
+ #define     _LoadWU(addr, value, res, type)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+                       "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+@@ -570,9 +613,11 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
+ #define     _LoadDW(addr, value, res)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, 7(%2)\n"              \
+                       "2:\tldr\t%0, (%2)\n\t"             \
+@@ -588,10 +633,13 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
++
+ #else
+ /* MIPSR6 has not lwl and ldl instructions */
+ #define           _LoadWU(addr, value, res, type) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -620,9 +668,11 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
+ #define     _LoadDW(addr, value, res)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -667,10 +717,12 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+-                      : "r" (addr), "i" (-EFAULT));
++                      : "r" (addr), "i" (-EFAULT));       \
++} while(0)
+ #endif /* CONFIG_CPU_MIPSR6 */
+ #define     _StoreHW(addr, value, res, type) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\t"type##_sb("%1", "0(%2)")"\n"  \
+@@ -689,9 +741,12 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+-                      : "r" (value), "r" (addr), "i" (-EFAULT));
++                      : "r" (value), "r" (addr), "i" (-EFAULT));\
++} while(0)
++
+ #ifndef CONFIG_CPU_MIPSR6
+ #define     _StoreW(addr, value, res, type)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\t"type##_swl("%1", "3(%2)")"\n" \
+                       "2:\t"type##_swr("%1", "(%2)")"\n\t"\
+@@ -707,9 +762,11 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+-              : "r" (value), "r" (addr), "i" (-EFAULT));
++              : "r" (value), "r" (addr), "i" (-EFAULT));  \
++} while(0)
+ #define     _StoreDW(addr, value, res) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1, 7(%2)\n"              \
+                       "2:\tsdr\t%1, (%2)\n\t"             \
+@@ -725,10 +782,13 @@ extern void show_registers(struct pt_reg
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+-              : "r" (value), "r" (addr), "i" (-EFAULT));
++              : "r" (value), "r" (addr), "i" (-EFAULT));  \
++} while(0)
++
+ #else
+ /* MIPSR6 has no swl and sdl instructions */
+ #define     _StoreW(addr, value, res, type)  \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -755,9 +815,11 @@ extern void show_registers(struct pt_reg
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+-              : "memory");
++              : "memory");                                \
++} while(0)
+ #define     _StoreDW(addr, value, res) \
++do {                                                        \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+@@ -797,7 +859,9 @@ extern void show_registers(struct pt_reg
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+-              : "memory");
++              : "memory");                                \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+ #endif
diff --git a/queue-4.0/s390-hibernate-fix-save-and-restore-of-kernel-text-section.patch b/queue-4.0/s390-hibernate-fix-save-and-restore-of-kernel-text-section.patch
new file mode 100644 (file)
index 0000000..8fc8daa
--- /dev/null
@@ -0,0 +1,74 @@
+From d74419495633493c9cd3f2bbeb7f3529d0edded6 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Wed, 25 Mar 2015 10:13:33 +0100
+Subject: s390/hibernate: fix save and restore of kernel text section
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit d74419495633493c9cd3f2bbeb7f3529d0edded6 upstream.
+
+Sebastian reported a crash caused by a jump label mismatch after resume.
+This happens because we do not save the kernel text section during suspend,
+and therefore do not restore it during resume either; instead the kernel
+image of the system performing the restore stays in place.
+
+This means that after a suspend/resume cycle we lose all modifications made
+to the kernel text section.
+The reason for this is the pfn_is_nosave() function, which incorrectly
+reports that read-only pages do not need to be saved. This is wrong, since
+we mark the kernel text section read-only.
+We still need to make sure not to save and restore pages contained within
+the NSS and DCSS segments.
+To fix this, add an extra case for the kernel text section and save those
+pages only if they are not contained within an NSS segment.
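+
+The resulting classification can be sketched in plain C (the pfn bounds
+and the NSS flag below are illustrative stand-ins, not the real s390
+symbols):
+
+    /* Sketch of the fixed decision for kernel text pages: save them
+     * unless the kernel was IPL'ed from an NSS, in which case they
+     * live in the shared segment and must be skipped ("nosave"). */
+    static int text_pfn_is_nosave(unsigned long pfn,
+                                  unsigned long stext_pfn,
+                                  unsigned long eshared_pfn,
+                                  int ipl_from_nss)
+    {
+        if (pfn >= stext_pfn && pfn <= eshared_pfn)
+            return ipl_from_nss ? 1 : 0;
+        /* lowcore, __nosave section and tprot() checks elided */
+        return 0;
+    }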
+
+Fixes the following crash (and the above bugs as well):
+
+Jump label code mismatch at netif_receive_skb_internal+0x28/0xd0
+Found:    c0 04 00 00 00 00
+Expected: c0 f4 00 00 00 11
+New:      c0 04 00 00 00 00
+Kernel panic - not syncing: Corrupted kernel text
+CPU: 0 PID: 9 Comm: migration/0 Not tainted 3.19.0-01975-gb1b096e70f23 #4
+Call Trace:
+  [<0000000000113972>] show_stack+0x72/0xf0
+  [<000000000081f15e>] dump_stack+0x6e/0x90
+  [<000000000081c4e8>] panic+0x108/0x2b0
+  [<000000000081be64>] jump_label_bug.isra.2+0x104/0x108
+  [<0000000000112176>] __jump_label_transform+0x9e/0xd0
+  [<00000000001121e6>] __sm_arch_jump_label_transform+0x3e/0x50
+  [<00000000001d1136>] multi_cpu_stop+0x12e/0x170
+  [<00000000001d1472>] cpu_stopper_thread+0xb2/0x168
+  [<000000000015d2ac>] smpboot_thread_fn+0x134/0x1b0
+  [<0000000000158baa>] kthread+0x10a/0x110
+  [<0000000000824a86>] kernel_thread_starter+0x6/0xc
+
+Reported-and-tested-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/suspend.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/s390/kernel/suspend.c
++++ b/arch/s390/kernel/suspend.c
+@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
+ {
+       unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+       unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
++      unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
++      unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+       /* Always save lowcore pages (LC protection might be enabled). */
+       if (pfn <= LC_PAGES)
+@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
+       if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+               return 1;
+       /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
++      if (pfn >= stext_pfn && pfn <= eshared_pfn)
++              return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
+       if (tprot(PFN_PHYS(pfn)))
+               return 1;
+       return 0;
diff --git a/queue-4.0/series b/queue-4.0/series
index 9f6dac8c3a8d33825e3fb0ec350f4960ed4dcae4..6c622fe4f03e44b460362db70d8e2ea6677edb99 100644 (file)
@@ -24,3 +24,20 @@ btrfs-fix-inode-eviction-infinite-loop-after-extent_same-ioctl.patch
 mm-hugetlb-use-pmd_page-in-follow_huge_pmd.patch
 powerpc-hugetlb-call-mm_dec_nr_pmds-in-hugetlb_free_pmd_range.patch
 usb-gadget-printer-enqueue-printer-s-response-for-setup-request.patch
+kvm-s390-fix-handling-of-write-errors-in-the-tpi-handler.patch
+kvm-s390-reinjection-of-irqs-can-fail-in-the-tpi-handler.patch
+kvm-s390-zero-out-current-vmdb-of-stsi-before-including-level3-data.patch
+kvm-s390-fix-get_all_floating_irqs.patch
+s390-hibernate-fix-save-and-restore-of-kernel-text-section.patch
+kvm-use-slowpath-for-cross-page-cached-accesses.patch
+kvm-arm-arm64-check-irq-number-on-userland-injection.patch
+kvm-vmx-preserve-host-cr4.mce-value-while-in-guest-mode.patch
+mips-kvm-handle-msa-disabled-exceptions-from-guest.patch
+mips-lose_fpu-disable-fpu-when-msa-enabled.patch
+mips-malta-detect-and-fix-bad-memsize-values.patch
+mips-asm-asm-eva-introduce-kernel-load-store-variants.patch
+mips-unaligned-prevent-eva-instructions-on-kernel-unaligned-accesses.patch
+mips-unaligned-surround-load-store-macros-in-do-while-statements.patch
+mips-unaligned-fix-regular-load-store-instruction-emulation-for-eva.patch
+mips-loongson-3-add-irqf_no_suspend-to-cascade-irqaction.patch
+mips-hibernate-flush-tlb-entries-earlier.patch