git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.0-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 1 Apr 2013 23:39:35 +0000 (16:39 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 1 Apr 2013 23:39:35 +0000 (16:39 -0700)
added patches:
batman-adv-bat_socket_read-missing-checks.patch
batman-adv-only-write-requested-number-of-byte-to-user-buffer.patch
kvm-clean-up-error-handling-during-vcpu-creation.patch
kvm-ensure-all-vcpus-are-consistent-with-in-kernel-irqchip-settings.patch
kvm-fix-buffer-overflow-in-kvm_set_irq.patch
kvm-x86-invalid-opcode-oops-on-set_sregs-with-osxsave-bit-set-cve-2012-4461.patch
kvm-x86-prevent-starting-pit-timers-in-the-absence-of-irqchip-support.patch
macvtap-zerocopy-validate-vectors-before-building-skb.patch
mm-hotplug-correctly-add-new-zone-to-all-other-nodes-zone-lists.patch
nfs-nfs_getaclargs.acl_len-is-a-size_t.patch
nfsv4-fix-an-oops-in-the-nfsv4-getacl-code.patch
nfsv4-include-bitmap-in-nfsv4-get-acl-data.patch
x25-handle-undersized-fragmented-skbs.patch
x25-validate-incoming-call-user-data-lengths.patch

15 files changed:
queue-3.0/batman-adv-bat_socket_read-missing-checks.patch [new file with mode: 0644]
queue-3.0/batman-adv-only-write-requested-number-of-byte-to-user-buffer.patch [new file with mode: 0644]
queue-3.0/kvm-clean-up-error-handling-during-vcpu-creation.patch [new file with mode: 0644]
queue-3.0/kvm-ensure-all-vcpus-are-consistent-with-in-kernel-irqchip-settings.patch [new file with mode: 0644]
queue-3.0/kvm-fix-buffer-overflow-in-kvm_set_irq.patch [new file with mode: 0644]
queue-3.0/kvm-x86-invalid-opcode-oops-on-set_sregs-with-osxsave-bit-set-cve-2012-4461.patch [new file with mode: 0644]
queue-3.0/kvm-x86-prevent-starting-pit-timers-in-the-absence-of-irqchip-support.patch [new file with mode: 0644]
queue-3.0/macvtap-zerocopy-validate-vectors-before-building-skb.patch [new file with mode: 0644]
queue-3.0/mm-hotplug-correctly-add-new-zone-to-all-other-nodes-zone-lists.patch [new file with mode: 0644]
queue-3.0/nfs-nfs_getaclargs.acl_len-is-a-size_t.patch [new file with mode: 0644]
queue-3.0/nfsv4-fix-an-oops-in-the-nfsv4-getacl-code.patch [new file with mode: 0644]
queue-3.0/nfsv4-include-bitmap-in-nfsv4-get-acl-data.patch [new file with mode: 0644]
queue-3.0/series
queue-3.0/x25-handle-undersized-fragmented-skbs.patch [new file with mode: 0644]
queue-3.0/x25-validate-incoming-call-user-data-lengths.patch [new file with mode: 0644]

diff --git a/queue-3.0/batman-adv-bat_socket_read-missing-checks.patch b/queue-3.0/batman-adv-bat_socket_read-missing-checks.patch
new file mode 100644 (file)
index 0000000..df540c2
--- /dev/null
@@ -0,0 +1,39 @@
+From jslaby@suse.cz  Mon Apr  1 16:36:12 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:49 +0100
+Subject: batman-adv: bat_socket_read missing checks
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Paul Kot <pawlkt@gmail.com>, Sven Eckelmann <sven@narfation.org>, Marek Lindner <lindner_marek@yahoo.de>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-4-git-send-email-jslaby@suse.cz>
+
+
+From: Paul Kot <pawlkt@gmail.com>
+
+commit c00b6856fc642b234895cfabd15b289e76726430 upstream.
+
+Writing a icmp_packet_rr and then reading icmp_packet can lead to kernel
+memory corruption, if __user *buf is just below TASK_SIZE.
+
+Signed-off-by: Paul Kot <pawlkt@gmail.com>
+[sven@narfation.org: made it checkpatch clean]
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/icmp_socket.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/batman-adv/icmp_socket.c
++++ b/net/batman-adv/icmp_socket.c
+@@ -136,8 +136,8 @@ static ssize_t bat_socket_read(struct fi
+       spin_unlock_bh(&socket_client->lock);
+-      error = __copy_to_user(buf, &socket_packet->icmp_packet,
+-                             socket_packet->icmp_len);
++      error = copy_to_user(buf, &socket_packet->icmp_packet,
++                           socket_packet->icmp_len);
+       packet_len = socket_packet->icmp_len;
+       kfree(socket_packet);
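
The whole fix above is the switch from __copy_to_user() to copy_to_user(). A minimal kernel-style sketch of the difference, with an illustrative helper name that is not taken from batman-adv:

/*
 * Minimal sketch (not the batman-adv code): copy_to_user() performs the
 * access_ok() range check on the user pointer itself, while
 * __copy_to_user() assumes the caller has already validated the range.
 * Both return the number of bytes that could not be copied (0 on success).
 */
#include <linux/uaccess.h>
#include <linux/errno.h>

static ssize_t give_packet_to_user(char __user *buf, const void *pkt,
				   size_t len)
{
	/* Safe even if buf sits just below TASK_SIZE: the built-in range
	 * check rejects a copy that would run past the user address space. */
	if (copy_to_user(buf, pkt, len))
		return -EFAULT;

	return len;
}
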
diff --git a/queue-3.0/batman-adv-only-write-requested-number-of-byte-to-user-buffer.patch b/queue-3.0/batman-adv-only-write-requested-number-of-byte-to-user-buffer.patch
new file mode 100644 (file)
index 0000000..d88a8a0
--- /dev/null
@@ -0,0 +1,40 @@
+From jslaby@suse.cz  Mon Apr  1 16:36:25 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:50 +0100
+Subject: batman-adv: Only write requested number of byte to user buffer
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Sven Eckelmann <sven@narfation.org>, Marek Lindner <lindner_marek@yahoo.de>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-5-git-send-email-jslaby@suse.cz>
+
+
+From: Sven Eckelmann <sven@narfation.org>
+
+commit b5a1eeef04cc7859f34dec9b72ea1b28e4aba07c upstream.
+
+Don't write more than the requested number of bytes of a batman-adv icmp
+packet to the userspace buffer. Otherwise unrelated userspace memory might get
+overridden by the kernel.
+
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/icmp_socket.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/net/batman-adv/icmp_socket.c
++++ b/net/batman-adv/icmp_socket.c
+@@ -136,10 +136,9 @@ static ssize_t bat_socket_read(struct fi
+       spin_unlock_bh(&socket_client->lock);
+-      error = copy_to_user(buf, &socket_packet->icmp_packet,
+-                           socket_packet->icmp_len);
++      packet_len = min(count, socket_packet->icmp_len);
++      error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
+-      packet_len = socket_packet->icmp_len;
+       kfree(socket_packet);
+       if (error)
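
The clamping idiom the patch introduces, shown on its own as a hypothetical read() helper (count is the size the caller passed in, record_len the length of the queued packet; none of the names below come from the patch):

/*
 * Illustrative read()-style helper, not the batman-adv code: never copy
 * more than the caller's buffer can hold, even when the queued record is
 * larger, and report how many bytes were actually written.
 */
#include <linux/kernel.h>	/* min() lives here in 3.0-era kernels */
#include <linux/uaccess.h>
#include <linux/errno.h>

static ssize_t copy_record_to_user(char __user *buf, size_t count,
				   const void *record, size_t record_len)
{
	size_t n = min(count, record_len);	/* clamp to the user buffer */

	if (copy_to_user(buf, record, n))
		return -EFAULT;

	return n;
}
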
diff --git a/queue-3.0/kvm-clean-up-error-handling-during-vcpu-creation.patch b/queue-3.0/kvm-clean-up-error-handling-during-vcpu-creation.patch
new file mode 100644 (file)
index 0000000..750e7fd
--- /dev/null
@@ -0,0 +1,86 @@
+From jslaby@suse.cz  Mon Apr  1 16:35:27 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:46 +0100
+Subject: KVM: Clean up error handling during VCPU creation
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Jan Kiszka <jan.kiszka@siemens.com>, Avi Kivity <avi@redhat.com>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-1-git-send-email-jslaby@suse.cz>
+
+
+From: Jan Kiszka <jan.kiszka@siemens.com>
+
+commit d780592b99d7d8a5ff905f6bacca519d4a342c76 upstream.
+
+So far kvm_arch_vcpu_setup is responsible for freeing the vcpu struct if
+it fails. Move this confusing responsibility back into the hands of
+kvm_vm_ioctl_create_vcpu. Only kvm_arch_vcpu_setup of x86 is affected,
+all other archs cannot fail.
+
+Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c  |    5 -----
+ virt/kvm/kvm_main.c |   11 ++++++-----
+ 2 files changed, 6 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6116,12 +6116,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu
+       if (r == 0)
+               r = kvm_mmu_setup(vcpu);
+       vcpu_put(vcpu);
+-      if (r < 0)
+-              goto free_vcpu;
+-      return 0;
+-free_vcpu:
+-      kvm_x86_ops->vcpu_free(vcpu);
+       return r;
+ }
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1616,18 +1616,18 @@ static int kvm_vm_ioctl_create_vcpu(stru
+       r = kvm_arch_vcpu_setup(vcpu);
+       if (r)
+-              return r;
++              goto vcpu_destroy;
+       mutex_lock(&kvm->lock);
+       if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+               r = -EINVAL;
+-              goto vcpu_destroy;
++              goto unlock_vcpu_destroy;
+       }
+       kvm_for_each_vcpu(r, v, kvm)
+               if (v->vcpu_id == id) {
+                       r = -EEXIST;
+-                      goto vcpu_destroy;
++                      goto unlock_vcpu_destroy;
+               }
+       BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
+@@ -1637,7 +1637,7 @@ static int kvm_vm_ioctl_create_vcpu(stru
+       r = create_vcpu_fd(vcpu);
+       if (r < 0) {
+               kvm_put_kvm(kvm);
+-              goto vcpu_destroy;
++              goto unlock_vcpu_destroy;
+       }
+       kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+@@ -1651,8 +1651,9 @@ static int kvm_vm_ioctl_create_vcpu(stru
+       mutex_unlock(&kvm->lock);
+       return r;
+-vcpu_destroy:
++unlock_vcpu_destroy:
+       mutex_unlock(&kvm->lock);
++vcpu_destroy:
+       kvm_arch_vcpu_destroy(vcpu);
+       return r;
+ }
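
The point of the relabelled error paths is that the mutex is only unlocked on paths that actually took it. A standalone model of that two-label unwind, with setup_vcpu()/register_vcpu() as made-up stand-ins for the real KVM steps:

/*
 * Standalone model of the error-path layout the patch introduces.
 * Failures before the lock jump to "destroy"; failures while the lock
 * is held jump to "unlock_destroy", which drops the lock first.
 */
#include <pthread.h>
#include <stdlib.h>

struct vm { pthread_mutex_t lock; int nr_vcpus; };

static int setup_vcpu(void *vcpu) { return 0; }      /* stand-in */

static int register_vcpu(struct vm *vm, void *vcpu)  /* stand-in */
{
	if (vm->nr_vcpus >= 4)
		return -1;                            /* table full */
	vm->nr_vcpus++;
	return 0;
}

static int create_vcpu(struct vm *vm, void **out)
{
	void *vcpu = malloc(64);
	int r = -1;

	if (!vcpu)
		return r;

	r = setup_vcpu(vcpu);         /* may fail before the lock is taken */
	if (r)
		goto destroy;

	pthread_mutex_lock(&vm->lock);
	r = register_vcpu(vm, vcpu);  /* may fail while the lock is held */
	if (r)
		goto unlock_destroy;
	pthread_mutex_unlock(&vm->lock);

	*out = vcpu;
	return 0;

unlock_destroy:
	pthread_mutex_unlock(&vm->lock);
destroy:
	free(vcpu);
	return r;
}
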
diff --git a/queue-3.0/kvm-ensure-all-vcpus-are-consistent-with-in-kernel-irqchip-settings.patch b/queue-3.0/kvm-ensure-all-vcpus-are-consistent-with-in-kernel-irqchip-settings.patch
new file mode 100644 (file)
index 0000000..121a8c9
--- /dev/null
@@ -0,0 +1,104 @@
+From jslaby@suse.cz  Mon Apr  1 16:37:16 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:55 +0100
+Subject: KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Avi Kivity <avi@redhat.com>, Michael Ellerman <michael@ellerman.id.au>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-10-git-send-email-jslaby@suse.cz>
+
+
+From: Avi Kivity <avi@redhat.com>
+
+commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e upstream.
+
+If some vcpus are created before KVM_CREATE_IRQCHIP, then
+irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
+to potential NULL pointer dereferences.
+
+Fix by:
+- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
+- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
+
+This is somewhat long winded because vcpu->arch.apic is created without
+kvm->lock held.
+
+Based on earlier patch by Michael Ellerman.
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/ia64/kvm/kvm-ia64.c |    5 +++++
+ arch/x86/kvm/x86.c       |    8 ++++++++
+ include/linux/kvm_host.h |    7 +++++++
+ virt/kvm/kvm_main.c      |    4 ++++
+ 4 files changed, 24 insertions(+)
+
+--- a/arch/ia64/kvm/kvm-ia64.c
++++ b/arch/ia64/kvm/kvm-ia64.c
+@@ -1168,6 +1168,11 @@ out:
+ #define PALE_RESET_ENTRY    0x80000000ffffffb0UL
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
++{
++      return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
++}
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_vcpu *v;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3410,6 +3410,9 @@ long kvm_arch_vm_ioctl(struct file *filp
+               r = -EEXIST;
+               if (kvm->arch.vpic)
+                       goto create_irqchip_unlock;
++              r = -EINVAL;
++              if (atomic_read(&kvm->online_vcpus))
++                      goto create_irqchip_unlock;
+               r = -ENOMEM;
+               vpic = kvm_create_pic(kvm);
+               if (vpic) {
+@@ -6189,6 +6192,11 @@ void kvm_arch_check_processor_compat(voi
+       kvm_x86_ops->check_processor_compatibility(rtn);
+ }
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
++{
++      return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
++}
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+       struct page *page;
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -736,6 +736,13 @@ static inline bool kvm_vcpu_is_bsp(struc
+ {
+       return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
+ }
++
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
++
++#else
++
++static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
++
+ #endif
+ #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1619,6 +1619,10 @@ static int kvm_vm_ioctl_create_vcpu(stru
+               goto vcpu_destroy;
+       mutex_lock(&kvm->lock);
++      if (!kvm_vcpu_compatible(vcpu)) {
++              r = -EINVAL;
++              goto unlock_vcpu_destroy;
++      }
+       if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+               r = -EINVAL;
+               goto unlock_vcpu_destroy;
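
The rule being enforced can be stated as a small toy model; the struct and function names below are made up, only the invariant mirrors the patch:

/*
 * Toy model of the invariant: a vcpu is compatible with its VM only when
 * "in-kernel irqchip present" and "vcpu has a local APIC" agree, and the
 * irqchip can no longer be created once any vcpu exists.
 */
#include <stdbool.h>

struct toy_vm   { bool irqchip_in_kernel; int nr_vcpus; };
struct toy_vcpu { bool has_apic; };

static bool toy_vcpu_compatible(const struct toy_vm *vm,
				const struct toy_vcpu *v)
{
	return vm->irqchip_in_kernel == v->has_apic;	/* both or neither */
}

static int toy_create_irqchip(struct toy_vm *vm)
{
	if (vm->nr_vcpus > 0)
		return -1;	/* too late: existing vcpus have no APIC */
	vm->irqchip_in_kernel = true;
	return 0;
}

static int toy_add_vcpu(struct toy_vm *vm, const struct toy_vcpu *v)
{
	if (!toy_vcpu_compatible(vm, v))
		return -1;	/* would leave irqchip state inconsistent */
	vm->nr_vcpus++;
	return 0;
}
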
diff --git a/queue-3.0/kvm-fix-buffer-overflow-in-kvm_set_irq.patch b/queue-3.0/kvm-fix-buffer-overflow-in-kvm_set_irq.patch
new file mode 100644 (file)
index 0000000..292517c
--- /dev/null
@@ -0,0 +1,37 @@
+From jslaby@suse.cz  Mon Apr  1 16:37:40 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:57 +0100
+Subject: KVM: Fix buffer overflow in kvm_set_irq()
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Avi Kivity <avi@redhat.com>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-12-git-send-email-jslaby@suse.cz>
+
+
+From: Avi Kivity <avi@redhat.com>
+
+commit f2ebd422f71cda9c791f76f85d2ca102ae34a1ed upstream.
+
+kvm_set_irq() has an internal buffer of three irq routing entries, allowing
+connecting a GSI to three IRQ chips or one MSI.  However setup_routing_entry()
+does not properly enforce this, allowing three irqchip routes followed by
+an MSI route to overflow the buffer.
+
+Fix by ensuring that an MSI entry is added to an empty list.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/irq_comm.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -318,6 +318,7 @@ static int setup_routing_entry(struct kv
+        */
+       hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
+               if (ei->type == KVM_IRQ_ROUTING_MSI ||
++                  ue->type == KVM_IRQ_ROUTING_MSI ||
+                   ue->u.irqchip.irqchip == ei->irqchip.irqchip)
+                       return r;
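
The one added line makes the sharing test symmetric in the entry types. Stated on its own, with toy names:

/*
 * Standalone statement of the rule the fix enforces: an MSI routing
 * entry may never share a GSI with any other entry, regardless of
 * whether the MSI entry is the one already on the list or the one being
 * added.  Before the fix only the existing entry's type was checked, so
 * three irqchip routes followed by an MSI route slipped past the test.
 */
enum toy_route_type { TOY_ROUTE_IRQCHIP, TOY_ROUTE_MSI };

static int toy_may_share_gsi(enum toy_route_type existing,
			     enum toy_route_type incoming)
{
	if (existing == TOY_ROUTE_MSI || incoming == TOY_ROUTE_MSI)
		return 0;	/* MSI must be alone on its GSI */
	return 1;
}
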
diff --git a/queue-3.0/kvm-x86-invalid-opcode-oops-on-set_sregs-with-osxsave-bit-set-cve-2012-4461.patch b/queue-3.0/kvm-x86-invalid-opcode-oops-on-set_sregs-with-osxsave-bit-set-cve-2012-4461.patch
new file mode 100644 (file)
index 0000000..44a9a99
--- /dev/null
@@ -0,0 +1,83 @@
+From jslaby@suse.cz  Mon Apr  1 16:38:00 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:59 +0100
+Subject: KVM: x86: invalid opcode oops on SET_SREGS with OSXSAVE bit set (CVE-2012-4461)
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Petr Matousek <pmatouse@redhat.com>, Marcelo Tosatti <mtosatti@redhat.com>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-14-git-send-email-jslaby@suse.cz>
+
+
+From: Petr Matousek <pmatouse@redhat.com>
+
+commit 6d1068b3a98519247d8ba4ec85cd40ac136dbdf9 upstream.
+
+On hosts without the XSAVE support unprivileged local user can trigger
+oops similar to the one below by setting X86_CR4_OSXSAVE bit in guest
+cr4 register using KVM_SET_SREGS ioctl and later issuing KVM_RUN
+ioctl.
+
+invalid opcode: 0000 [#2] SMP
+Modules linked in: tun ip6table_filter ip6_tables ebtable_nat ebtables
+...
+Pid: 24935, comm: zoog_kvm_monito Tainted: G      D      3.2.0-3-686-pae
+EIP: 0060:[<f8b9550c>] EFLAGS: 00210246 CPU: 0
+EIP is at kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm]
+EAX: 00000001 EBX: 000f387e ECX: 00000000 EDX: 00000000
+ESI: 00000000 EDI: 00000000 EBP: ef5a0060 ESP: d7c63e70
+ DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
+Process zoog_kvm_monito (pid: 24935, ti=d7c62000 task=ed84a0c0
+task.ti=d7c62000)
+Stack:
+ 00000001 f70a1200 f8b940a9 ef5a0060 00000000 00200202 f8769009 00000000
+ ef5a0060 000f387e eda5c020 8722f9c8 00015bae 00000000 ed84a0c0 ed84a0c0
+ c12bf02d 0000ae80 ef7f8740 fffffffb f359b740 ef5a0060 f8b85dc1 0000ae80
+Call Trace:
+ [<f8b940a9>] ? kvm_arch_vcpu_ioctl_set_sregs+0x2fe/0x308 [kvm]
+...
+ [<c12bfb44>] ? syscall_call+0x7/0xb
+Code: 89 e8 e8 14 ee ff ff ba 00 00 04 00 89 e8 e8 98 48 ff ff 85 c0 74
+1e 83 7d 48 00 75 18 8b 85 08 07 00 00 31 c9 8b 95 0c 07 00 00 <0f> 01
+d1 c7 45 48 01 00 00 00 c7 45 1c 01 00 00 00 0f ae f0 89
+EIP: [<f8b9550c>] kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm] SS:ESP
+0068:d7c63e70
+
+QEMU first retrieves the supported features via KVM_GET_SUPPORTED_CPUID
+and then sets them later. So guest's X86_FEATURE_XSAVE should be masked
+out on hosts without X86_FEATURE_XSAVE, making kvm_set_cr4 with
+X86_CR4_OSXSAVE fail. Userspaces that allow specifying guest cpuid with
+X86_FEATURE_XSAVE even on hosts that do not support it, might be
+susceptible to this attack from inside the guest as well.
+
+Allow setting X86_CR4_OSXSAVE bit only if host has XSAVE support.
+
+Signed-off-by: Petr Matousek <pmatouse@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -575,6 +575,9 @@ static bool guest_cpuid_has_xsave(struct
+ {
+       struct kvm_cpuid_entry2 *best;
++      if (!cpu_has_xsave)
++              return 0;
++
+       best = kvm_find_cpuid_entry(vcpu, 1, 0);
+       return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+ }
+@@ -5854,6 +5857,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct
+       int pending_vec, max_bits, idx;
+       struct desc_ptr dt;
++      if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
++              return -EINVAL;
++
+       dt.size = sregs->idt.limit;
+       dt.address = sregs->idt.base;
+       kvm_x86_ops->set_idt(vcpu, &dt);
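
Reduced to its essentials, the new check refuses a CR4 image whose OSXSAVE bit (bit 18 of CR4) cannot be backed by XSAVE. A minimal sketch with made-up names:

/*
 * Minimal sketch of the added validation: a cr4 value arriving from user
 * space via KVM_SET_SREGS may only have OSXSAVE set when XSAVE is
 * actually available; otherwise the later xsetbv raises the
 * invalid-opcode fault seen in the oops above.
 */
#include <errno.h>

#define TOY_CR4_OSXSAVE (1UL << 18)

static int toy_validate_cr4(unsigned long cr4, int guest_has_xsave)
{
	if ((cr4 & TOY_CR4_OSXSAVE) && !guest_has_xsave)
		return -EINVAL;
	return 0;
}
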
diff --git a/queue-3.0/kvm-x86-prevent-starting-pit-timers-in-the-absence-of-irqchip-support.patch b/queue-3.0/kvm-x86-prevent-starting-pit-timers-in-the-absence-of-irqchip-support.patch
new file mode 100644 (file)
index 0000000..cb44ec2
--- /dev/null
@@ -0,0 +1,73 @@
+From jslaby@suse.cz  Mon Apr  1 16:36:34 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:51 +0100
+Subject: KVM: x86: Prevent starting PIT timers in the absence of irqchip support
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Jan Kiszka <jan.kiszka@siemens.com>, Marcelo Tosatti <mtosatti@redhat.com>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-6-git-send-email-jslaby@suse.cz>
+
+
+From: Jan Kiszka <jan.kiszka@siemens.com>
+
+commit 0924ab2cfa98b1ece26c033d696651fd62896c69 upstream.
+
+User space may create the PIT and forgets about setting up the irqchips.
+In that case, firing PIT IRQs will crash the host:
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000128
+IP: [<ffffffffa10f6280>] kvm_set_irq+0x30/0x170 [kvm]
+...
+Call Trace:
+ [<ffffffffa11228c1>] pit_do_work+0x51/0xd0 [kvm]
+ [<ffffffff81071431>] process_one_work+0x111/0x4d0
+ [<ffffffff81071bb2>] worker_thread+0x152/0x340
+ [<ffffffff81075c8e>] kthread+0x7e/0x90
+ [<ffffffff815a4474>] kernel_thread_helper+0x4/0x10
+
+Prevent this by checking the irqchip mode before starting a timer. We
+can't deny creating the PIT if the irqchips aren't set up yet as
+current user land expects this order to work.
+
+Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/i8254.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn
+               return HRTIMER_NORESTART;
+ }
+-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
++static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
+ {
++      struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+       struct kvm_timer *pt = &ps->pit_timer;
+       s64 interval;
++      if (!irqchip_in_kernel(kvm))
++              return;
++
+       interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
+       pr_debug("create pit timer, interval is %llu nsec\n", interval);
+@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *k
+         /* FIXME: enhance mode 4 precision */
+       case 4:
+               if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
+-                      create_pit_timer(ps, val, 0);
++                      create_pit_timer(kvm, val, 0);
+               }
+               break;
+       case 2:
+       case 3:
+               if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
+-                      create_pit_timer(ps, val, 1);
++                      create_pit_timer(kvm, val, 1);
+               }
+               break;
+       default:
diff --git a/queue-3.0/macvtap-zerocopy-validate-vectors-before-building-skb.patch b/queue-3.0/macvtap-zerocopy-validate-vectors-before-building-skb.patch
new file mode 100644 (file)
index 0000000..4df0e0b
--- /dev/null
@@ -0,0 +1,43 @@
+From jslaby@suse.cz  Mon Apr  1 16:37:28 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:56 +0100
+Subject: macvtap: zerocopy: validate vectors before building skb
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Jason Wang <jasowang@redhat.com>, "Michael S. Tsirkin" <mst@redhat.com>, Benjamin Poirier <bpoirier@suse.de>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-11-git-send-email-jslaby@suse.cz>
+
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit b92946e2919134ebe2a4083e4302236295ea2a73 upstream.
+
+There're several reasons that the vectors need to be validated:
+
+- Return error when caller provides vectors whose num is greater than UIO_MAXIOV.
+- Linearize part of skb when userspace provides vectors greater than MAX_SKB_FRAGS.
+- Return error when userspace provides vectors whose total length may exceed
+  MAX_SKB_FRAGS * PAGE_SIZE.
+
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Benjamin Poirier <bpoirier@suse.de> [patch reduced to
+                                       the 3rd reason only for 3.0]
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/macvtap.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -552,6 +552,10 @@ static ssize_t macvtap_get_user(struct m
+       if (unlikely(len < ETH_HLEN))
+               goto err;
++      err = -EMSGSIZE;
++      if (unlikely(count > UIO_MAXIOV))
++              goto err;
++
+       skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len,
+                               noblock, &err);
+       if (!skb)
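
The 3.0 backport keeps only the segment-count limit from the upstream change. Its shape, as a small illustrative check (UIO_MAXIOV is 1024 in the kernel; the wrapper name is made up):

/*
 * Illustrative form of the check the backport adds: a write arriving as
 * more iovec segments than the kernel-wide limit is rejected with
 * -EMSGSIZE before any skb is built from it.
 */
#include <errno.h>

#define TOY_UIO_MAXIOV 1024

static int toy_validate_segment_count(unsigned long count)
{
	if (count > TOY_UIO_MAXIOV)
		return -EMSGSIZE;
	return 0;
}
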
diff --git a/queue-3.0/mm-hotplug-correctly-add-new-zone-to-all-other-nodes-zone-lists.patch b/queue-3.0/mm-hotplug-correctly-add-new-zone-to-all-other-nodes-zone-lists.patch
new file mode 100644 (file)
index 0000000..62318d7
--- /dev/null
@@ -0,0 +1,87 @@
+From jslaby@suse.cz  Mon Apr  1 16:37:48 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:58 +0100
+Subject: mm/hotplug: correctly add new zone to all other nodes' zone lists
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Jiang Liu <jiang.liu@huawei.com>, Jianguo Wu <wujianguo@huawei.com>, Jiang Liu <liuj97@gmail.com>, Mel Gorman <mgorman@suse.de>, Michal Hocko <mhocko@suse.cz>, Minchan Kim <minchan@kernel.org>, Rusty Russell <rusty@rustcorp.com.au>, Yinghai Lu <yinghai@kernel.org>, Tony Luck <tony.luck@intel.com>, KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>, KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>, David Rientjes <rientjes@google.com>, Keping Chen <chenkeping@huawei.com>, Andrew Morton <akpm@linux-foundation.org>, Linus Torvalds <torvalds@linux-foundation.org>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-13-git-send-email-jslaby@suse.cz>
+
+
+From: Jiang Liu <jiang.liu@huawei.com>
+
+commit 08dff7b7d629807dbb1f398c68dd9cd58dd657a1 upstream.
+
+When online_pages() is called to add new memory to an empty zone, it
+rebuilds all zone lists by calling build_all_zonelists().  But there's a
+bug which prevents the new zone to be added to other nodes' zone lists.
+
+online_pages() {
+       build_all_zonelists()
+       .....
+       node_set_state(zone_to_nid(zone), N_HIGH_MEMORY)
+}
+
+Here the node of the zone is put into N_HIGH_MEMORY state after calling
+build_all_zonelists(), but build_all_zonelists() only adds zones from
+nodes in N_HIGH_MEMORY state to the fallback zone lists.
+build_all_zonelists()
+
+    ->__build_all_zonelists()
+       ->build_zonelists()
+           ->find_next_best_node()
+               ->for_each_node_state(n, N_HIGH_MEMORY)
+
+So memory in the new zone will never be used by other nodes, and it may
+cause strange behavior when the system is under memory pressure.  So put node
+into N_HIGH_MEMORY state before calling build_all_zonelists().
+
+Signed-off-by: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Jiang Liu <liuj97@gmail.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Yinghai Lu <yinghai@kernel.org>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Keping Chen <chenkeping@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory_hotplug.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -453,19 +453,20 @@ int __ref online_pages(unsigned long pfn
+       zone->present_pages += onlined_pages;
+       zone->zone_pgdat->node_present_pages += onlined_pages;
+-      if (need_zonelists_rebuild)
+-              build_all_zonelists(zone);
+-      else
+-              zone_pcp_update(zone);
++      if (onlined_pages) {
++              node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
++              if (need_zonelists_rebuild)
++                      build_all_zonelists(zone);
++              else
++                      zone_pcp_update(zone);
++      }
+       mutex_unlock(&zonelists_mutex);
+       init_per_zone_wmark_min();
+-      if (onlined_pages) {
++      if (onlined_pages)
+               kswapd_run(zone_to_nid(zone));
+-              node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+-      }
+       vm_total_pages = nr_free_pagecache_pages();
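
The reordering matters because the zonelist rebuild only considers nodes already flagged as having memory. A toy model of that dependency, with made-up names:

/*
 * Toy model of the ordering fixed here: the rebuild only includes nodes
 * whose "has memory" flag is already set, so the flag must be raised
 * before the rebuild, not after it as the old code did.
 */
#define TOY_MAX_NODES 4

static int toy_node_has_memory[TOY_MAX_NODES];
static int toy_fallback[TOY_MAX_NODES][TOY_MAX_NODES];

static void toy_rebuild_fallback_lists(void)
{
	int n, m, k;

	for (n = 0; n < TOY_MAX_NODES; n++) {
		k = 0;
		for (m = 0; m < TOY_MAX_NODES; m++)
			if (toy_node_has_memory[m])
				toy_fallback[n][k++] = m;
		for (; k < TOY_MAX_NODES; k++)
			toy_fallback[n][k] = -1;
	}
}

static void toy_online_node(int nid)
{
	toy_node_has_memory[nid] = 1;	/* flag the node first ...           */
	toy_rebuild_fallback_lists();	/* ... so every rebuilt list sees it */
}
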
diff --git a/queue-3.0/nfs-nfs_getaclargs.acl_len-is-a-size_t.patch b/queue-3.0/nfs-nfs_getaclargs.acl_len-is-a-size_t.patch
new file mode 100644 (file)
index 0000000..453ca50
--- /dev/null
@@ -0,0 +1,44 @@
+From jslaby@suse.cz  Mon Apr  1 16:37:06 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:54 +0100
+Subject: NFS: nfs_getaclargs.acl_len is a size_t
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Chuck Lever <chuck.lever@oracle.com>, Trond Myklebust <Trond.Myklebust@netapp.com>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-9-git-send-email-jslaby@suse.cz>
+
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 56d08fef2369d5ca9ad2e1fc697f5379fd8af751 upstream.
+
+Squelch compiler warnings:
+
+fs/nfs/nfs4proc.c: In function ‘__nfs4_get_acl_uncached’:
+fs/nfs/nfs4proc.c:3811:14: warning: comparison between signed and
+       unsigned integer expressions [-Wsign-compare]
+fs/nfs/nfs4proc.c:3818:15: warning: comparison between signed and
+       unsigned integer expressions [-Wsign-compare]
+
+Introduced by commit bf118a34 "NFSv4: include bitmap in nfsv4 get
+acl data", Dec 7, 2011.
+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/nfs4proc.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3563,7 +3563,8 @@ static ssize_t __nfs4_get_acl_uncached(s
+               .rpc_argp = &args,
+               .rpc_resp = &res,
+       };
+-      int ret = -ENOMEM, npages, i, acl_len = 0;
++      int ret = -ENOMEM, npages, i;
++      size_t acl_len = 0;
+       npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       /* As long as we're doing a round trip to the server anyway,
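
The warning class being silenced is easy to reproduce in isolation; a small self-contained example (build with -Wsign-compare), unrelated to the NFS code itself:

/*
 * Self-contained illustration of the warning: comparing a signed int
 * against a size_t trips -Wsign-compare, while declaring the length as
 * size_t, as the patch does, compares cleanly.
 */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t buflen      = 16;
	int    acl_len_int = 32;	/* signed length */
	size_t acl_len     = 32;	/* unsigned length */

	if (acl_len_int > buflen)	/* int vs size_t: warning fires */
		puts("truncating (signed length)");
	if (acl_len > buflen)		/* size_t vs size_t: no warning */
		puts("truncating (unsigned length)");
	return 0;
}
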
diff --git a/queue-3.0/nfsv4-fix-an-oops-in-the-nfsv4-getacl-code.patch b/queue-3.0/nfsv4-fix-an-oops-in-the-nfsv4-getacl-code.patch
new file mode 100644 (file)
index 0000000..9babc2d
--- /dev/null
@@ -0,0 +1,96 @@
+From jslaby@suse.cz  Mon Apr  1 16:36:55 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:53 +0100
+Subject: NFSv4: Fix an Oops in the NFSv4 getacl code
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Trond Myklebust <Trond.Myklebust@netapp.com>, Andy Adamson <andros@netapp.com>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-8-git-send-email-jslaby@suse.cz>
+
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 331818f1c468a24e581aedcbe52af799366a9dfe upstream.
+
+Commit bf118a342f10dafe44b14451a1392c3254629a1f (NFSv4: include bitmap
+in nfsv4 get acl data) introduces the 'acl_scratch' page for the case
+where we may need to decode multi-page data. However it fails to take
+into account the fact that the variable may be NULL (for the case where
+we're not doing multi-page decode), and it also attaches it to the
+encoding xdr_stream rather than the decoding one.
+
+The immediate result is an Oops in nfs4_xdr_enc_getacl due to the
+call to page_address() with a NULL page pointer.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Cc: Andy Adamson <andros@netapp.com>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/nfs4proc.c       |    8 ++++----
+ fs/nfs/nfs4xdr.c        |    5 ++++-
+ include/linux/nfs_xdr.h |    2 +-
+ 3 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3578,8 +3578,8 @@ static ssize_t __nfs4_get_acl_uncached(s
+       }
+       if (npages > 1) {
+               /* for decoding across pages */
+-              args.acl_scratch = alloc_page(GFP_KERNEL);
+-              if (!args.acl_scratch)
++              res.acl_scratch = alloc_page(GFP_KERNEL);
++              if (!res.acl_scratch)
+                       goto out_free;
+       }
+       args.acl_len = npages * PAGE_SIZE;
+@@ -3615,8 +3615,8 @@ out_free:
+       for (i = 0; i < npages; i++)
+               if (pages[i])
+                       __free_page(pages[i]);
+-      if (args.acl_scratch)
+-              __free_page(args.acl_scratch);
++      if (res.acl_scratch)
++              __free_page(res.acl_scratch);
+       return ret;
+ }
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -2379,7 +2379,6 @@ static void nfs4_xdr_enc_getacl(struct r
+       xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
+               args->acl_pages, args->acl_pgbase, args->acl_len);
+-      xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
+       encode_nops(&hdr);
+ }
+@@ -5688,6 +5687,10 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqs
+       struct compound_hdr hdr;
+       int status;
++      if (res->acl_scratch != NULL) {
++              void *p = page_address(res->acl_scratch);
++              xdr_set_scratch_buffer(xdr, p, PAGE_SIZE);
++      }
+       status = decode_compound_hdr(xdr, &hdr);
+       if (status)
+               goto out;
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -588,7 +588,6 @@ struct nfs_getaclargs {
+       size_t                          acl_len;
+       unsigned int                    acl_pgbase;
+       struct page **                  acl_pages;
+-      struct page *                   acl_scratch;
+       struct nfs4_sequence_args       seq_args;
+ };
+@@ -598,6 +597,7 @@ struct nfs_getaclres {
+       size_t                          acl_len;
+       size_t                          acl_data_offset;
+       int                             acl_flags;
++      struct page *                   acl_scratch;
+       struct nfs4_sequence_res        seq_res;
+ };
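
Both halves of the fix amount to one small guard: the scratch page is optional, so check it for NULL and attach it to the reply decoder rather than the request encoder. A kernel-style sketch; only the wrapper name is made up, xdr_set_scratch_buffer() and page_address() are the interfaces the patch itself uses:

/*
 * Sketch of the guard: the scratch page is only allocated for multi-page
 * replies, so it must be checked for NULL before page_address(), and it
 * belongs to the reply-decoding xdr_stream.
 */
#include <linux/mm.h>
#include <linux/sunrpc/xdr.h>

static void getacl_setup_scratch(struct xdr_stream *xdr, struct page *scratch)
{
	if (scratch == NULL)
		return;		/* single-page reply: nothing to attach */

	xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
}
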
diff --git a/queue-3.0/nfsv4-include-bitmap-in-nfsv4-get-acl-data.patch b/queue-3.0/nfsv4-include-bitmap-in-nfsv4-get-acl-data.patch
new file mode 100644 (file)
index 0000000..9bfb2b3
--- /dev/null
@@ -0,0 +1,307 @@
+From jslaby@suse.cz  Mon Apr  1 16:36:45 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:52 +0100
+Subject: NFSv4: include bitmap in nfsv4 get acl data
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Andy Adamson <andros@netapp.com>, stable@kernel.org, Trond Myklebust <Trond.Myklebust@netapp.com>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-7-git-send-email-jslaby@suse.cz>
+
+
+From: Andy Adamson <andros@netapp.com>
+
+commit bf118a342f10dafe44b14451a1392c3254629a1f upstream.
+
+The NFSv4 bitmap size is unbounded: a server can return an arbitrary
+sized bitmap in an FATTR4_WORD0_ACL request.  Replace using the
+nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server
+with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
+xdr length to the (cached) acl page data.
+
+This is a general solution to commit e5012d1f "NFSv4.1: update
+nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
+when getting ACLs.
+
+Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
+was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
+
+Signed-off-by: Andy Adamson <andros@netapp.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/nfs4proc.c          |   96 ++++++++++++++++++++++++++-------------------
+ fs/nfs/nfs4xdr.c           |   31 ++++++++++----
+ include/linux/nfs_xdr.h    |    5 ++
+ include/linux/sunrpc/xdr.h |    2 
+ net/sunrpc/xdr.c           |    3 -
+ 5 files changed, 89 insertions(+), 48 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3440,19 +3440,6 @@ static inline int nfs4_server_supports_a
+  */
+ #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
+-static void buf_to_pages(const void *buf, size_t buflen,
+-              struct page **pages, unsigned int *pgbase)
+-{
+-      const void *p = buf;
+-
+-      *pgbase = offset_in_page(buf);
+-      p -= *pgbase;
+-      while (p < buf + buflen) {
+-              *(pages++) = virt_to_page(p);
+-              p += PAGE_CACHE_SIZE;
+-      }
+-}
+-
+ static int buf_to_pages_noslab(const void *buf, size_t buflen,
+               struct page **pages, unsigned int *pgbase)
+ {
+@@ -3549,9 +3536,19 @@ out:
+       nfs4_set_cached_acl(inode, acl);
+ }
++/*
++ * The getxattr API returns the required buffer length when called with a
++ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
++ * the required buf.  On a NULL buf, we send a page of data to the server
++ * guessing that the ACL request can be serviced by a page. If so, we cache
++ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
++ * the cache. If not so, we throw away the page, and cache the required
++ * length. The next getxattr call will then produce another round trip to
++ * the server, this time with the input buf of the required size.
++ */
+ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+ {
+-      struct page *pages[NFS4ACL_MAXPAGES];
++      struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+       struct nfs_getaclargs args = {
+               .fh = NFS_FH(inode),
+               .acl_pages = pages,
+@@ -3566,41 +3563,60 @@ static ssize_t __nfs4_get_acl_uncached(s
+               .rpc_argp = &args,
+               .rpc_resp = &res,
+       };
+-      struct page *localpage = NULL;
+-      int ret;
++      int ret = -ENOMEM, npages, i, acl_len = 0;
+-      if (buflen < PAGE_SIZE) {
+-              /* As long as we're doing a round trip to the server anyway,
+-               * let's be prepared for a page of acl data. */
+-              localpage = alloc_page(GFP_KERNEL);
+-              resp_buf = page_address(localpage);
+-              if (localpage == NULL)
+-                      return -ENOMEM;
+-              args.acl_pages[0] = localpage;
+-              args.acl_pgbase = 0;
+-              args.acl_len = PAGE_SIZE;
+-      } else {
+-              resp_buf = buf;
+-              buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
++      npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++      /* As long as we're doing a round trip to the server anyway,
++       * let's be prepared for a page of acl data. */
++      if (npages == 0)
++              npages = 1;
++
++      for (i = 0; i < npages; i++) {
++              pages[i] = alloc_page(GFP_KERNEL);
++              if (!pages[i])
++                      goto out_free;
+       }
+-      ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
++      if (npages > 1) {
++              /* for decoding across pages */
++              args.acl_scratch = alloc_page(GFP_KERNEL);
++              if (!args.acl_scratch)
++                      goto out_free;
++      }
++      args.acl_len = npages * PAGE_SIZE;
++      args.acl_pgbase = 0;
++      /* Let decode_getfacl know not to fail if the ACL data is larger than
++       * the page we send as a guess */
++      if (buf == NULL)
++              res.acl_flags |= NFS4_ACL_LEN_REQUEST;
++      resp_buf = page_address(pages[0]);
++
++      dprintk("%s  buf %p buflen %ld npages %d args.acl_len %ld\n",
++              __func__, buf, buflen, npages, args.acl_len);
++      ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
++                           &msg, &args.seq_args, &res.seq_res, 0);
+       if (ret)
+               goto out_free;
+-      if (res.acl_len > args.acl_len)
+-              nfs4_write_cached_acl(inode, NULL, res.acl_len);
++
++      acl_len = res.acl_len - res.acl_data_offset;
++      if (acl_len > args.acl_len)
++              nfs4_write_cached_acl(inode, NULL, acl_len);
+       else
+-              nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
++              nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
++                                    acl_len);
+       if (buf) {
+               ret = -ERANGE;
+-              if (res.acl_len > buflen)
++              if (acl_len > buflen)
+                       goto out_free;
+-              if (localpage)
+-                      memcpy(buf, resp_buf, res.acl_len);
++              _copy_from_pages(buf, pages, res.acl_data_offset,
++                              res.acl_len);
+       }
+-      ret = res.acl_len;
++      ret = acl_len;
+ out_free:
+-      if (localpage)
+-              __free_page(localpage);
++      for (i = 0; i < npages; i++)
++              if (pages[i])
++                      __free_page(pages[i]);
++      if (args.acl_scratch)
++              __free_page(args.acl_scratch);
+       return ret;
+ }
+@@ -3631,6 +3647,8 @@ static ssize_t nfs4_proc_get_acl(struct
+               nfs_zap_acl_cache(inode);
+       ret = nfs4_read_cached_acl(inode, buf, buflen);
+       if (ret != -ENOENT)
++              /* -ENOENT is returned if there is no ACL or if there is an ACL
++               * but no cached acl data, just the acl length */
+               return ret;
+       return nfs4_get_acl_uncached(inode, buf, buflen);
+ }
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -2374,11 +2374,13 @@ static void nfs4_xdr_enc_getacl(struct r
+       encode_compound_hdr(xdr, req, &hdr);
+       encode_sequence(xdr, &args->seq_args, &hdr);
+       encode_putfh(xdr, args->fh, &hdr);
+-      replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
++      replen = hdr.replen + op_decode_hdr_maxsz + 1;
+       encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
+       xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
+               args->acl_pages, args->acl_pgbase, args->acl_len);
++      xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
++
+       encode_nops(&hdr);
+ }
+@@ -4714,17 +4716,18 @@ decode_restorefh(struct xdr_stream *xdr)
+ }
+ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+-              size_t *acl_len)
++                       struct nfs_getaclres *res)
+ {
+-      __be32 *savep;
++      __be32 *savep, *bm_p;
+       uint32_t attrlen,
+                bitmap[2] = {0};
+       struct kvec *iov = req->rq_rcv_buf.head;
+       int status;
+-      *acl_len = 0;
++      res->acl_len = 0;
+       if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+               goto out;
++      bm_p = xdr->p;
+       if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+               goto out;
+       if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+@@ -4736,18 +4739,30 @@ static int decode_getacl(struct xdr_stre
+               size_t hdrlen;
+               u32 recvd;
++              /* The bitmap (xdr len + bitmaps) and the attr xdr len words
++               * are stored with the acl data to handle the problem of
++               * variable length bitmaps.*/
++              xdr->p = bm_p;
++              res->acl_data_offset = be32_to_cpup(bm_p) + 2;
++              res->acl_data_offset <<= 2;
++
+               /* We ignore &savep and don't do consistency checks on
+                * the attr length.  Let userspace figure it out.... */
+               hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
++              attrlen += res->acl_data_offset;
+               recvd = req->rq_rcv_buf.len - hdrlen;
+               if (attrlen > recvd) {
+-                      dprintk("NFS: server cheating in getattr"
+-                                      " acl reply: attrlen %u > recvd %u\n",
++                      if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
++                              /* getxattr interface called with a NULL buf */
++                              res->acl_len = attrlen;
++                              goto out;
++                      }
++                      dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
+                                       attrlen, recvd);
+                       return -EINVAL;
+               }
+               xdr_read_pages(xdr, attrlen);
+-              *acl_len = attrlen;
++              res->acl_len = attrlen;
+       } else
+               status = -EOPNOTSUPP;
+@@ -5682,7 +5697,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqs
+       status = decode_putfh(xdr);
+       if (status)
+               goto out;
+-      status = decode_getacl(xdr, rqstp, &res->acl_len);
++      status = decode_getacl(xdr, rqstp, res);
+ out:
+       return status;
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -588,11 +588,16 @@ struct nfs_getaclargs {
+       size_t                          acl_len;
+       unsigned int                    acl_pgbase;
+       struct page **                  acl_pages;
++      struct page *                   acl_scratch;
+       struct nfs4_sequence_args       seq_args;
+ };
++/* getxattr ACL interface flags */
++#define NFS4_ACL_LEN_REQUEST  0x0001  /* zero length getxattr buffer */
+ struct nfs_getaclres {
+       size_t                          acl_len;
++      size_t                          acl_data_offset;
++      int                             acl_flags;
+       struct nfs4_sequence_res        seq_res;
+ };
+--- a/include/linux/sunrpc/xdr.h
++++ b/include/linux/sunrpc/xdr.h
+@@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_
+                            struct xdr_array2_desc *desc);
+ extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+                            struct xdr_array2_desc *desc);
++extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
++                           size_t len);
+ /*
+  * Provide some simple tools for XDR buffer overflow-checking etc.
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size
+  * Copies data into an arbitrary memory location from an array of pages
+  * The copy is assumed to be non-overlapping.
+  */
+-static void
++void
+ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+ {
+       struct page **pgfrom;
+@@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **
+       } while ((len -= copy) != 0);
+ }
++EXPORT_SYMBOL_GPL(_copy_from_pages);
+ /*
+  * xdr_shrink_bufhead
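
The comment block added to nfs4proc.c describes a two-call getxattr protocol. From user space that protocol looks like the following standalone program; "system.nfs4_acl" is the xattr name the NFSv4 client exposes for ACLs, the rest is plain getxattr(2) usage:

/*
 * User-space view of the protocol: probe with a zero-size buffer to
 * learn the ACL length, then allocate and fetch.  The second call is
 * what the kernel tries to serve from the ACL cache filled on the
 * first round trip.
 */
#include <sys/xattr.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
	char *buf;

	if (len < 0) {
		perror("getxattr (size probe)");
		return 1;
	}

	buf = malloc(len ? len : 1);
	if (!buf)
		return 1;

	len = getxattr(path, "system.nfs4_acl", buf, len);
	if (len < 0) {
		perror("getxattr (fetch)");
		free(buf);
		return 1;
	}

	printf("fetched %zd bytes of NFSv4 ACL data from %s\n", len, path);
	free(buf);
	return 0;
}
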
diff --git a/queue-3.0/series b/queue-3.0/series
index 93f570f8329701861f0b9174d73b798e5ad1db94..2be9e3f5325e891c8b15006f2e9f9913e0427533 100644 (file)
@@ -14,3 +14,17 @@ net-irda-add-missing-error-path-release_sock-call.patch
 usb-ehci-fix-bug-in-itd-sitd-dma-pool-allocation.patch
 usb-xhci-fix-trb-transfer-length-macro-used-for-event-trb.patch
 btrfs-limit-the-global-reserve-to-512mb.patch
+kvm-clean-up-error-handling-during-vcpu-creation.patch
+x25-validate-incoming-call-user-data-lengths.patch
+x25-handle-undersized-fragmented-skbs.patch
+batman-adv-bat_socket_read-missing-checks.patch
+batman-adv-only-write-requested-number-of-byte-to-user-buffer.patch
+kvm-x86-prevent-starting-pit-timers-in-the-absence-of-irqchip-support.patch
+nfsv4-include-bitmap-in-nfsv4-get-acl-data.patch
+nfsv4-fix-an-oops-in-the-nfsv4-getacl-code.patch
+nfs-nfs_getaclargs.acl_len-is-a-size_t.patch
+kvm-ensure-all-vcpus-are-consistent-with-in-kernel-irqchip-settings.patch
+macvtap-zerocopy-validate-vectors-before-building-skb.patch
+kvm-fix-buffer-overflow-in-kvm_set_irq.patch
+mm-hotplug-correctly-add-new-zone-to-all-other-nodes-zone-lists.patch
+kvm-x86-invalid-opcode-oops-on-set_sregs-with-osxsave-bit-set-cve-2012-4461.patch
diff --git a/queue-3.0/x25-handle-undersized-fragmented-skbs.patch b/queue-3.0/x25-handle-undersized-fragmented-skbs.patch
new file mode 100644 (file)
index 0000000..db906e5
--- /dev/null
@@ -0,0 +1,348 @@
+From jslaby@suse.cz  Mon Apr  1 16:35:58 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:48 +0100
+Subject: x25: Handle undersized/fragmented skbs
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Matthew Daley <mattjd@gmail.com>, Eric Dumazet <eric.dumazet@gmail.com>, Andrew Hendry <andrew.hendry@gmail.com>, stable <stable@kernel.org>, "David S. Miller" <davem@davemloft.net>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-3-git-send-email-jslaby@suse.cz>
+
+
+From: Matthew Daley <mattjd@gmail.com>
+
+commit cb101ed2c3c7c0224d16953fe77bfb9d6c2cb9df upstream.
+
+There are multiple locations in the X.25 packet layer where a skb is
+assumed to be of at least a certain size and that all its data is
+currently available at skb->data.  These assumptions are not checked,
+hence buffer overreads may occur.  Use pskb_may_pull to check these
+minimal size assumptions and ensure that data is available at skb->data
+when necessary, as well as use skb_copy_bits where needed.
+
+Signed-off-by: Matthew Daley <mattjd@gmail.com>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Andrew Hendry <andrew.hendry@gmail.com>
+Acked-by: Andrew Hendry <andrew.hendry@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/x25/af_x25.c         |   31 ++++++++++++++++++++++++-------
+ net/x25/x25_dev.c        |    6 ++++++
+ net/x25/x25_facilities.c |   10 ++++++----
+ net/x25/x25_in.c         |   40 +++++++++++++++++++++++++++++++++++-----
+ net/x25/x25_link.c       |    3 +++
+ net/x25/x25_subr.c       |   14 +++++++++++++-
+ 6 files changed, 87 insertions(+), 17 deletions(-)
+
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_bu
+       int needed;
+       int rc;
+-      if (skb->len < 1) {
++      if (!pskb_may_pull(skb, 1)) {
+               /* packet has no address block */
+               rc = 0;
+               goto empty;
+@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_bu
+       len = *skb->data;
+       needed = 1 + (len >> 4) + (len & 0x0f);
+-      if (skb->len < needed) {
++      if (!pskb_may_pull(skb, needed)) {
+               /* packet is too short to hold the addresses it claims
+                  to hold */
+               rc = -1;
+@@ -952,10 +952,10 @@ int x25_rx_call_request(struct sk_buff *
+        *
+        *      Facilities length is mandatory in call request packets
+        */
+-      if (skb->len < 1)
++      if (!pskb_may_pull(skb, 1))
+               goto out_clear_request;
+       len = skb->data[0] + 1;
+-      if (skb->len < len)
++      if (!pskb_may_pull(skb, len))
+               goto out_clear_request;
+       skb_pull(skb,len);
+@@ -966,6 +966,13 @@ int x25_rx_call_request(struct sk_buff *
+               goto out_clear_request;
+       /*
++       *      Get all the call user data so it can be used in
++       *      x25_find_listener and skb_copy_from_linear_data up ahead.
++       */
++      if (!pskb_may_pull(skb, skb->len))
++              goto out_clear_request;
++
++      /*
+        *      Find a listener for the particular address/cud pair.
+        */
+       sk = x25_find_listener(&source_addr,skb);
+@@ -1173,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *ioc
+        *      byte of the user data is the logical value of the Q Bit.
+        */
+       if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
++              if (!pskb_may_pull(skb, 1))
++                      goto out_kfree_skb;
++
+               qbit = skb->data[0];
+               skb_pull(skb, 1);
+       }
+@@ -1251,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *ioc
+       struct x25_sock *x25 = x25_sk(sk);
+       struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
+       size_t copied;
+-      int qbit;
++      int qbit, header_len = x25->neighbour->extended ?
++              X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
++
+       struct sk_buff *skb;
+       unsigned char *asmptr;
+       int rc = -ENOTCONN;
+@@ -1272,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *ioc
+               skb = skb_dequeue(&x25->interrupt_in_queue);
++              if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
++                      goto out_free_dgram;
++
+               skb_pull(skb, X25_STD_MIN_LEN);
+               /*
+@@ -1292,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *ioc
+               if (!skb)
+                       goto out;
++              if (!pskb_may_pull(skb, header_len))
++                      goto out_free_dgram;
++
+               qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
+-              skb_pull(skb, x25->neighbour->extended ?
+-                              X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
++              skb_pull(skb, header_len);
+               if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
+                       asmptr  = skb_push(skb, 1);
+--- a/net/x25/x25_dev.c
++++ b/net/x25/x25_dev.c
+@@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_bu
+       unsigned short frametype;
+       unsigned int lci;
++      if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
++              return 0;
++
+       frametype = skb->data[2];
+       lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
+@@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buf
+               goto drop;
+       }
++      if (!pskb_may_pull(skb, 1))
++              return 0;
++
+       switch (skb->data[0]) {
+       case X25_IFACE_DATA:
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -44,7 +44,7 @@
+ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
+               struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
+ {
+-      unsigned char *p = skb->data;
++      unsigned char *p;
+       unsigned int len;
+       *vc_fac_mask = 0;
+@@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff
+       memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
+       memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
+-      if (skb->len < 1)
++      if (!pskb_may_pull(skb, 1))
+               return 0;
+-      len = *p++;
++      len = skb->data[0];
+-      if (len >= skb->len)
++      if (!pskb_may_pull(skb, 1 + len))
+               return -1;
++      p = skb->data + 1;
++
+       while (len > 0) {
+               switch (*p & X25_FAC_CLASS_MASK) {
+               case X25_FAC_CLASS_A:
+--- a/net/x25/x25_in.c
++++ b/net/x25/x25_in.c
+@@ -107,6 +107,8 @@ static int x25_state1_machine(struct soc
+                       /*
+                        *      Parse the data in the frame.
+                        */
++                      if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
++                              goto out_clear;
+                       skb_pull(skb, X25_STD_MIN_LEN);
+                       len = x25_parse_address_block(skb, &source_addr,
+@@ -130,9 +132,8 @@ static int x25_state1_machine(struct soc
+                               if (skb->len > X25_MAX_CUD_LEN)
+                                       goto out_clear;
+-                              skb_copy_from_linear_data(skb,
+-                                            x25->calluserdata.cuddata,
+-                                            skb->len);
++                              skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
++                                      skb->len);
+                               x25->calluserdata.cudlength = skb->len;
+                       }
+                       if (!sock_flag(sk, SOCK_DEAD))
+@@ -140,6 +141,9 @@ static int x25_state1_machine(struct soc
+                       break;
+               }
+               case X25_CLEAR_REQUEST:
++                      if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
++                              goto out_clear;
++
+                       x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+                       x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
+                       break;
+@@ -167,6 +171,9 @@ static int x25_state2_machine(struct soc
+       switch (frametype) {
+               case X25_CLEAR_REQUEST:
++                      if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
++                              goto out_clear;
++
+                       x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+                       x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+                       break;
+@@ -180,6 +187,11 @@ static int x25_state2_machine(struct soc
+       }
+       return 0;
++
++out_clear:
++      x25_write_internal(sk, X25_CLEAR_REQUEST);
++      x25_start_t23timer(sk);
++      return 0;
+ }
+ /*
+@@ -209,6 +221,9 @@ static int x25_state3_machine(struct soc
+                       break;
+               case X25_CLEAR_REQUEST:
++                      if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
++                              goto out_clear;
++
+                       x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+                       x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+                       break;
+@@ -307,6 +322,12 @@ static int x25_state3_machine(struct soc
+       }
+       return queued;
++
++out_clear:
++      x25_write_internal(sk, X25_CLEAR_REQUEST);
++      x25->state = X25_STATE_2;
++      x25_start_t23timer(sk);
++      return 0;
+ }
+ /*
+@@ -316,13 +337,13 @@ static int x25_state3_machine(struct soc
+  */
+ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
+ {
++      struct x25_sock *x25 = x25_sk(sk);
++
+       switch (frametype) {
+               case X25_RESET_REQUEST:
+                       x25_write_internal(sk, X25_RESET_CONFIRMATION);
+               case X25_RESET_CONFIRMATION: {
+-                      struct x25_sock *x25 = x25_sk(sk);
+-
+                       x25_stop_timer(sk);
+                       x25->condition = 0x00;
+                       x25->va        = 0;
+@@ -334,6 +355,9 @@ static int x25_state4_machine(struct soc
+                       break;
+               }
+               case X25_CLEAR_REQUEST:
++                      if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
++                              goto out_clear;
++
+                       x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+                       x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+                       break;
+@@ -343,6 +367,12 @@ static int x25_state4_machine(struct soc
+       }
+       return 0;
++
++out_clear:
++      x25_write_internal(sk, X25_CLEAR_REQUEST);
++      x25->state = X25_STATE_2;
++      x25_start_t23timer(sk);
++      return 0;
+ }
+ /* Higher level upcall for a LAPB frame */
+--- a/net/x25/x25_link.c
++++ b/net/x25/x25_link.c
+@@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *sk
+                       break;
+               case X25_DIAGNOSTIC:
++                      if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
++                              break;
++
+                       printk(KERN_WARNING "x25: diagnostic #%d - "
+                              "%02X %02X %02X\n",
+                              skb->data[3], skb->data[4],
+--- a/net/x25/x25_subr.c
++++ b/net/x25/x25_subr.c
+@@ -271,7 +271,11 @@ int x25_decode(struct sock *sk, struct s
+              int *d, int *m)
+ {
+       struct x25_sock *x25 = x25_sk(sk);
+-      unsigned char *frame = skb->data;
++      unsigned char *frame;
++
++      if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
++              return X25_ILLEGAL;
++      frame = skb->data;
+       *ns = *nr = *q = *d = *m = 0;
+@@ -296,6 +300,10 @@ int x25_decode(struct sock *sk, struct s
+               if (frame[2] == X25_RR  ||
+                   frame[2] == X25_RNR ||
+                   frame[2] == X25_REJ) {
++                      if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
++                              return X25_ILLEGAL;
++                      frame = skb->data;
++
+                       *nr = (frame[3] >> 1) & 0x7F;
+                       return frame[2];
+               }
+@@ -310,6 +318,10 @@ int x25_decode(struct sock *sk, struct s
+       if (x25->neighbour->extended) {
+               if ((frame[2] & 0x01) == X25_DATA) {
++                      if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
++                              return X25_ILLEGAL;
++                      frame = skb->data;
++
+                       *q  = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
+                       *d  = (frame[0] & X25_D_BIT) == X25_D_BIT;
+                       *m  = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
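
Every hunk above repeats one pattern: do not touch skb->data[n] until pskb_may_pull() has confirmed those bytes are present in the linear area. The pattern in isolation, with an illustrative frame reader that is not the x25 code itself:

/*
 * Kernel-style sketch: pskb_may_pull(skb, n) makes sure at least n bytes
 * are available at skb->data, pulling them out of fragments if needed,
 * and returns false when the packet is simply too short.  Only after
 * that check is it safe to index skb->data directly.
 */
#include <linux/skbuff.h>

static int toy_read_frame_type(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 3))	/* need the 3-byte packet header */
		return -1;		/* undersized or not yet linear */

	return skb->data[2];		/* bytes 0..2 are linear now */
}
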
diff --git a/queue-3.0/x25-validate-incoming-call-user-data-lengths.patch b/queue-3.0/x25-validate-incoming-call-user-data-lengths.patch
new file mode 100644 (file)
index 0000000..b3bd03e
--- /dev/null
@@ -0,0 +1,60 @@
+From jslaby@suse.cz  Mon Apr  1 16:35:43 2013
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 19 Mar 2013 12:36:47 +0100
+Subject: x25: Validate incoming call user data lengths
+To: gregkh@linuxfoundation.org
+Cc: jirislaby@gmail.com, stable@vger.kernel.org, Matthew Daley <mattjd@gmail.com>, stable <stable@kernel.org>, "David S. Miller" <davem@davemloft.net>, Jiri Slaby <jslaby@suse.cz>
+Message-ID: <1363693019-14812-2-git-send-email-jslaby@suse.cz>
+
+
+From: Matthew Daley <mattjd@gmail.com>
+
+commit c7fd0d48bde943e228e9c28ce971a22d6a1744c4 upstream.
+
+X.25 call user data is being copied in its entirety from incoming messages
+without consideration to the size of the destination buffers, leading to
+possible buffer overflows. Validate incoming call user data lengths before
+these copies are performed.
+
+It appears this issue was noticed some time ago, however nothing seemed to
+come of it: see http://www.spinics.net/lists/linux-x25/msg00043.html and
+commit 8db09f26f912f7c90c764806e804b558da520d4f.
+
+Signed-off-by: Matthew Daley <mattjd@gmail.com>
+Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
+Tested-by: Andrew Hendry <andrew.hendry@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/x25/af_x25.c |    6 ++++++
+ net/x25/x25_in.c |    3 +++
+ 2 files changed, 9 insertions(+)
+
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -960,6 +960,12 @@ int x25_rx_call_request(struct sk_buff *
+       skb_pull(skb,len);
+       /*
++       *      Ensure that the amount of call user data is valid.
++       */
++      if (skb->len > X25_MAX_CUD_LEN)
++              goto out_clear_request;
++
++      /*
+        *      Find a listener for the particular address/cud pair.
+        */
+       sk = x25_find_listener(&source_addr,skb);
+--- a/net/x25/x25_in.c
++++ b/net/x25/x25_in.c
+@@ -127,6 +127,9 @@ static int x25_state1_machine(struct soc
+                        *      Copy any Call User Data.
+                        */
+                       if (skb->len > 0) {
++                              if (skb->len > X25_MAX_CUD_LEN)
++                                      goto out_clear;
++
+                               skb_copy_from_linear_data(skb,
+                                             x25->calluserdata.cuddata,
+                                             skb->len);