--- /dev/null
+From cf903e9d3a97f89b224d2d07be37c0f160db8192 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 3 Apr 2017 15:53:34 +0200
+Subject: Documentation: stable-kernel-rules: fix stable-tag format
+
+From: Johan Hovold <johan@kernel.org>
+
+commit cf903e9d3a97f89b224d2d07be37c0f160db8192 upstream.
+
+A patch documenting how to specify which kernels a particular fix should
+be backported to (seemingly) inadvertently added a minus sign after the
+kernel version. This particular stable-tag format had never been used
+prior to this patch, and was not present when the patch in question
+was first submitted (it was added in v2 without any comment).
+
+Drop the minus sign to avoid any confusion.
+
+Fixes: fdc81b7910ad ("stable_kernel_rules: Add clause about specification of kernel versions to patch.")
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/process/stable-kernel-rules.rst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/process/stable-kernel-rules.rst
++++ b/Documentation/process/stable-kernel-rules.rst
+@@ -124,7 +124,7 @@ specified in the following format in the
+
+ .. code-block:: none
+
+- Cc: <stable@vger.kernel.org> # 3.3.x-
++ Cc: <stable@vger.kernel.org> # 3.3.x
+
+ The tag has the meaning of:
+
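For reference, the corrected tag goes in the sign-off area of the upstream submission; as documented in the file this patch touches, it asks the stable maintainers to apply the fix starting with the 3.3 stable series. The surrounding Signed-off-by line below is purely illustrative:

    Cc: <stable@vger.kernel.org> # 3.3.x
    Signed-off-by: Some Developer <dev@example.org>
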
--- /dev/null
+From 06ce521af9558814b8606c0476c54497cf83a653 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 24 Jan 2017 11:56:21 +0100
+Subject: kvm: fix page struct leak in handle_vmon
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 06ce521af9558814b8606c0476c54497cf83a653 upstream.
+
+handle_vmon gets a reference on the VMXON region page,
+but does not release it. Release the reference.
+
+Found by syzkaller; based on a patch by Dmitry.
+
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Charles (Chas) Williams" <ciwillia@brocade.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7086,13 +7086,18 @@ static int nested_vmx_check_vmptr(struct
+ }
+
+ page = nested_get_page(vcpu, vmptr);
+- if (page == NULL ||
+- *(u32 *)kmap(page) != VMCS12_REVISION) {
++ if (page == NULL) {
+ nested_vmx_failInvalid(vcpu);
++ return kvm_skip_emulated_instruction(vcpu);
++ }
++ if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+ kunmap(page);
++ nested_release_page_clean(page);
++ nested_vmx_failInvalid(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ kunmap(page);
++ nested_release_page_clean(page);
+ vmx->nested.vmxon_ptr = vmptr;
+ break;
+ case EXIT_REASON_VMCLEAR:
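The leak fixed above is the classic pattern of taking a reference (nested_get_page) and bailing out on an error path without dropping it. As a rough, self-contained userspace analogue of the rule the fix enforces (every successful get paired with a put on every exit path, error paths included), here is a sketch; the names and the refcount model are illustrative only, not the KVM/mm API:

/*
 * Simplified userspace model of the pattern enforced by the fix above:
 * every successful "get" must be paired with a "put" on every exit path,
 * including the early-error path.  Names are illustrative, not kernel API.
 */
#include <stdio.h>
#include <stdint.h>

struct page { int refcount; uint32_t revision; };

static struct page *get_page_ref(struct page *p)
{
	if (!p)
		return NULL;
	p->refcount++;			/* take a reference, caller must drop it */
	return p;
}

static void put_page_ref(struct page *p)
{
	p->refcount--;			/* drop the reference taken above */
}

#define EXPECTED_REVISION 0x11e57ed	/* arbitrary value for the demo */

static int check_region(struct page *p)
{
	struct page *ref = get_page_ref(p);

	if (!ref)
		return -1;		/* nothing to release: get failed */

	if (ref->revision != EXPECTED_REVISION) {
		put_page_ref(ref);	/* error path must drop the ref too */
		return -1;
	}

	put_page_ref(ref);		/* success path drops it as well */
	return 0;
}

int main(void)
{
	struct page good = { .refcount = 0, .revision = EXPECTED_REVISION };
	struct page bad  = { .refcount = 0, .revision = 0 };

	printf("good: %d, refcount %d\n", check_region(&good), good.refcount);
	printf("bad:  %d, refcount %d\n", check_region(&bad),  bad.refcount);
	return 0;
}

In both cases the refcount returns to zero, which is exactly what the added nested_release_page_clean() calls guarantee in the patched kernel code.
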
--- /dev/null
+From cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 Mon Sep 17 00:00:00 2001
+From: Chris Salls <salls@cs.ucsb.edu>
+Date: Fri, 7 Apr 2017 23:48:11 -0700
+Subject: mm/mempolicy.c: fix error handling in set_mempolicy and mbind.
+
+From: Chris Salls <salls@cs.ucsb.edu>
+
+commit cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 upstream.
+
+In the case that compat_get_bitmap fails, we do not want to copy the
+bitmap to user space, as it would contain uninitialized stack data and
+leak sensitive data.
+
+Signed-off-by: Chris Salls <salls@cs.ucsb.edu>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mempolicy.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1526,7 +1526,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, in
+ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode)
+ {
+- long err = 0;
+ unsigned long __user *nm = NULL;
+ unsigned long nr_bits, alloc_size;
+ DECLARE_BITMAP(bm, MAX_NUMNODES);
+@@ -1535,14 +1534,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, in
+ alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+
+ if (nmask) {
+- err = compat_get_bitmap(bm, nmask, nr_bits);
++ if (compat_get_bitmap(bm, nmask, nr_bits))
++ return -EFAULT;
+ nm = compat_alloc_user_space(alloc_size);
+- err |= copy_to_user(nm, bm, alloc_size);
++ if (copy_to_user(nm, bm, alloc_size))
++ return -EFAULT;
+ }
+
+- if (err)
+- return -EFAULT;
+-
+ return sys_set_mempolicy(mode, nm, nr_bits+1);
+ }
+
+@@ -1550,7 +1548,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulo
+ compat_ulong_t, mode, compat_ulong_t __user *, nmask,
+ compat_ulong_t, maxnode, compat_ulong_t, flags)
+ {
+- long err = 0;
+ unsigned long __user *nm = NULL;
+ unsigned long nr_bits, alloc_size;
+ nodemask_t bm;
+@@ -1559,14 +1556,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulo
+ alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+
+ if (nmask) {
+- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
++ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
++ return -EFAULT;
+ nm = compat_alloc_user_space(alloc_size);
+- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
++ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
++ return -EFAULT;
+ }
+
+- if (err)
+- return -EFAULT;
+-
+ return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
+ }
+
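The structural point of the fix is to fail before anything is written to a destination the caller can read, rather than accumulating error codes and copying a possibly uninitialized buffer first. A minimal userspace sketch of that shape follows; read_input() and publish() are hypothetical stand-ins for compat_get_bitmap() and copy_to_user():

/*
 * Userspace sketch of the error-handling shape the fix above moves to:
 * bail out before the output buffer is ever written with data that was
 * never initialized.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define NBYTES 16

/* pretend to read a bitmap from the caller; failure leaves buf untouched */
static int read_input(unsigned char *buf, int fail)
{
	if (fail)
		return -EFAULT;		/* buf still holds stack garbage */
	memset(buf, 0xab, NBYTES);
	return 0;
}

/* pretend to expose the buffer to the caller */
static int publish(const unsigned char *buf)
{
	for (int i = 0; i < NBYTES; i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}

static int set_policy(int fail)
{
	unsigned char bm[NBYTES];	/* deliberately uninitialized, as in the bug */

	if (read_input(bm, fail))
		return -EFAULT;		/* return before publish() can leak bm */
	if (publish(bm))
		return -EFAULT;
	return 0;
}

int main(void)
{
	printf("ok path:   %d\n", set_policy(0));
	printf("fail path: %d\n", set_policy(1));
	return 0;
}
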
--- /dev/null
+From f5b98461cb8167ba362ad9f74c41d126b7becea7 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Fri, 6 Jan 2017 19:32:01 +0100
+Subject: random: use chacha20 for get_random_int/long
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit f5b98461cb8167ba362ad9f74c41d126b7becea7 upstream.
+
+Now that our crng uses chacha20, we can rely on its speedy
+characteristics to replace MD5, while simultaneously achieving a
+higher security guarantee. Before, the idea was to use these functions
+if you wanted random integers that aren't stupidly insecure but aren't
+necessarily secure either, a vague gray zone that hopefully was "good
+enough" for its users. With chacha20, we can strengthen this claim,
+since either we're using an rdrand-like instruction, or we're using the
+same crng as /dev/urandom. And it's faster than what was there before.
+
+We could have chosen to replace this with a SipHash-derived function,
+which might be slightly faster, but at the cost of having yet another
+RNG construction in the kernel. By moving to chacha20, we have a single
+RNG to analyze and verify, and we also get a good performance
+improvement on all platforms.
+
+Implementation-wise, rather than using one generic buffer for both
+get_random_int/long and doing a memcpy based on the size needed, we use
+a dedicated buffer for 32-bit reads and another for 64-bit reads. This
+way, accesses are always aligned on all platforms. While the C is
+slightly more verbose, the assembly it generates is a lot simpler
+than it would be otherwise.
+
+Finally, on 32-bit platforms where longs and ints are the same size,
+we simply alias get_random_int to get_random_long.
+
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Suggested-by: Theodore Ts'o <tytso@mit.edu>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c | 84 +++++++++++++++++++++++++------------------------
+ include/linux/random.h | 1
+ init/main.c | 1
+ 3 files changed, 43 insertions(+), 43 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -2042,63 +2042,65 @@ struct ctl_table random_table[] = {
+ };
+ #endif /* CONFIG_SYSCTL */
+
+-static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+-
+-int random_int_secret_init(void)
+-{
+- get_random_bytes(random_int_secret, sizeof(random_int_secret));
+- return 0;
+-}
+-
+-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
+- __aligned(sizeof(unsigned long));
++struct batched_entropy {
++ union {
++ unsigned long entropy_long[CHACHA20_BLOCK_SIZE / sizeof(unsigned long)];
++ unsigned int entropy_int[CHACHA20_BLOCK_SIZE / sizeof(unsigned int)];
++ };
++ unsigned int position;
++};
+
+ /*
+- * Get a random word for internal kernel use only. Similar to urandom but
+- * with the goal of minimal entropy pool depletion. As a result, the random
+- * value is not cryptographically secure but for several uses the cost of
+- * depleting entropy is too high
++ * Get a random word for internal kernel use only. The quality of the random
++ * number is either as good as RDRAND or as good as /dev/urandom, with the
++ * goal of being quite fast and not depleting entropy.
+ */
+-unsigned int get_random_int(void)
++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
++unsigned long get_random_long(void)
+ {
+- __u32 *hash;
+- unsigned int ret;
++ unsigned long ret;
++ struct batched_entropy *batch;
+
+- if (arch_get_random_int(&ret))
++ if (arch_get_random_long(&ret))
+ return ret;
+
+- hash = get_cpu_var(get_random_int_hash);
+-
+- hash[0] += current->pid + jiffies + random_get_entropy();
+- md5_transform(hash, random_int_secret);
+- ret = hash[0];
+- put_cpu_var(get_random_int_hash);
+-
++ batch = &get_cpu_var(batched_entropy_long);
++ if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
++ extract_crng((u8 *)batch->entropy_long);
++ batch->position = 0;
++ }
++ ret = batch->entropy_long[batch->position++];
++ put_cpu_var(batched_entropy_long);
+ return ret;
+ }
+-EXPORT_SYMBOL(get_random_int);
++EXPORT_SYMBOL(get_random_long);
+
+-/*
+- * Same as get_random_int(), but returns unsigned long.
+- */
+-unsigned long get_random_long(void)
++#if BITS_PER_LONG == 32
++unsigned int get_random_int(void)
+ {
+- __u32 *hash;
+- unsigned long ret;
++ return get_random_long();
++}
++#else
++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
++unsigned int get_random_int(void)
++{
++ unsigned int ret;
++ struct batched_entropy *batch;
+
+- if (arch_get_random_long(&ret))
++ if (arch_get_random_int(&ret))
+ return ret;
+
+- hash = get_cpu_var(get_random_int_hash);
+-
+- hash[0] += current->pid + jiffies + random_get_entropy();
+- md5_transform(hash, random_int_secret);
+- ret = *(unsigned long *)hash;
+- put_cpu_var(get_random_int_hash);
+-
++ batch = &get_cpu_var(batched_entropy_int);
++ if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
++ extract_crng((u8 *)batch->entropy_int);
++ batch->position = 0;
++ }
++ ret = batch->entropy_int[batch->position++];
++ put_cpu_var(batched_entropy_int);
+ return ret;
+ }
+-EXPORT_SYMBOL(get_random_long);
++#endif
++EXPORT_SYMBOL(get_random_int);
+
+ /**
+ * randomize_page - Generate a random, page aligned address
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -37,7 +37,6 @@ extern void get_random_bytes(void *buf,
+ extern int add_random_ready_callback(struct random_ready_callback *rdy);
+ extern void del_random_ready_callback(struct random_ready_callback *rdy);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+-extern int random_int_secret_init(void);
+
+ #ifndef MODULE
+ extern const struct file_operations random_fops, urandom_fops;
+--- a/init/main.c
++++ b/init/main.c
+@@ -879,7 +879,6 @@ static void __init do_basic_setup(void)
+ do_ctors();
+ usermodehelper_enable();
+ do_initcalls();
+- random_int_secret_init();
+ }
+
+ static void __init do_pre_smp_initcalls(void)
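The batching scheme above keeps one block of crng output per CPU and hands it out one word at a time, refilling only when the block is exhausted (the arch RNG, e.g. RDRAND, is still preferred when available). A simplified userspace analogue follows, assuming a per-thread buffer in place of the per-CPU one and a toy xorshift generator standing in for extract_crng()/ChaCha20:

/*
 * Userspace sketch of the batching scheme introduced above: keep a small
 * buffer of generator output per thread (per CPU in the kernel) and hand
 * out one word at a time, refilling only when the buffer runs dry.
 * fill_block() is a stand-in for extract_crng(); it is a toy generator,
 * not ChaCha20, and is not secure.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define BLOCK_SIZE 64			/* one ChaCha20 block is 64 bytes */

struct batched_entropy {
	uint64_t entropy[BLOCK_SIZE / sizeof(uint64_t)];
	unsigned int position;
};

static _Thread_local struct batched_entropy batch;	/* per-thread, like per-CPU */

/* stand-in for extract_crng(): refill the whole block at once */
static void fill_block(uint64_t *buf, size_t words)
{
	static uint64_t x = 0x9e3779b97f4a7c15ULL;	/* toy state */
	for (size_t i = 0; i < words; i++) {
		x ^= x << 13; x ^= x >> 7; x ^= x << 17;	/* xorshift, illustration only */
		buf[i] = x;
	}
}

static uint64_t get_random_u64(void)
{
	/* refill only when the previous block has been fully consumed */
	if (batch.position % (BLOCK_SIZE / sizeof(uint64_t)) == 0) {
		fill_block(batch.entropy, BLOCK_SIZE / sizeof(uint64_t));
		batch.position = 0;
	}
	return batch.entropy[batch.position++];
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%016llx\n", (unsigned long long)get_random_u64());
	return 0;
}

The refill-on-wrap test mirrors the position % ARRAY_SIZE() check in the patched get_random_int()/get_random_long(), and the per-thread variable plays the role of the DEFINE_PER_CPU batched_entropy state.
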