From c541b80747bbf9952ce27931fb25310a02430c29 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 10 Apr 2017 17:09:34 +0200 Subject: [PATCH] 4.9-stable patches added patches: documentation-stable-kernel-rules-fix-stable-tag-format.patch mm-mempolicy.c-fix-error-handling-in-set_mempolicy-and-mbind.patch random-use-chacha20-for-get_random_int-long.patch --- ...e-kernel-rules-fix-stable-tag-format.patch | 36 ++++ ...-handling-in-set_mempolicy-and-mbind.patch | 77 ++++++++ ...use-chacha20-for-get_random_int-long.patch | 177 ++++++++++++++++++ queue-4.9/series | 3 + 4 files changed, 293 insertions(+) create mode 100644 queue-4.9/documentation-stable-kernel-rules-fix-stable-tag-format.patch create mode 100644 queue-4.9/mm-mempolicy.c-fix-error-handling-in-set_mempolicy-and-mbind.patch create mode 100644 queue-4.9/random-use-chacha20-for-get_random_int-long.patch diff --git a/queue-4.9/documentation-stable-kernel-rules-fix-stable-tag-format.patch b/queue-4.9/documentation-stable-kernel-rules-fix-stable-tag-format.patch new file mode 100644 index 00000000000..d131ae10ad5 --- /dev/null +++ b/queue-4.9/documentation-stable-kernel-rules-fix-stable-tag-format.patch @@ -0,0 +1,36 @@ +From cf903e9d3a97f89b224d2d07be37c0f160db8192 Mon Sep 17 00:00:00 2001 +From: Johan Hovold +Date: Mon, 3 Apr 2017 15:53:34 +0200 +Subject: Documentation: stable-kernel-rules: fix stable-tag format + +From: Johan Hovold + +commit cf903e9d3a97f89b224d2d07be37c0f160db8192 upstream. + +A patch documenting how to specify which kernels a particular fix should +be backported to (seemingly) inadvertently added a minus sign after the +kernel version. This particular stable-tag format had never been used +prior to this patch, and was neither present when the patch in question +was first submitted (it was added in v2 without any comment). + +Drop the minus sign to avoid any confusion. + +Fixes: fdc81b7910ad ("stable_kernel_rules: Add clause about specification of kernel versions to patch.") +Signed-off-by: Johan Hovold +Signed-off-by: Greg Kroah-Hartman + +--- + Documentation/stable_kernel_rules.txt | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/Documentation/stable_kernel_rules.txt ++++ b/Documentation/stable_kernel_rules.txt +@@ -124,7 +124,7 @@ specified in the following format in the + + .. code-block:: none + +- Cc: # 3.3.x- ++ Cc: # 3.3.x + + The tag has the meaning of: + diff --git a/queue-4.9/mm-mempolicy.c-fix-error-handling-in-set_mempolicy-and-mbind.patch b/queue-4.9/mm-mempolicy.c-fix-error-handling-in-set_mempolicy-and-mbind.patch new file mode 100644 index 00000000000..7b88dcbe4de --- /dev/null +++ b/queue-4.9/mm-mempolicy.c-fix-error-handling-in-set_mempolicy-and-mbind.patch @@ -0,0 +1,77 @@ +From cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 Mon Sep 17 00:00:00 2001 +From: Chris Salls +Date: Fri, 7 Apr 2017 23:48:11 -0700 +Subject: mm/mempolicy.c: fix error handling in set_mempolicy and mbind. + +From: Chris Salls + +commit cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 upstream. + +In the case that compat_get_bitmap fails we do not want to copy the +bitmap to the user as it will contain uninitialized stack data and leak +sensitive data. 
+ +Signed-off-by: Chris Salls +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman + +--- + mm/mempolicy.c | 20 ++++++++------------ + 1 file changed, 8 insertions(+), 12 deletions(-) + +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -1524,7 +1524,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, in + COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, + compat_ulong_t, maxnode) + { +- long err = 0; + unsigned long __user *nm = NULL; + unsigned long nr_bits, alloc_size; + DECLARE_BITMAP(bm, MAX_NUMNODES); +@@ -1533,14 +1532,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, in + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; + + if (nmask) { +- err = compat_get_bitmap(bm, nmask, nr_bits); ++ if (compat_get_bitmap(bm, nmask, nr_bits)) ++ return -EFAULT; + nm = compat_alloc_user_space(alloc_size); +- err |= copy_to_user(nm, bm, alloc_size); ++ if (copy_to_user(nm, bm, alloc_size)) ++ return -EFAULT; + } + +- if (err) +- return -EFAULT; +- + return sys_set_mempolicy(mode, nm, nr_bits+1); + } + +@@ -1548,7 +1546,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulo + compat_ulong_t, mode, compat_ulong_t __user *, nmask, + compat_ulong_t, maxnode, compat_ulong_t, flags) + { +- long err = 0; + unsigned long __user *nm = NULL; + unsigned long nr_bits, alloc_size; + nodemask_t bm; +@@ -1557,14 +1554,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulo + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; + + if (nmask) { +- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); ++ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) ++ return -EFAULT; + nm = compat_alloc_user_space(alloc_size); +- err |= copy_to_user(nm, nodes_addr(bm), alloc_size); ++ if (copy_to_user(nm, nodes_addr(bm), alloc_size)) ++ return -EFAULT; + } + +- if (err) +- return -EFAULT; +- + return sys_mbind(start, len, mode, nm, nr_bits+1, flags); + } + diff --git a/queue-4.9/random-use-chacha20-for-get_random_int-long.patch b/queue-4.9/random-use-chacha20-for-get_random_int-long.patch new file mode 100644 index 00000000000..edc34a2c3c6 --- /dev/null +++ b/queue-4.9/random-use-chacha20-for-get_random_int-long.patch @@ -0,0 +1,177 @@ +From f5b98461cb8167ba362ad9f74c41d126b7becea7 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Fri, 6 Jan 2017 19:32:01 +0100 +Subject: random: use chacha20 for get_random_int/long + +From: Jason A. Donenfeld + +commit f5b98461cb8167ba362ad9f74c41d126b7becea7 upstream. + +Now that our crng uses chacha20, we can rely on its speedy +characteristics for replacing MD5, while simultaneously achieving a +higher security guarantee. Before the idea was to use these functions if +you wanted random integers that aren't stupidly insecure but aren't +necessarily secure either, a vague gray zone, that hopefully was "good +enough" for its users. With chacha20, we can strengthen this claim, +since either we're using an rdrand-like instruction, or we're using the +same crng as /dev/urandom. And it's faster than what was before. + +We could have chosen to replace this with a SipHash-derived function, +which might be slightly faster, but at the cost of having yet another +RNG construction in the kernel. By moving to chacha20, we have a single +RNG to analyze and verify, and we also already get good performance +improvements on all platforms. + +Implementation-wise, rather than use a generic buffer for both +get_random_int/long and memcpy based on the size needs, we use a +specific buffer for 32-bit reads and for 64-bit reads. 
This way, we're +guaranteed to always have aligned accesses on all platforms. While +slightly more verbose in C, the assembly this generates is a lot +simpler than otherwise. + +Finally, on 32-bit platforms where longs and ints are the same size, +we simply alias get_random_int to get_random_long. + +Signed-off-by: Jason A. Donenfeld +Suggested-by: Theodore Ts'o +Cc: Theodore Ts'o +Cc: Hannes Frederic Sowa +Cc: Andy Lutomirski +Signed-off-by: Theodore Ts'o +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/char/random.c | 84 +++++++++++++++++++++++++------------------------ + include/linux/random.h | 1 + init/main.c | 1 + 3 files changed, 43 insertions(+), 43 deletions(-) + +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -2042,63 +2042,65 @@ struct ctl_table random_table[] = { + }; + #endif /* CONFIG_SYSCTL */ + +-static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; +- +-int random_int_secret_init(void) +-{ +- get_random_bytes(random_int_secret, sizeof(random_int_secret)); +- return 0; +-} +- +-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash) +- __aligned(sizeof(unsigned long)); ++struct batched_entropy { ++ union { ++ unsigned long entropy_long[CHACHA20_BLOCK_SIZE / sizeof(unsigned long)]; ++ unsigned int entropy_int[CHACHA20_BLOCK_SIZE / sizeof(unsigned int)]; ++ }; ++ unsigned int position; ++}; + + /* +- * Get a random word for internal kernel use only. Similar to urandom but +- * with the goal of minimal entropy pool depletion. As a result, the random +- * value is not cryptographically secure but for several uses the cost of +- * depleting entropy is too high ++ * Get a random word for internal kernel use only. The quality of the random ++ * number is either as good as RDRAND or as good as /dev/urandom, with the ++ * goal of being quite fast and not depleting entropy. + */ +-unsigned int get_random_int(void) ++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long); ++unsigned long get_random_long(void) + { +- __u32 *hash; +- unsigned int ret; ++ unsigned long ret; ++ struct batched_entropy *batch; + +- if (arch_get_random_int(&ret)) ++ if (arch_get_random_long(&ret)) + return ret; + +- hash = get_cpu_var(get_random_int_hash); +- +- hash[0] += current->pid + jiffies + random_get_entropy(); +- md5_transform(hash, random_int_secret); +- ret = hash[0]; +- put_cpu_var(get_random_int_hash); +- ++ batch = &get_cpu_var(batched_entropy_long); ++ if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) { ++ extract_crng((u8 *)batch->entropy_long); ++ batch->position = 0; ++ } ++ ret = batch->entropy_long[batch->position++]; ++ put_cpu_var(batched_entropy_long); + return ret; + } +-EXPORT_SYMBOL(get_random_int); ++EXPORT_SYMBOL(get_random_long); + +-/* +- * Same as get_random_int(), but returns unsigned long. 
+- */ +-unsigned long get_random_long(void) ++#if BITS_PER_LONG == 32 ++unsigned int get_random_int(void) + { +- __u32 *hash; +- unsigned long ret; ++ return get_random_long(); ++} ++#else ++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int); ++unsigned int get_random_int(void) ++{ ++ unsigned int ret; ++ struct batched_entropy *batch; + +- if (arch_get_random_long(&ret)) ++ if (arch_get_random_int(&ret)) + return ret; + +- hash = get_cpu_var(get_random_int_hash); +- +- hash[0] += current->pid + jiffies + random_get_entropy(); +- md5_transform(hash, random_int_secret); +- ret = *(unsigned long *)hash; +- put_cpu_var(get_random_int_hash); +- ++ batch = &get_cpu_var(batched_entropy_int); ++ if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) { ++ extract_crng((u8 *)batch->entropy_int); ++ batch->position = 0; ++ } ++ ret = batch->entropy_int[batch->position++]; ++ put_cpu_var(batched_entropy_int); + return ret; + } +-EXPORT_SYMBOL(get_random_long); ++#endif ++EXPORT_SYMBOL(get_random_int); + + /** + * randomize_page - Generate a random, page aligned address +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -37,7 +37,6 @@ extern void get_random_bytes(void *buf, + extern int add_random_ready_callback(struct random_ready_callback *rdy); + extern void del_random_ready_callback(struct random_ready_callback *rdy); + extern void get_random_bytes_arch(void *buf, int nbytes); +-extern int random_int_secret_init(void); + + #ifndef MODULE + extern const struct file_operations random_fops, urandom_fops; +--- a/init/main.c ++++ b/init/main.c +@@ -868,7 +868,6 @@ static void __init do_basic_setup(void) + do_ctors(); + usermodehelper_enable(); + do_initcalls(); +- random_int_secret_init(); + } + + static void __init do_pre_smp_initcalls(void) diff --git a/queue-4.9/series b/queue-4.9/series index d70b9bd2696..89fa02bd244 100644 --- a/queue-4.9/series +++ b/queue-4.9/series @@ -51,3 +51,6 @@ mips-check-tlb-before-handle_ri_rdhwr-for-loongson-3.patch mips-add-mips_cpu_ftlb-for-loongson-3a-r2.patch mips-flush-wrong-invalid-ftlb-entry-for-huge-page.patch mips-c-r4k-fix-loongson-3-s-vcache-scache-waysize-calculation.patch +documentation-stable-kernel-rules-fix-stable-tag-format.patch +mm-mempolicy.c-fix-error-handling-in-set_mempolicy-and-mbind.patch +random-use-chacha20-for-get_random_int-long.patch -- 2.47.3