From: Sasha Levin
Date: Tue, 14 Apr 2020 03:00:48 +0000 (-0400)
Subject: Fixes for 4.19
X-Git-Tag: v4.19.116~100
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=7d3b346b62005df3ab5eabcf5ddffcba7b720116;p=thirdparty%2Fkernel%2Fstable-queue.git

Fixes for 4.19

Signed-off-by: Sasha Levin
---

diff --git a/queue-4.19/ib-mlx5-replace-tunnel-mpls-capability-bits-for-tunn.patch b/queue-4.19/ib-mlx5-replace-tunnel-mpls-capability-bits-for-tunn.patch
new file mode 100644
index 00000000000..cbea2fc2444
--- /dev/null
+++ b/queue-4.19/ib-mlx5-replace-tunnel-mpls-capability-bits-for-tunn.patch
@@ -0,0 +1,77 @@
+From 6e4b0ff386320f03b54109e6af86267d6485a803 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 5 Mar 2020 14:38:41 +0200
+Subject: IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads
+
+From: Alex Vesker
+
+[ Upstream commit 41e684ef3f37ce6e5eac3fb5b9c7c1853f4b0447 ]
+
+Until now the flex parser capability was used in ib_query_device() to
+indicate tunnel_offloads_caps support for mpls_over_gre/mpls_over_udp.
+
+Newer devices and firmware will have configurations with the flexparser
+but without mpls support.
+
+Testing for the flex parser capability was a mistake, the tunnel_stateless
+capability was intended for detecting mpls and was introduced at the same
+time as the flex parser capability.
+
+Otherwise userspace will be incorrectly informed that a future device
+supports MPLS when it does not.
+
+Link: https://lore.kernel.org/r/20200305123841.196086-1-leon@kernel.org
+Cc: # 4.17
+Fixes: e818e255a58d ("IB/mlx5: Expose MPLS related tunneling offloads")
+Signed-off-by: Alex Vesker
+Reviewed-by: Ariel Levkovich
+Signed-off-by: Leon Romanovsky
+Signed-off-by: Jason Gunthorpe
+Signed-off-by: Sasha Levin
+---
+ drivers/infiniband/hw/mlx5/main.c |  6 ++----
+ include/linux/mlx5/mlx5_ifc.h     |  9 ++++++++-
+ 2 files changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 2db34f7b5ced1..f41f3ff689c55 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1070,12 +1070,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ 			resp.tunnel_offloads_caps |=
+ 				MLX5_IB_TUNNELED_OFFLOADS_GRE;
+-		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+-		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
++		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
+ 			resp.tunnel_offloads_caps |=
+ 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
+-		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+-		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
++		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
+ 			resp.tunnel_offloads_caps |=
+ 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
+ 	}
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 76b76b6aa83d0..b87b1569d15b5 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -672,7 +672,14 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ 	u8         swp[0x1];
+ 	u8         swp_csum[0x1];
+ 	u8         swp_lso[0x1];
+-	u8         reserved_at_23[0xd];
++	u8         cqe_checksum_full[0x1];
++	u8         tunnel_stateless_geneve_tx[0x1];
++	u8         tunnel_stateless_mpls_over_udp[0x1];
++	u8         tunnel_stateless_mpls_over_gre[0x1];
++	u8         tunnel_stateless_vxlan_gpe[0x1];
++	u8         tunnel_stateless_ipv4_over_vxlan[0x1];
++	u8         tunnel_stateless_ip_over_ip[0x1];
++	u8         reserved_at_2a[0x6];
+ 	u8         max_vxlan_udp_ports[0x8];
+ 	u8         reserved_at_38[0x6];
+ 	u8         max_geneve_opt_len[0x1];
+-- 
+2.20.1
+
diff --git a/queue-4.19/series b/queue-4.19/series
index 49075c4c4ee..2109cf79dd1 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -38,3 +38,6 @@ block-bfq-fix-use-after-free-in-bfq_idle_slice_timer.patch
 btrfs-qgroup-ensure-qgroup_rescan_running-is-only-se.patch
 btrfs-remove-a-bug_on-from-merge_reloc_roots.patch
 btrfs-track-reloc-roots-based-on-their-commit-root-b.patch
+ib-mlx5-replace-tunnel-mpls-capability-bits-for-tunn.patch
+uapi-rename-ext2_swab-to-swab-and-share-globally-in-.patch
+slub-improve-bit-diffusion-for-freelist-ptr-obfuscat.patch
diff --git a/queue-4.19/slub-improve-bit-diffusion-for-freelist-ptr-obfuscat.patch b/queue-4.19/slub-improve-bit-diffusion-for-freelist-ptr-obfuscat.patch
new file mode 100644
index 00000000000..140cb8a385d
--- /dev/null
+++ b/queue-4.19/slub-improve-bit-diffusion-for-freelist-ptr-obfuscat.patch
@@ -0,0 +1,73 @@
+From cc7031c39518a3889809759712f6cee8032e4250 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 1 Apr 2020 21:04:23 -0700
+Subject: slub: improve bit diffusion for freelist ptr obfuscation
+
+From: Kees Cook
+
+[ Upstream commit 1ad53d9fa3f6168ebcf48a50e08b170432da2257 ]
+
+Under CONFIG_SLAB_FREELIST_HARDENED=y, the obfuscation was relatively weak
+in that the ptr and ptr address were usually so close that the first XOR
+would result in an almost entirely 0-byte value[1], leaving most of the
+"secret" number ultimately being stored after the third XOR.  A single
+blind memory content exposure of the freelist was generally sufficient to
+learn the secret.
+
+Add a swab() call to mix bits a little more.  This is a cheap way (1
+cycle) to make attacks need more than a single exposure to learn the
+secret (or to know _where_ the exposure is in memory).
+
+kmalloc-32 freelist walk, before:
+
+ptr              ptr_addr            stored value      secret
+ffff90c22e019020@ffff90c22e019000 is 86528eb656b3b5bd (86528eb656b3b59d)
+ffff90c22e019040@ffff90c22e019020 is 86528eb656b3b5fd (86528eb656b3b59d)
+ffff90c22e019060@ffff90c22e019040 is 86528eb656b3b5bd (86528eb656b3b59d)
+ffff90c22e019080@ffff90c22e019060 is 86528eb656b3b57d (86528eb656b3b59d)
+ffff90c22e0190a0@ffff90c22e019080 is 86528eb656b3b5bd (86528eb656b3b59d)
+...
+
+after:
+
+ptr              ptr_addr            stored value      secret
+ffff9eed6e019020@ffff9eed6e019000 is 793d1135d52cda42 (86528eb656b3b59d)
+ffff9eed6e019040@ffff9eed6e019020 is 593d1135d52cda22 (86528eb656b3b59d)
+ffff9eed6e019060@ffff9eed6e019040 is 393d1135d52cda02 (86528eb656b3b59d)
+ffff9eed6e019080@ffff9eed6e019060 is 193d1135d52cdae2 (86528eb656b3b59d)
+ffff9eed6e0190a0@ffff9eed6e019080 is f93d1135d52cdac2 (86528eb656b3b59d)
+
+[1] https://blog.infosectcbr.com.au/2020/03/weaknesses-in-linux-kernel-heap.html
+
+Fixes: 2482ddec670f ("mm: add SLUB free list pointer obfuscation")
+Reported-by: Silvio Cesare
+Signed-off-by: Kees Cook
+Signed-off-by: Andrew Morton
+Cc: Christoph Lameter
+Cc: Pekka Enberg
+Cc: David Rientjes
+Cc: Joonsoo Kim
+Cc:
+Link: http://lkml.kernel.org/r/202003051623.AF4F8CB@keescook
+Signed-off-by: Linus Torvalds
+Signed-off-by: Sasha Levin
+---
+ mm/slub.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 9b7b989273d41..11e5615649ee9 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -249,7 +249,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
+ 				 unsigned long ptr_addr)
+ {
+ #ifdef CONFIG_SLAB_FREELIST_HARDENED
+-	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
++	return (void *)swab((unsigned long)ptr ^ s->random ^ ptr_addr);
+ #else
+ 	return ptr;
+ #endif
+-- 
+2.20.1
+
diff --git a/queue-4.19/uapi-rename-ext2_swab-to-swab-and-share-globally-in-.patch b/queue-4.19/uapi-rename-ext2_swab-to-swab-and-share-globally-in-.patch
new file mode 100644
index 00000000000..b76ec7ffe13
--- /dev/null
+++ b/queue-4.19/uapi-rename-ext2_swab-to-swab-and-share-globally-in-.patch
@@ -0,0 +1,117 @@
+From a1ed208290c247e894242f0db4ac7a526d917f3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 30 Jan 2020 22:16:40 -0800
+Subject: uapi: rename ext2_swab() to swab() and share globally in swab.h
+
+From: Yury Norov
+
+[ Upstream commit d5767057c9a76a29f073dad66b7fa12a90e8c748 ]
+
+ext2_swab() is defined locally in lib/find_bit.c However it is not
+specific to ext2, neither to bitmaps.
+
+There are many potential users of it, so rename it to just swab() and
+move to include/uapi/linux/swab.h
+
+ABI guarantees that size of unsigned long corresponds to BITS_PER_LONG,
+therefore drop unneeded cast.
+
+Link: http://lkml.kernel.org/r/20200103202846.21616-1-yury.norov@gmail.com
+Signed-off-by: Yury Norov
+Cc: Allison Randal
+Cc: Joe Perches
+Cc: Thomas Gleixner
+Cc: William Breathitt Gray
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Sasha Levin
+---
+ include/linux/swab.h      |  1 +
+ include/uapi/linux/swab.h | 10 ++++++++++
+ lib/find_bit.c            | 16 ++--------------
+ 3 files changed, 13 insertions(+), 14 deletions(-)
+
+diff --git a/include/linux/swab.h b/include/linux/swab.h
+index e466fd159c857..bcff5149861a9 100644
+--- a/include/linux/swab.h
++++ b/include/linux/swab.h
+@@ -7,6 +7,7 @@
+ # define swab16 __swab16
+ # define swab32 __swab32
+ # define swab64 __swab64
++# define swab __swab
+ # define swahw32 __swahw32
+ # define swahb32 __swahb32
+ # define swab16p __swab16p
+diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
+index 23cd84868cc3b..fa7f97da5b768 100644
+--- a/include/uapi/linux/swab.h
++++ b/include/uapi/linux/swab.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/compiler.h>
++#include <asm/bitsperlong.h>
+ #include <asm/swab.h>
+ 
+ /*
+@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
+ 	__fswab64(x))
+ #endif
+ 
++static __always_inline unsigned long __swab(const unsigned long y)
++{
++#if BITS_PER_LONG == 64
++	return __swab64(y);
++#else /* BITS_PER_LONG == 32 */
++	return __swab32(y);
++#endif
++}
++
+ /**
+  * __swahw32 - return a word-swapped 32-bit value
+  * @x: value to wordswap
+diff --git a/lib/find_bit.c b/lib/find_bit.c
+index ee3df93ba69af..8a5492173267d 100644
+--- a/lib/find_bit.c
++++ b/lib/find_bit.c
+@@ -153,18 +153,6 @@ EXPORT_SYMBOL(find_last_bit);
+ 
+ #ifdef __BIG_ENDIAN
+ 
+-/* include/linux/byteorder does not support "unsigned long" type */
+-static inline unsigned long ext2_swab(const unsigned long y)
+-{
+-#if BITS_PER_LONG == 64
+-	return (unsigned long) __swab64((u64) y);
+-#elif BITS_PER_LONG == 32
+-	return (unsigned long) __swab32((u32) y);
+-#else
+-#error BITS_PER_LONG not defined
+-#endif
+-}
+-
+ #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
+ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
+ 		const unsigned long *addr2, unsigned long nbits,
+@@ -181,7 +169,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
+ 	tmp ^= invert;
+ 
+ 	/* Handle 1st word. */
+-	tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
++	tmp &= swab(BITMAP_FIRST_WORD_MASK(start));
+ 	start = round_down(start, BITS_PER_LONG);
+ 
+ 	while (!tmp) {
+@@ -195,7 +183,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
+ 		tmp ^= invert;
+ 	}
+ 
+-	return min(start + __ffs(ext2_swab(tmp)), nbits);
++	return min(start + __ffs(swab(tmp)), nbits);
+ }
+ #endif
+ 
+-- 
+2.20.1
+
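As a side note for readers of the slub change queued above, the stand-alone user-space sketch below shows why the extra byte swap matters: with the plain XOR scheme the stored freelist word differs from the per-cache secret only in a few low bits (ptr and ptr_addr are nearly equal), while byte-swapping the XOR result spreads that small difference across the whole word. This is an illustration only and not part of the queued patches; demo_swab(), store_old(), store_new() and the secret/base constants are made-up names and values, and it assumes a 64-bit (LP64) build with GCC/Clang byte-swap builtins standing in for the kernel's swab().

#include <stdio.h>

/* Word-size byte swap, mirroring the __swab() dispatch on BITS_PER_LONG
 * added by the uapi patch (builtins used here instead of kernel helpers). */
static unsigned long demo_swab(unsigned long x)
{
#if __SIZEOF_LONG__ == 8
	return __builtin_bswap64(x);
#else
	return __builtin_bswap32(x);
#endif
}

/* Old scheme: ptr ^ secret ^ ptr_addr.  Adjacent freelist entries have
 * ptr and ptr_addr that differ only in the low bits, so the stored word
 * is almost the secret itself. */
static unsigned long store_old(unsigned long ptr, unsigned long secret,
			       unsigned long ptr_addr)
{
	return ptr ^ secret ^ ptr_addr;
}

/* New scheme: byte-swap the XOR result so a single leaked word no longer
 * lines up byte-for-byte with the secret. */
static unsigned long store_new(unsigned long ptr, unsigned long secret,
			       unsigned long ptr_addr)
{
	return demo_swab(ptr ^ secret ^ ptr_addr);
}

int main(void)
{
	unsigned long secret = 0x86528eb656b3b59dUL;	/* made-up per-cache secret */
	unsigned long base   = 0xffff90c22e019000UL;	/* made-up slab address */
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long ptr_addr = base + i * 0x20;	/* where the link is stored */
		unsigned long ptr      = ptr_addr + 0x20;	/* next object on the freelist */

		printf("old %016lx   new %016lx   secret %016lx\n",
		       store_old(ptr, secret, ptr_addr),
		       store_new(ptr, secret, ptr_addr),
		       secret);
	}
	return 0;
}

Compiled with gcc on a 64-bit host, the "old" column stays within a byte or two of the secret from object to object, while the "new" column changes in its top byte instead, which is the effect the before/after freelist walks in the commit message demonstrate.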