From: Greg Kroah-Hartman
Date: Sun, 7 Mar 2021 15:16:26 +0000 (+0100)
Subject: 5.11-stable patches
X-Git-Tag: v5.4.104~21
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=f9bc158b006a149f85775794350981fa33ee11e5;p=thirdparty%2Fkernel%2Fstable-queue.git

5.11-stable patches

added patches:
      crypto-shash-reduce-minimum-alignment-of-shash_desc-structure.patch
---

diff --git a/queue-5.11/crypto-shash-reduce-minimum-alignment-of-shash_desc-structure.patch b/queue-5.11/crypto-shash-reduce-minimum-alignment-of-shash_desc-structure.patch
new file mode 100644
index 00000000000..3be5ab8bf51
--- /dev/null
+++ b/queue-5.11/crypto-shash-reduce-minimum-alignment-of-shash_desc-structure.patch
@@ -0,0 +1,84 @@
+From 660d2062190db131d2feaf19914e90f868fe285c Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel
+Date: Wed, 13 Jan 2021 10:11:35 +0100
+Subject: crypto - shash: reduce minimum alignment of shash_desc structure
+
+From: Ard Biesheuvel
+
+commit 660d2062190db131d2feaf19914e90f868fe285c upstream.
+
+Unlike many other structure types defined in the crypto API, the
+'shash_desc' structure is permitted to live on the stack, which
+implies its contents may not be accessed by DMA masters. (This is
+due to the fact that the stack may be located in the vmalloc area,
+which requires a different virtual-to-physical translation than the
+one implemented by the DMA subsystem)
+
+Our definition of CRYPTO_MINALIGN_ATTR is based on ARCH_KMALLOC_MINALIGN,
+which may take DMA constraints into account on architectures that support
+non-cache coherent DMA such as ARM and arm64. In this case, the value is
+chosen to reflect the largest cacheline size in the system, in order to
+ensure that explicit cache maintenance as required by non-coherent DMA
+masters does not affect adjacent, unrelated slab allocations. On arm64,
+this value is currently set at 128 bytes.
+
+This means that applying CRYPTO_MINALIGN_ATTR to struct shash_desc is both
+unnecessary (as it is never used for DMA), and undesirable, given that it
+wastes stack space (on arm64, performing the alignment costs 112 bytes in
+the worst case, and the hole between the 'tfm' and '__ctx' members takes
+up another 120 bytes, resulting in an increased stack footprint of up to
+232 bytes.) So instead, let's switch to the minimum SLAB alignment, which
+does not take DMA constraints into account.
+
+Note that this is a no-op for x86.
+
+Signed-off-by: Ard Biesheuvel
+Signed-off-by: Herbert Xu
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/crypto/hash.h  | 8 ++++----
+ include/linux/crypto.h | 9 ++++++---
+ 2 files changed, 10 insertions(+), 7 deletions(-)
+
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -149,7 +149,7 @@ struct ahash_alg {
+ 
+ struct shash_desc {
+ 	struct crypto_shash *tfm;
+-	void *__ctx[] CRYPTO_MINALIGN_ATTR;
++	void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
+ };
+ 
+ #define HASH_MAX_DIGESTSIZE 64
+@@ -162,9 +162,9 @@ struct shash_desc {
+ 
+ #define HASH_MAX_STATESIZE 512
+ 
+-#define SHASH_DESC_ON_STACK(shash, ctx) \
+-	char __##shash##_desc[sizeof(struct shash_desc) + \
+-		HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
++#define SHASH_DESC_ON_STACK(shash, ctx) \
++	char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
++		__aligned(__alignof__(struct shash_desc)); \
+ 	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+ 
+ /**
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -151,9 +151,12 @@
+  * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
+  * declaration) is used to ensure that the crypto_tfm context structure is
+  * aligned correctly for the given architecture so that there are no alignment
+- * faults for C data types. In particular, this is required on platforms such
+- * as arm where pointers are 32-bit aligned but there are data types such as
+- * u64 which require 64-bit alignment.
++ * faults for C data types. On architectures that support non-cache coherent
++ * DMA, such as ARM or arm64, it also takes into account the minimal alignment
++ * that is required to ensure that the context struct member does not share any
++ * cachelines with the rest of the struct. This is needed to ensure that cache
++ * maintenance for non-coherent DMA (cache invalidation in particular) does not
++ * affect data that may be accessed by the CPU concurrently.
+  */
+ #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
+ 
diff --git a/queue-5.11/series b/queue-5.11/series
index 855083d1dfd..503567ed937 100644
--- a/queue-5.11/series
+++ b/queue-5.11/series
@@ -26,3 +26,4 @@ drm-amd-pm-correct-arcturus-mmthm_baco_cntl-register-address.patch
 drm-amdgpu-disable-vcn-for-navi12-sku.patch
 drm-amdgpu-only-check-for-s0ix-if-amd_pmc-is-configured.patch
 drm-amdgpu-fix-parameter-error-of-rreg32_pcie-in-amdgpu_regs_pcie.patch
+crypto-shash-reduce-minimum-alignment-of-shash_desc-structure.patch