git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
arm64: mm: Simplify __TLBI_RANGE_NUM() macro
author: Will Deacon <will@kernel.org>
Mon, 2 Mar 2026 13:55:54 +0000 (13:55 +0000)
committer: Catalin Marinas <catalin.marinas@arm.com>
Fri, 13 Mar 2026 17:23:04 +0000 (17:23 +0000)
Since commit e2768b798a19 ("arm64/mm: Modify range-based tlbi to
decrement scale"), we don't need to clamp the 'pages' argument to fit
the range for the specified 'scale' as we know that the upper bits will
have been processed in a prior iteration.

Drop the clamping and simplify the __TLBI_RANGE_NUM() macro.

Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/tlbflush.h

index 3c05afdbe3a6999db2f00c85423756e6ae470c81..fb7e541cfdfd9f4bd3a4ab1dc354a6b251a0369f 100644 (file)
@@ -199,11 +199,7 @@ static inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
  * range.
  */
 #define __TLBI_RANGE_NUM(pages, scale)                                 \
-       ({                                                              \
-               int __pages = min((pages),                              \
-                                 __TLBI_RANGE_PAGES(31, (scale)));     \
-               (__pages >> (5 * (scale) + 1)) - 1;                     \
-       })
+       (((pages) >> (5 * (scale) + 1)) - 1)
 
 #define __repeat_tlbi_sync(op, arg...)                                         \
 do {                                                                           \