]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
s390/bitops: Use __assume() for __flogr() inline assembly return value
Author: Heiko Carstens <hca@linux.ibm.com>
Tue, 16 Sep 2025 13:48:02 +0000 (15:48 +0200)
Committer: Alexander Gordeev <agordeev@linux.ibm.com>
Thu, 18 Sep 2025 12:06:40 +0000 (14:06 +0200)
Use __assume() to tell compilers that the output operand of the __flogr()
inline assembly contains a value in the range of 0..64. This allows the
compiler to optimize the logical AND operation away.

This reduces the kernel image size by 2804 bytes (defconfig, gcc 15.2.0).

Suggested-by: Juergen Christ <jchrist@linux.ibm.com>
Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
arch/s390/include/asm/bitops.h

index 9bc70acbac9ec48ae92a4a4937e8e1dc70c0acf9..ac94672db8170179dcb98365c1f5de5db273d50a 100644 (file)
@@ -132,9 +132,10 @@ static inline bool test_bit_inv(unsigned long nr,
  */
 static __always_inline unsigned char __flogr(unsigned long word)
 {
-       if (__builtin_constant_p(word)) {
-               unsigned long bit = 0;
+       unsigned long bit;
 
+       if (__builtin_constant_p(word)) {
+               bit = 0;
                if (!word)
                        return 64;
                if (!(word & 0xffffffff00000000UL)) {
@@ -169,7 +170,14 @@ static __always_inline unsigned char __flogr(unsigned long word)
                asm volatile(
                        "       flogr   %[rp],%[rp]\n"
                        : [rp] "+d" (rp.pair) : : "cc");
-               return rp.even & 127;
+               bit = rp.even;
+               /*
+                * The result of the flogr instruction is a value in the range
+                * of 0..64. Let the compiler know that the AND operation can
+                * be optimized away.
+                */
+               __assume(bit <= 64);
+               return bit & 127;
        }
 }