s390: Remove superfluous new lines from inline assemblies
author Heiko Carstens <hca@linux.ibm.com>
Wed, 11 Dec 2024 11:58:03 +0000 (12:58 +0100)
committer Alexander Gordeev <agordeev@linux.ibm.com>
Sun, 15 Dec 2024 15:19:03 +0000 (16:19 +0100)
GCC uses the number of lines of an inline assembly to calculate its length
(number of instructions). This has an impact on GCC's inlining decisions.

Therefore remove superfluous new lines from a couple of inline
assemblies, so that their real size is reflected.
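
(Illustrative sketch, not part of the commit: per the GCC documentation,
the size estimate for an asm statement is the number of instructions,
obtained by counting newlines and statement separators, multiplied by the
length of the longest instruction. Both helpers below emit a single nopr,
but the first is costed as two instructions because the leading "\n"
starts an extra line in the template.)

	static inline void nop_padded(void)
	{
		/* leading "\n" adds a line that GCC counts */
		asm volatile("\n"
			"	nopr	%r7\n");
	}

	static inline void nop_plain(void)
	{
		/* one line in the template, costed as one instruction */
		asm volatile("	nopr	%r7\n");
	}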

Also use an "asm inline" statement for the fpu_lfpc_safe() inline assembly
to enforce that GCC assumes the minimum size for this inline assembly,
since it contains various directives which make it appear much larger than
the resulting code is.
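
(For reference, a simplified sketch of how the kernel spells "asm inline":
the asm_inline macro from include/linux/compiler_types.h expands to GCC's
"asm inline" keyword where the compiler supports it, which makes GCC assume
the minimum possible size for the statement; the __inline spelling is used
because the kernel redefines "inline" itself.)

	#ifdef CONFIG_CC_HAS_ASM_INLINE
	#define asm_inline asm __inline
	#else
	#define asm_inline asm
	#endif

This matters for fpu_lfpc_safe() because most of its template consists of
fixup-section directives (.pushsection/.popsection) that emit no code at
the call site, so a line-count based estimate would grossly overstate its
size.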

Suggested-by: Juergen Christ <jchrist@linux.ibm.com>
Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
arch/s390/include/asm/atomic_ops.h
arch/s390/include/asm/checksum.h
arch/s390/include/asm/fpu-insn.h

diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 90573508d0454e5a9016ce86be42cc464330e69b..021503d81ed16802f72aff175e94e8a7ed63babe 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -73,7 +73,7 @@ static __always_inline op_type op_name(op_type val, op_type *ptr)     \
 }                                                                      \
 
 #define __ATOMIC_OPS(op_name, op_type, op_string)                      \
-       __ATOMIC_OP(op_name, op_type, op_string, "\n")                  \
+       __ATOMIC_OP(op_name, op_type, op_string, "")                    \
        __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
 
 __ATOMIC_OPS(__atomic_add, int, "laa")
@@ -99,7 +99,7 @@ static __always_inline void op_name(op_type val, op_type *ptr)                \
 }
 
 #define __ATOMIC_CONST_OPS(op_name, op_type, op_string)                        \
-       __ATOMIC_CONST_OP(op_name, op_type, op_string, "\n")            \
+       __ATOMIC_CONST_OP(op_name, op_type, op_string, "")              \
        __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
 
 __ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index 46f5c96606160242559c50b216f8114b6baa1490..d86dea5900e72296f40eb16db70155f7dd54593f 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -25,7 +25,7 @@ static inline __wsum cksm(const void *buff, int len, __wsum sum)
 
        instrument_read(buff, len);
        kmsan_check_memory(buff, len);
-       asm volatile("\n"
+       asm volatile(
                "0:     cksm    %[sum],%[rp]\n"
                "       jo      0b\n"
                : [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
index c1e2e521d9af7c51a9f740c08c45d7a97e6964b8..de510c9f6efa94994fb9b97a7da08ef2720d7bb3 100644
--- a/arch/s390/include/asm/fpu-insn.h
+++ b/arch/s390/include/asm/fpu-insn.h
@@ -103,7 +103,7 @@ static inline void fpu_lfpc_safe(unsigned int *fpc)
        u32 tmp;
 
        instrument_read(fpc, sizeof(*fpc));
-       asm volatile("\n"
+       asm_inline volatile(
                "0:     lfpc    %[fpc]\n"
                "1:     nopr    %%r7\n"
                ".pushsection .fixup, \"ax\"\n"
@@ -188,7 +188,7 @@ static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
 static __always_inline void fpu_vl(u8 v1, const void *vxr)
 {
        instrument_read(vxr, sizeof(__vector128));
-       asm volatile("\n"
+       asm volatile(
                "       la      1,%[vxr]\n"
                "       VL      %[v1],0,,1\n"
                :
@@ -246,7 +246,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
 
        size = min(index + 1, sizeof(__vector128));
        instrument_read(vxr, size);
-       asm volatile("\n"
+       asm volatile(
                "       la      1,%[vxr]\n"
                "       VLL     %[v1],%[index],0,1\n"
                :
@@ -284,7 +284,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
        } *_v = (void *)(_vxrs);                                        \
                                                                        \
        instrument_read(_v, size);                                      \
-       asm volatile("\n"                                               \
+       asm volatile(                                                   \
                "       la      1,%[vxrs]\n"                            \
                "       VLM     %[v1],%[v3],0,1\n"                      \
                :                                                       \
@@ -367,7 +367,7 @@ static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
 static __always_inline void fpu_vst(u8 v1, const void *vxr)
 {
        instrument_write(vxr, sizeof(__vector128));
-       asm volatile("\n"
+       asm volatile(
                "       la      1,%[vxr]\n"
                "       VST     %[v1],0,,1\n"
                : [vxr] "=R" (*(__vector128 *)vxr)
@@ -396,7 +396,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
 
        size = min(index + 1, sizeof(__vector128));
        instrument_write(vxr, size);
-       asm volatile("\n"
+       asm volatile(
                "       la      1,%[vxr]\n"
                "       VSTL    %[v1],%[index],0,1\n"
                : [vxr] "=R" (*(u8 *)vxr)
@@ -430,7 +430,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
        } *_v = (void *)(_vxrs);                                        \
                                                                        \
        instrument_write(_v, size);                                     \
-       asm volatile("\n"                                               \
+       asm volatile(                                                   \
                "       la      1,%[vxrs]\n"                            \
                "       VSTM    %[v1],%[v3],0,1\n"                      \
                : [vxrs] "=R" (*_v)                                     \