From: Paul Pluzhnikov
Date: Mon, 22 May 2023 03:40:33 +0000 (+0000)
Subject: Fix misspellings in sysdeps/x86_64/fpu/multiarch -- BZ 25337.
X-Git-Tag: glibc-2.38~211
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=1d2971b525396e9935f3d90616a1668ceca425e5;p=thirdparty%2Fglibc.git

Fix misspellings in sysdeps/x86_64/fpu/multiarch -- BZ 25337.

Applying this commit results in a bit-identical rebuild of
mathvec/libmvec.so.1 (which is the only binary that gets rebuilt).

Reviewed-by: Noah Goldstein
---
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S
index 840c3d6a177..a46ddc136ec 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S
@@ -222,7 +222,7 @@ L(SPECIAL_VALUES_LOOP):
 cfi_offset(14, -80)
 # LOE rbx rbp r12 r13 r14 r15 xmm0
- /* Scalar math fucntion call
+ /* Scalar math function call
 * to process special input
 */
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S
index 3c752005784..808ea2fe956 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S
@@ -204,7 +204,7 @@ L(SPECIAL_VALUES_LOOP):
 .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 # LOE rbx r12 r13 r14 r15 ymm0
- /* Scalar math fucntion call
+ /* Scalar math function call
 * to process special input
 */
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S
index 0647a2e1f72..878d1454c6d 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S
@@ -226,7 +226,7 @@ L(SPECIAL_VALUES_LOOP):
 .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 # LOE rbx r12 r13 r14 r15 zmm0
- /* Scalar math fucntion call
+ /* Scalar math function call
 * to process special input
 */
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S
index 8a56813ff02..b69e5cef8b5 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S
@@ -321,7 +321,7 @@ L(SPECIAL_VALUES_LOOP):
 .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
 # LOE rbx r12 r13 r14 r15 xmm0
- /* Scalar math fucntion call
+ /* Scalar math function call
 * to process special input
 */
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S
index f16f539fb6d..825b2311730 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S
@@ -366,7 +366,7 @@ L(SPECIAL_VALUES_LOOP):
 .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 # LOE rbx r12 r13 r14 r15 ymm0
- /* Scalar math fucntion call
+ /* Scalar math function call
 * to process special input
 */
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S
index 1a3211bf43c..32ed85e3681 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S
@@ -311,7 +311,7 @@
L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S index 9fb9ddcf3da..7bba3b52720 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S @@ -211,7 +211,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S index af6fa771c5c..c7dbb727e34 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S @@ -196,7 +196,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S index 2a0f6d43785..c23665b9b22 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S @@ -218,7 +218,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S index a3630b11457..f4da4b2c328 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S @@ -474,7 +474,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S index d97a5f845f6..3ecec43c665 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S @@ -423,7 +423,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S index b4d88848b51..82bd52407d0 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S @@ -337,7 +337,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process 
special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S index 7d14cb8cb4a..39d86480e4d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S @@ -229,11 +229,11 @@ ENTRY(_ZGVbN2vv_atan2_sse4) /* Special branch for fast (vector) processing of zero arguments */ testb $3, %cl - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx rbp r12 r13 r14 r15 eax edx xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -316,7 +316,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -96) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -336,7 +336,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx rbp r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S index 35b635dac70..a4bcf9c375a 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S @@ -170,11 +170,11 @@ ENTRY(_ZGVdN4vv_atan2_avx2) /* Special branch for fast (vector) processing of zero arguments */ testl %eax, %eax - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx xmm3 ymm0 ymm1 ymm2 ymm4 ymm5 ymm6 ymm7 ymm8 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -271,7 +271,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -291,7 +291,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S index 49662bc8c9e..def7af38dcb 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S @@ -188,11 +188,11 @@ ENTRY(_ZGVeN8vv_atan2_skx) vmovups 64(%rsp), %zmm9 testl %eax, %eax - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx ymm6 zmm0 zmm2 zmm3 zmm4 zmm5 zmm7 zmm9 zmm11 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -289,7 +289,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm11 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -309,7 +309,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S index 50345f026db..0a87c8cd81f 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S @@ -367,7 +367,7 
@@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S index 0e2f6cadae5..44517bea883 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S @@ -333,7 +333,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S index 7ba45c00566..99141c1f390 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S @@ -268,7 +268,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S index aa903227224..98b276f2e21 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S @@ -241,7 +241,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm6 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S index d0de65fde8a..45f395dccbe 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S @@ -256,7 +256,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S index c2a13245a81..dd89de036fe 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S @@ -260,7 +260,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S index c152307a25d..83309680636 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S @@ -276,7 +276,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S 
b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S index b4b2284a162..3e2aa620b29 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S index 5934986b520..a5f2f115089 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S @@ -251,7 +251,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S index 2948e6b3c31..376be17b34a 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S @@ -252,7 +252,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S index 5c92653e204..debba0c365d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S @@ -255,7 +255,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S index 65abd701684..db25e5b14d3 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S @@ -68,7 +68,7 @@ ENTRY(_ZGVbN2v_exp10_sse4) /* R */ movaps %xmm0, %xmm12 - /* Load arument */ + /* Load argument */ movups _dbLg2_10+__svml_dexp10_data_internal(%rip), %xmm13 lea __svml_dexp10_data_internal(%rip), %rsi mulpd %xmm0, %xmm13 @@ -214,7 +214,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S index 1c7c8e2db8d..c5cec289a7a 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S @@ -73,7 +73,7 @@ ENTRY(_ZGVdN4v_exp10_avx2) vmovapd %ymm0, %ymm2 vmovupd _dbShifter+__svml_dexp10_data_internal(%rip), %ymm3 - /* Load arument */ + /* Load argument */ vmovupd _dbLg2_10+__svml_dexp10_data_internal(%rip), %ymm0 vfmadd213pd %ymm3, %ymm2, %ymm0 vsubpd %ymm3, %ymm0, %ymm1 @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE 
rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S index 2f45c9292d3..9ea6a3d2047 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S @@ -23,7 +23,7 @@ * - all arguments processed in the main path * - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses) * - a VAND is used to ensure the reduced argument |R|<2, even for large inputs - * - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling + * - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling * - SAE used to avoid spurious flag settings * */ @@ -185,7 +185,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S index 0ffb56d9d4b..4c24aa8a2ea 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S @@ -67,7 +67,7 @@ ENTRY(_ZGVbN2v_exp2_sse4) /* out, basePtr, iIndex, iBaseOfs, iSize, iGran, iOfs */ lea __svml_dexp2_data_internal(%rip), %rsi - /* Load arument */ + /* Load argument */ movaps %xmm1, %xmm10 addpd %xmm0, %xmm10 movaps %xmm10, %xmm6 @@ -201,7 +201,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S index 9337921c63f..1e55f3db853 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S @@ -71,7 +71,7 @@ ENTRY(_ZGVdN4v_exp2_avx2) vmovupd _lIndexMask+__svml_dexp2_data_internal(%rip), %ymm3 vmovapd %ymm0, %ymm1 - /* Load arument */ + /* Load argument */ vaddpd %ymm4, %ymm1, %ymm2 vsubpd %ymm4, %ymm2, %ymm0 @@ -217,7 +217,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S index ab3db009101..7e759c445f1 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S @@ -221,7 +221,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S index 7e1df110e49..05be9079f5a 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S @@ -206,7 +206,7 @@ 
L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm6 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S index 815ef349356..ad0b49978cc 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S @@ -199,7 +199,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S index f38c694eb11..968801ab005 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S @@ -24,7 +24,7 @@ * - all arguments processed in the main path * - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses) * - a VAND is used to ensure the reduced argument |R|<2, even for large inputs - * - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling + * - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling * * */ @@ -205,7 +205,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S index 136f5ebd8de..07c3156cf79 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S @@ -47,7 +47,7 @@ * No multiprecision branch for _LA_ and _EP_ * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 * - * Check _z exponent to be withing borders [3BC ; 441] else goto Callout + * Check _z exponent to be within borders [3BC ; 441] else goto Callout * * _s ~ 1.0/sqrt(_z) * _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O) @@ -127,7 +127,7 @@ ENTRY(_ZGVbN2vv_hypot_sse4) mulpd %xmm10, %xmm11 mulpd %xmm10, %xmm2 - /* Check _z exponent to be withing borders [3BC ; 441] else goto Callout */ + /* Check _z exponent to be within borders [3BC ; 441] else goto Callout */ movq _LowBoundary+__svml_dhypot_data_internal(%rip), %xmm5 movq _HighBoundary+__svml_dhypot_data_internal(%rip), %xmm3 pshufd $221, %xmm10, %xmm4 @@ -215,7 +215,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -96) # LOE rbx rbp r12 r13 r14 r15 xmm2 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S index 61d12c9795c..d8c6a3ac43c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S @@ -47,7 +47,7 @@ * No multiprecision branch for _LA_ and _EP_ * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 * - * Check _z exponent to be withing borders [3BC ; 441] else goto Callout + * Check _z exponent to be within borders [3BC ; 441] 
else goto Callout * * _s ~ 1.0/sqrt(_z) * _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O) @@ -111,7 +111,7 @@ ENTRY(_ZGVdN4vv_hypot_avx2) */ vcvtpd2ps %ymm0, %xmm12 - /* Check _z exponent to be withing borders [3BC ; 441] else goto Callout */ + /* Check _z exponent to be within borders [3BC ; 441] else goto Callout */ vextractf128 $1, %ymm0, %xmm3 vrsqrtps %xmm12, %xmm13 vshufps $221, %xmm3, %xmm0, %xmm5 @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S index fb53d5dbd72..24ab764b7a2 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S @@ -47,7 +47,7 @@ * No multiprecision branch for _LA_ and _EP_ * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 * - * Check _z exponent to be withing borders [3BC ; 441] else goto Callout + * Check _z exponent to be within borders [3BC ; 441] else goto Callout * * _s ~ 1.0/sqrt(_z) * _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O) @@ -188,7 +188,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm2 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S index b2e75c1f239..de1583b394c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S @@ -227,7 +227,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm3 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S index 2e6ebac0eeb..8a9b8a84fba 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S @@ -219,7 +219,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S index b7593067c0c..b4e5a9ccea7 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S @@ -201,7 +201,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S index d0372e82c61..618b7e1e09f 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S +++ 
b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S @@ -265,7 +265,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S index d114653c713..dc2ccb32550 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S @@ -257,7 +257,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S index 283c40b689c..f5ec27ddb11 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S @@ -219,7 +219,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S index 93bf27092d2..29465643cd7 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm3 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S index 83d8d4c4627..30fa3e44737 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S @@ -217,7 +217,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S index bc9db384e91..351e00d1827 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S @@ -199,7 +199,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S index 03a703f5f15..3b01840d73f 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S @@ -260,7 +260,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* 
Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S index 26075187aee..585e2e51bfc 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S @@ -274,7 +274,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S index ce08de9dd07..8158d1455f8 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S @@ -265,7 +265,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S index 9fac5fa4bc8..9c208765af0 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S @@ -181,11 +181,11 @@ ENTRY(_ZGVbN2v_tan_sse4) movmskpd %xmm4, %edx testl %edx, %edx - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm1 xmm4 xmm5 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -264,7 +264,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -283,7 +283,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx rbp r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S index 8586565ddb0..82d2ceff9a6 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S @@ -166,11 +166,11 @@ ENTRY(_ZGVdN4v_tan_avx2) vxorpd %ymm0, %ymm8, %ymm0 testl %eax, %eax - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 r9d ymm0 ymm1 ymm14 ymm15 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -261,7 +261,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -280,7 +280,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S index 79deb21b2a2..c5738cef990 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S @@ -96,11 +96,11 @@ ENTRY(_ZGVeN8v_tan_skx) vfnmadd231pd {rn-sae}, 
%zmm8, %zmm3, %zmm5 vfnmadd213pd {rn-sae}, %zmm5, %zmm4, %zmm8 - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 zmm8 zmm11 k1 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -233,7 +233,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -252,7 +252,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S index 6fef5f08565..cbcb0d6a43e 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S @@ -259,7 +259,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S index c05f4c2079f..cf0182bf8a0 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S @@ -266,7 +266,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S index 70f0880049a..b3477a346b0 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S @@ -280,7 +280,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xfe, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S index 1c68130a87e..5bdc356429c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S @@ -199,7 +199,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S index 372beff631f..ac099d38c55 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S @@ -198,7 +198,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm7 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S index 9e2f3b0dfeb..76296d91c0b 100644 --- 
a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S @@ -192,7 +192,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S index 9ba81506ca9..ff7063499fa 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S @@ -284,7 +284,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S index 6c3cbf0c3b8..6a213dc2e4f 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S @@ -299,7 +299,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm9 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S index 45aede28ea9..17f6a19b3db 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S @@ -280,7 +280,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S index daa5cfa91e3..2ffe24e1ff4 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S @@ -192,7 +192,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S index 0718fa09b49..bc3e2f83402 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S @@ -184,7 +184,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S index 2199ed35d1b..41e015c490d 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S @@ -181,7 +181,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 
- /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S index 720b58f9564..592caa85da9 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S @@ -307,7 +307,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S index c78550ec22d..e5996b33469 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S @@ -403,7 +403,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S index f9aeea6c856..1e8fc229102 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S @@ -355,7 +355,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S index e031dadf193..08c193e273e 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S @@ -150,11 +150,11 @@ ENTRY(_ZGVeN16vv_atan2f_skx) vaddps {rn-sae}, %zmm11, %zmm9, %zmm9{%k4} vorps %zmm6, %zmm9, %zmm10 - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm10 zmm11 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -251,7 +251,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm10 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -271,7 +271,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S index 60426108b1a..0ec9b195907 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S @@ -157,11 +157,11 @@ ENTRY(_ZGVbN4vv_atan2f_sse4) /* Special branch for fast (vector) processing of zero arguments */ testl %ecx, %ecx - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx rbp r12 r13 r14 r15 eax edx xmm0 xmm1 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ 
@@ -244,7 +244,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -96) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -264,7 +264,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx rbp r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S index bf632c8a997..69619cb4d81 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S @@ -131,11 +131,11 @@ ENTRY(_ZGVdN8vv_atan2f_avx2) /* Special branch for fast (vector) processing of zero arguments */ testl %eax, %eax - /* Go to auxilary branch */ + /* Go to auxiliary branch */ jne L(AUX_BRANCH) # LOE rbx r12 r13 r14 r15 edx ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm9 ymm10 ymm12 ymm13 - /* Return from auxilary branch + /* Return from auxiliary branch * for out of main path inputs */ @@ -232,7 +232,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm9 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ @@ -252,7 +252,7 @@ L(SCALAR_MATH_CALL): cfi_restore(14) # LOE rbx r15 r12d r13d - /* Auxilary branch + /* Auxiliary branch * for out of main path inputs */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S index f733c7a1b5d..6c3d40d676c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S @@ -221,7 +221,7 @@ L(SPECIAL_VALUES_LOOP): xorl %ebp, %ebp tzcntl %ebx, %ebp - /* Scalar math fucntion call to process special input. */ + /* Scalar math function call to process special input. */ vmovss 64(%rsp, %rbp, 4), %xmm0 call atanhf@PLT diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S index 055484bfb27..ab2ef46b9dc 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S @@ -242,7 +242,7 @@ L(SPECIAL_VALUES_LOOP): xorl %ebp, %ebp bsfl %ebx, %ebp - /* Scalar math fucntion call to process special input. */ + /* Scalar math function call to process special input. */ movss 40(%rsp, %rbp, 4), %xmm0 call atanhf@PLT /* No good way to avoid the store-forwarding fault this will cause on diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S index 8ffe98cfe18..e70085b0511 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S @@ -230,7 +230,7 @@ L(SPECIAL_VALUES_LOOP): xorl %ebp, %ebp tzcntl %ebx, %ebp - /* Scalar math fucntion call to process special input. */ + /* Scalar math function call to process special input. 
*/ vmovss 32(%rsp, %rbp, 4), %xmm0 call atanhf@PLT diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S index f5331db13be..270e620d610 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S @@ -273,7 +273,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm12 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S index 76db762fe80..292eb5a93f0 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S @@ -298,7 +298,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S index 14696eeff4b..773594d4e04 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S @@ -222,7 +222,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm6 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S index 654ac65916a..ee987dd10fd 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S @@ -233,7 +233,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm2 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S index 474cb05473d..24692722eb7 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S @@ -236,7 +236,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S index 03b7e4adc1e..3d19dbd58a8 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S @@ -212,7 +212,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S index 02aa2b4f763..e7cae805795 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S +++ 
b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S @@ -219,7 +219,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S index c3e8e399db0..958b46dbfe4 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S @@ -237,7 +237,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S index e70e8c52ca0..f2d8130ee43 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S @@ -23,7 +23,7 @@ * - all arguments processed in the main path * - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses) * - a VAND is used to ensure the reduced argument |R|<2, even for large inputs - * - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling + * - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling * - SAE used to avoid spurious flag settings * */ @@ -180,7 +180,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S index 9de39a62c23..9eb215a40ff 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S @@ -63,7 +63,7 @@ ENTRY(_ZGVbN4v_exp10f_sse4) cfi_def_cfa_offset(80) movaps %xmm0, %xmm4 - /* Load arument */ + /* Load argument */ movups _sLg2_10+__svml_sexp10_data_internal(%rip), %xmm2 lea __svml_sexp10_data_internal(%rip), %r8 mulps %xmm4, %xmm2 @@ -212,7 +212,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S index e3087a75dc0..79563cc3535 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S @@ -69,7 +69,7 @@ ENTRY(_ZGVdN8v_exp10f_avx2) lea __svml_sexp10_data_internal(%rip), %rax vmovups _sShifter+__svml_sexp10_data_internal(%rip), %ymm4 - /* Load arument */ + /* Load argument */ vmovups _sLg2_10+__svml_sexp10_data_internal(%rip), %ymm1 vmovups _iIndexMask+__svml_sexp10_data_internal(%rip), %ymm2 vmovaps %ymm0, %ymm3 @@ -232,7 +232,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git 
a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S index 1911c06bcf5..ce983b297bd 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S @@ -203,7 +203,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S index f4ddfbe9329..512ea5c5bfd 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S @@ -175,7 +175,7 @@ L(SPECIAL_VALUES_LOOP): cfi_offset(14, -80) # LOE rbx rbp r12 r13 r14 r15 xmm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S index 277508b8ef5..47592985c1c 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S @@ -182,7 +182,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 ymm1 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S index 7aa1e3c417c..4683e546dec 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S @@ -24,7 +24,7 @@ * - all arguments processed in the main path * - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses) * - a VAND is used to ensure the reduced argument |R|<2, even for large inputs - * - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling + * - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling * * */ @@ -188,7 +188,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 zmm0 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S index 6a3a9d266cd..5159b0785a2 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S @@ -207,7 +207,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22 # LOE rbx r12 r13 r14 r15 xmm10 - /* Scalar math fucntion call + /* Scalar math function call * to process special input */ diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S index ee442d8c4ac..aae9068cc9a 100644 --- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S @@ -206,7 +206,7 @@ L(SPECIAL_VALUES_LOOP): .cfi_escape 0x10, 0x0e, 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S
index ee442d8c4ac..aae9068cc9a 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S
@@ -206,7 +206,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S
index 06c6903df2c..749deb0833d 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S
@@ -45,7 +45,7 @@
  *      No multiprecision branch for _LA_ and _EP_
  *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
  *
- *      Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout
+ *      Check _z exponent to be within borders [1E3 ; 60A] else goto Callout
  *
  *      Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z),
  *      that multiplied by _z, is final result for _EP_ version.
@@ -196,7 +196,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm2

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
index c5a94d7b5bb..38ab12b1e23 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
@@ -45,7 +45,7 @@
  *      No multiprecision branch for _LA_ and _EP_
  *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
  *
- *      Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout
+ *      Check _z exponent to be within borders [1E3 ; 60A] else goto Callout
  *
  *      Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z),
  *      that multiplied by _z, is final result for _EP_ version.
@@ -117,7 +117,7 @@ ENTRY(_ZGVbN4vv_hypotf_sse4)
 	movaps	%xmm2, %xmm6
 	mulps	%xmm10, %xmm6

-	/* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */
+	/* Check _z exponent to be within borders [1E3 ; 60A] else goto Callout */
 	movdqu	_LowBoundary+__svml_shypot_data_internal(%rip), %xmm4
 	subps	%xmm6, %xmm5

@@ -216,7 +216,7 @@ L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -96)
 	# LOE rbx rbp r12 r13 r14 r15 xmm2

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

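The hypotf fast path summarized in the comment fixed above (square and sum the arguments, verify the exponent of _z stays inside a safe window, otherwise fall back to the callout, then refine a reciprocal-square-root estimate) can be pictured as the scalar C sketch below. It is illustrative only: the function name, the exponent window and the single refinement step are hypothetical stand-ins for what the vector code does on raw exponent bits and hardware rsqrt estimates.

#include <math.h>
#include <stdint.h>
#include <string.h>

static float hypotf_sketch (float x, float y)
{
  float z = x * x + y * y;        /* _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 */

  uint32_t bits;
  memcpy (&bits, &z, sizeof bits);
  unsigned biased_exp = (bits >> 23) & 0xff;
  /* Outside a conservative exponent window, defer to the scalar routine
     (the "Callout" path) so overflow/underflow in z cannot corrupt the
     result.  The bounds here are for illustration only.  */
  if (biased_exp < 0x20 || biased_exp > 0xc0)
    return hypotf (x, y);

  /* One Newton step on a reciprocal-sqrt estimate, then z * (1/sqrt(z))
     gives sqrt(z); the kernel refines a hardware rsqrt the same way.  */
  float s = 1.0f / sqrtf (z);
  s = s * (1.5f - 0.5f * z * s * s);
  return z * s;
}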
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S
index fe87678ae6e..80f1081201b 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S
@@ -45,7 +45,7 @@
  *      No multiprecision branch for _LA_ and _EP_
  *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
  *
- *      Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout
+ *      Check _z exponent to be within borders [1E3 ; 60A] else goto Callout
  *
  *      Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z),
  *      that multiplied by _z, is final result for _EP_ version.
@@ -107,7 +107,7 @@ ENTRY(_ZGVdN8vv_hypotf_avx2)
 	 */
 	vmovups	_sHalf+__svml_shypot_data_internal(%rip), %ymm7

-	/* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */
+	/* Check _z exponent to be within borders [1E3 ; 60A] else goto Callout */
 	vmovups	_LowBoundary+__svml_shypot_data_internal(%rip), %ymm2
 	vfmadd231ps %ymm1, %ymm1, %ymm8

@@ -220,7 +220,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm2

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S
index 87a1694a6f1..0deb96997a1 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S
@@ -155,7 +155,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm1

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S
index 80ded852935..6baff562f54 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S
@@ -168,7 +168,7 @@ L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm1

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S
index 480495037fe..54ff0b1e4d7 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S
@@ -168,7 +168,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm1

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S
index d629dc44f38..e4f8a603ff0 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S
@@ -201,7 +201,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
index 511e064a3d4..4a10457eb8f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
@@ -182,7 +182,7 @@ L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm1

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S
index ea39f66d228..672c91e07e4 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S
@@ -184,7 +184,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S
index c14fd3d918f..04288956c42 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S
@@ -152,7 +152,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm1

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S
index f4aa9481cac..93ed64254ee 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S
@@ -160,7 +160,7 @@ L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm1

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S
index d2441c35812..02360e57ee6 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S
@@ -163,7 +163,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm1

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
index dda1a0531b1..03e7f345b0f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
@@ -246,7 +246,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
index 34ec276ac04..59d63291267 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
@@ -236,7 +236,7 @@ L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm14

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
index abf8d658ab4..81e1f19e26f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
@@ -237,7 +237,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
index 3d4dba3fabe..ae95fbae912 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
@@ -94,11 +94,11 @@ ENTRY(_ZGVeN16v_tanf_skx)
 	vfnmadd231ps {rn-sae}, %zmm5, %zmm2, %zmm4
 	vfnmadd213ps {rn-sae}, %zmm4, %zmm3, %zmm5

-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx zmm0 zmm5 zmm10 zmm11 k6

-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */

@@ -229,7 +229,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

@@ -248,7 +248,7 @@ L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx r15 r12d r13d

-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S
index 1292e88cf91..fab86645b6f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S
@@ -175,11 +175,11 @@ ENTRY(_ZGVbN4v_tanf_sse4)
 	testl	%edx, %edx

-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm4 xmm11 xmm12 xmm13

-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */

@@ -258,7 +258,7 @@ L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -240)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

@@ -277,7 +277,7 @@ L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx rbp r15 r12d r13d

-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S
index ab523212209..30585a77b45 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S
@@ -161,13 +161,13 @@ ENTRY(_ZGVdN8v_tanf_avx2)
 	testl	%edx, %edx

-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)

 	/* DW_CFA_expression: r3 (rbx) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -8; DW_OP_plus) */
 	.cfi_escape 0x10, 0x03, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xf8, 0xff, 0xff, 0xff, 0x22
 	# LOE r12 r13 r14 r15 eax ymm0 ymm1 ymm10 ymm11 ymm12

-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */

@@ -255,7 +255,7 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE r12 r13 r14 r15 ymm0

-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */

@@ -273,7 +273,7 @@ L(SCALAR_MATH_CALL):
 	cfi_restore(13)
 	# LOE r14 r15 ebx r12d

-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S
index d72a88924c6..e639c485240 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S
@@ -220,7 +220,7 @@ L(SPECIAL_VALUES_LOOP):
 	xorl	%ebp, %ebp
 	tzcntl	%ebx, %ebp

-	/* Scalar math fucntion call to process special input. */
+	/* Scalar math function call to process special input. */
 	vmovss	64(%rsp, %rbp, 4), %xmm0
 	call	tanhf@PLT

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S
index dcbb1886d0b..357ad375b30 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S
@@ -73,7 +73,7 @@
 #include

-/* tanhf data tables for avx2 and sse4 implementatins defined here.
+/* tanhf data tables for avx2 and sse4 implementations defined here.
  */
 #define ONLY_DECL_OFFSET
 #include "svml_s_tanhf_rodata.S"
@@ -217,7 +217,7 @@ L(SPECIAL_VALUES_LOOP):
 	xorl	%ebp, %ebp
 	bsfl	%ebx, %ebp

-	/* Scalar math fucntion call to process special input. */
+	/* Scalar math function call to process special input. */
 	movss	40(%rsp, %rbp, 4), %xmm0
 	call	tanhf@PLT
 	/* No good way to avoid the store-forwarding fault this will cause on
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S
index b8d828e0817..ea19903d9df 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S
@@ -72,7 +72,7 @@
 #include

-/* tanhf data tables for avx2 and sse4 implementatins defined here.
+/* tanhf data tables for avx2 and sse4 implementations defined here.
  */
 #include "svml_s_tanhf_rodata.S"
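All of the "Scalar math function call to process special input" comments corrected throughout this patch refer to the same pattern, visible in the tanhf16 hunk above: the vector path handles every lane, then lanes flagged as special (NaN, overflow, out-of-range inputs) are reprocessed one at a time with the ordinary scalar libm routine (tzcnt/bsf over a bitmask, load the lane, call e.g. tanhf@PLT). A minimal scalar C sketch of that fallback loop, with a hypothetical buffer layout and lane count, would look like:

#include <math.h>

static void special_values_loop (const float *in, float *out,
                                 unsigned special_mask, int lanes)
{
  for (int lane = 0; lane < lanes; lane++)
    if (special_mask & (1u << lane))
      /* Scalar math function call to process special input.  */
      out[lane] = tanhf (in[lane]);
}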