Turn
movzbl -1(%rdi, %rdx), %edi
movzbl -1(%rsi, %rdx), %esi
orl %edi, %eax
orl %esi, %ecx
into
movb -1(%rdi, %rdx), %al
movb -1(%rsi, %rdx), %cl
This is safe because the preceding shll $8/bswap sequence leaves the low
byte of %eax and %ecx zero, so loading the last byte directly with movb is
equivalent to the movzbl/orl pair while saving two instructions and
avoiding the extra scratch registers.
* sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S (between_2_3):
Replace movzbl and orl with movb.
+2017-06-23 H.J. Lu <hongjiu.lu@intel.com>
+
+ * sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S (between_2_3):
+ Replace movzbl and orl with movb.
+
2017-06-23 Gabriel F. T. Gomes <gftg@linux.vnet.ibm.com>

* manual/arith.texi (Infinity and NaN): Document SNANFN and SNANFNx.
shll $8, %ecx
bswap %eax
bswap %ecx
- movzbl -1(%rdi, %rdx), %edi
- movzbl -1(%rsi, %rdx), %esi
- orl %edi, %eax
- orl %esi, %ecx
+ movb -1(%rdi, %rdx), %al
+ movb -1(%rsi, %rdx), %cl
/* Subtraction is okay because the upper 8 bits are zero. */
subl %ecx, %eax
ret