From: Scott Boudreaux <121303252+Scottcjn@users.noreply.github.com>
Date: Wed, 18 Mar 2026 15:56:09 +0000 (-0500)
Subject: md5/md4: enable unaligned access fast path on little-endian powerpc64
X-Git-Tag: rc-8_20_0-1~141
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=21fc17b265ca32c8a5a768dc7cd730754a104740;p=thirdparty%2Fcurl.git

md5/md4: enable unaligned access fast path on little-endian powerpc64

PowerPC64 little-endian (ppc64le) supports efficient unaligned memory
access, similar to x86. This extends the existing fast path that avoids
byte-by-byte loads in the MD5 and MD4 SET/GET macros.

On POWER8 ppc64le, this eliminates 3 shifts + 3 ORs per 32-bit word
load, replacing them with a single lwz.

The fast path must stay disabled on big-endian powerpc64: the SET/GET
macros perform a plain native-endian 32-bit load, while MD4 and MD5
require little-endian word order, so the guard also checks
__LITTLE_ENDIAN__ (predefined by GCC and Clang on ppc64le targets).

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>

Closes #20985
---

diff --git a/lib/md4.c b/lib/md4.c
index 1ac6ef1c5b..c3934ec79d 100644
--- a/lib/md4.c
+++ b/lib/md4.c
@@ -213,7 +213,8 @@ typedef struct md4_ctx MD4_CTX;
  * The check for little-endian architectures that tolerate unaligned memory
  * accesses is an optimization. Nothing will break if it does not work.
  */
-#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__vax__) || \
+  (defined(__powerpc64__) && defined(__LITTLE_ENDIAN__))
 #define MD4_SET(n) (*(const uint32_t *)(const void *)&ptr[(n) * 4])
 #define MD4_GET(n) MD4_SET(n)
 #else
diff --git a/lib/md5.c b/lib/md5.c
index c2bd176dc9..53d93aa564 100644
--- a/lib/md5.c
+++ b/lib/md5.c
@@ -294,7 +294,8 @@ typedef struct md5_ctx my_md5_ctx;
  * The check for little-endian architectures that tolerate unaligned memory
  * accesses is an optimization. Nothing will break if it does not work.
  */
-#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__vax__) || \
+  (defined(__powerpc64__) && defined(__LITTLE_ENDIAN__))
 #define MD5_SET(n) (*(const uint32_t *)(const void *)&ptr[(n) * 4])
 #define MD5_GET(n) MD5_SET(n)
 #else