* macros.h (ROTL32): New macro, to replace (almost) all other
rotation macros.
+
* aes-set-encrypt-key.c: Include macros.h.
(aes_set_encrypt_key): Use ROTL32.
* aes-internal.h (ROTBYTE, ROTRBYTE): Deleted macros.
+
* camellia-internal.h (ROL32): Deleted macro.
(ROTL128): Renamed for consistency, from...
(ROL128): ... old name.
* cast128.c (ROL): Deleted macro.
(F1, F2, F3): Updated to use ROTL32 (reversed order of arguments).
Also added proper do { ... } while (0) wrappers.
+
* ripemd160-compress.c (ROL32): Deleted macro.
(R): Updated to use ROTL32 (reversed order of arguments).
+ * serpent-internal.h (ROL32): Deleted macro.
+ (ROTL64): Renamed (from ROL64) and reordered arguments, for
+ consistency.
+ (RSHIFT64): Reordered arguments, for consistency.
+ * serpent-decrypt.c: Updated for renamed rotation macros, with
+ reversed argument order.
+ * serpent-encrypt.c: Likewise.
+ * serpent-set-key.c: Likewise.
+
2012-03-30 Niels Möller <nisse@lysator.liu.se>
* nettle-internal.c (nettle_salsa20): Cipher struct for
/* In-place inverse linear transformation. */
#define LINEAR_TRANSFORMATION_INVERSE(x0,x1,x2,x3) \
do { \
- x2 = ROL32 (x2, 10); \
- x0 = ROL32 (x0, 27); \
+ x2 = ROTL32 (10, x2); \
+ x0 = ROTL32 (27, x0); \
x2 = x2 ^ x3 ^ (x1 << 7); \
x0 = x0 ^ x1 ^ x3; \
- x3 = ROL32 (x3, 25); \
- x1 = ROL32 (x1, 31); \
+ x3 = ROTL32 (25, x3); \
+ x1 = ROTL32 (31, x1); \
x3 = x3 ^ x2 ^ (x0 << 3); \
x1 = x1 ^ x0 ^ x2; \
- x2 = ROL32 (x2, 29); \
- x0 = ROL32 (x0, 19); \
+ x2 = ROTL32 (29, x2); \
+ x0 = ROTL32 (19, x0); \
} while (0)
/* Round inputs are x0,x1,x2,x3 (destroyed), and round outputs are
/* In-place inverse linear transformation. */
#define LINEAR_TRANSFORMATION64_INVERSE(x0,x1,x2,x3) \
do { \
- x2 = ROL64 (x2, 10); \
- x0 = ROL64 (x0, 27); \
- x2 = x2 ^ x3 ^ RSHIFT64(x1, 7); \
+ x2 = ROTL64 (10, x2); \
+ x0 = ROTL64 (27, x0); \
+ x2 = x2 ^ x3 ^ RSHIFT64(7, x1); \
x0 = x0 ^ x1 ^ x3; \
- x3 = ROL64 (x3, 25); \
- x1 = ROL64 (x1, 31); \
- x3 = x3 ^ x2 ^ RSHIFT64(x0, 3); \
+ x3 = ROTL64 (25, x3); \
+ x1 = ROTL64 (31, x1); \
+ x3 = x3 ^ x2 ^ RSHIFT64(3, x0); \
x1 = x1 ^ x0 ^ x2; \
- x2 = ROL64 (x2, 29); \
- x0 = ROL64 (x0, 19); \
+ x2 = ROTL64 (29, x2); \
+ x0 = ROTL64 (19, x0); \
} while (0)
#define ROUND64_INVERSE(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
/* In-place linear transformation. */
#define LINEAR_TRANSFORMATION(x0,x1,x2,x3) \
do { \
- x0 = ROL32 (x0, 13); \
- x2 = ROL32 (x2, 3); \
+ x0 = ROTL32 (13, x0); \
+ x2 = ROTL32 (3, x2); \
x1 = x1 ^ x0 ^ x2; \
x3 = x3 ^ x2 ^ (x0 << 3); \
- x1 = ROL32 (x1, 1); \
- x3 = ROL32 (x3, 7); \
+ x1 = ROTL32 (1, x1); \
+ x3 = ROTL32 (7, x3); \
x0 = x0 ^ x1 ^ x3; \
x2 = x2 ^ x3 ^ (x1 << 7); \
- x0 = ROL32 (x0, 5); \
- x2 = ROL32 (x2, 22); \
+ x0 = ROTL32 (5, x0); \
+ x2 = ROTL32 (22, x2); \
} while (0)
/* Round inputs are x0,x1,x2,x3 (destroyed), and round outputs are
#define LINEAR_TRANSFORMATION64(x0,x1,x2,x3) \
do { \
- x0 = ROL64 (x0, 13); \
- x2 = ROL64 (x2, 3); \
+ x0 = ROTL64 (13, x0); \
+ x2 = ROTL64 (3, x2); \
x1 = x1 ^ x0 ^ x2; \
- x3 = x3 ^ x2 ^ RSHIFT64(x0, 3); \
- x1 = ROL64 (x1, 1); \
- x3 = ROL64 (x3, 7); \
+ x3 = x3 ^ x2 ^ RSHIFT64(3, x0); \
+ x1 = ROTL64 (1, x1); \
+ x3 = ROTL64 (7, x3); \
x0 = x0 ^ x1 ^ x3; \
- x2 = x2 ^ x3 ^ RSHIFT64(x1, 7); \
- x0 = ROL64 (x0, 5); \
- x2 = ROL64 (x2, 22); \
+ x2 = x2 ^ x3 ^ RSHIFT64(7, x1); \
+ x0 = ROTL64 (5, x0); \
+ x2 = ROTL64 (22, x2); \
} while (0)
#define ROUND64(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
#ifndef NETTLE_SERPENT_INTERNAL_H_INCLUDED
#define NETTLE_SERPENT_INTERNAL_H_INCLUDED
-/* FIXME: Unify ROL macros used here, in camellia.c and cast128.c. */
-#define ROL32(x,n) ((((x))<<(n)) | (((x))>>(32-(n))))
-
#define KEYXOR(x0,x1,x2,x3, subkey) \
do { \
(x0) ^= (subkey)[0]; \
#if HAVE_NATIVE_64_BIT
/* Operate independently on both halves of a 64-bit word. */
-#define ROL64(x,n) \
+#define ROTL64(n,x) \
(((x) << (n) & ~((((uint64_t) 1 << (n))-1) << 32)) \
|(((x) >> (32-(n))) & ~((((uint64_t) 1 << (32-(n)))-1) << (n))))
_sk = (subkey)[3]; _sk |= _sk << 32; (x3) ^= _sk; \
} while (0)
-#define RSHIFT64(x,n) \
+#define RSHIFT64(n,x) \
( ((x) << (n)) & ~((((uint64_t) 1 << (n)) - 1) << 32))
#endif /* HAVE_NATIVE_64_BIT */
do { \
uint32_t _wn = (w)[(i)] ^ (w)[((i)+3)&7] ^ w[((i)+5)&7] \
^ w[((i)+7)&7] ^ PHI ^ (k)++; \
- ((w)[(i)] = ROL32(_wn, 11)); \
+ ((w)[(i)] = ROTL32(11, _wn)); \
} while (0)
/* Note: Increments k four times and keys once */