#define GHASH_POLYNOMIAL 0xE1UL
static void
-gcm_gf_add (union gcm_block *r, const union gcm_block *x, const union gcm_block *y)
+gcm_gf_add (union nettle_block16 *r,
+ const union nettle_block16 *x, const union nettle_block16 *y)
{
r->w[0] = x->w[0] ^ y->w[0];
r->w[1] = x->w[1] ^ y->w[1];
shifted out is one, the defining polynomial is added to cancel it
out. r == x is allowed. */
static void
-gcm_gf_shift (union gcm_block *r, const union gcm_block *x)
+gcm_gf_shift (union nettle_block16 *r, const union nettle_block16 *x)
{
long mask;
specification. y may be shorter than a full block, missing bytes
are assumed zero. */
static void
-gcm_gf_mul (union gcm_block *x, const union gcm_block *y)
+gcm_gf_mul (union nettle_block16 *x, const union nettle_block16 *y)
{
- union gcm_block V;
- union gcm_block Z;
+ union nettle_block16 V;
+ union nettle_block16 Z;
unsigned i;
memcpy(V.b, x, sizeof(V));
};
static void
-gcm_gf_shift_4(union gcm_block *x)
+gcm_gf_shift_4(union nettle_block16 *x)
{
unsigned long *w = x->w;
unsigned long reduce;
}
static void
-gcm_gf_mul (union gcm_block *x, const union gcm_block *table)
+gcm_gf_mul (union nettle_block16 *x, const union nettle_block16 *table)
{
- union gcm_block Z;
+ union nettle_block16 Z;
unsigned i;
memset(Z.b, 0, sizeof(Z));
#define gcm_hash _nettle_gcm_hash8
void
-_nettle_gcm_hash8 (const struct gcm_key *key, union gcm_block *x,
+_nettle_gcm_hash8 (const struct gcm_key *key, union nettle_block16 *x,
size_t length, const uint8_t *data);
# else /* !HAVE_NATIVE_gcm_hash8 */
static const uint16_t
};
static void
-gcm_gf_shift_8(union gcm_block *x)
+gcm_gf_shift_8(union nettle_block16 *x)
{
unsigned long *w = x->w;
unsigned long reduce;
}
static void
-gcm_gf_mul (union gcm_block *x, const union gcm_block *table)
+gcm_gf_mul (union nettle_block16 *x, const union nettle_block16 *table)
{
- union gcm_block Z;
+ union nettle_block16 Z;
unsigned i;
memcpy(Z.b, table[x->b[GCM_BLOCK_SIZE-1]].b, GCM_BLOCK_SIZE);
#ifndef gcm_hash
static void
-gcm_hash(const struct gcm_key *key, union gcm_block *x,
+gcm_hash(const struct gcm_key *key, union nettle_block16 *x,
size_t length, const uint8_t *data)
{
for (; length >= GCM_BLOCK_SIZE;
#endif /* !gcm_hash */
static void
-gcm_hash_sizes(const struct gcm_key *key, union gcm_block *x,
+gcm_hash_sizes(const struct gcm_key *key, union nettle_block16 *x,
uint64_t auth_size, uint64_t data_size)
{
uint8_t buffer[GCM_BLOCK_SIZE];
#define GCM_TABLE_BITS 8
-/* To make sure that we have proper alignment. */
-union gcm_block
-{
- uint8_t b[GCM_BLOCK_SIZE];
- unsigned long w[GCM_BLOCK_SIZE / sizeof(unsigned long)];
-};
-
/* Hashing subkey */
struct gcm_key
{
/* Precomputed, key-dependent lookup table of 1 << GCM_TABLE_BITS
   (= 256, per the #define above) blocks; the table-driven
   gcm_gf_mul indexes it by one byte of the hash state
   (table[x->b[GCM_BLOCK_SIZE-1]]). */
- union gcm_block h[1 << GCM_TABLE_BITS];
+ union nettle_block16 h[1 << GCM_TABLE_BITS];
};
/* Per-message state, depending on the iv */
struct gcm_ctx {
/* Original counter block */
- union gcm_block iv;
+ union nettle_block16 iv;
/* Updated for each block. */
- union gcm_block ctr;
+ union nettle_block16 ctr;
/* Hashing state */
- union gcm_block x;
+ union nettle_block16 x;
/* Running byte counts fed to gcm_hash_sizes for the final GHASH
   length block: auth_size counts associated data, data_size counts
   en-/decrypted message data. */
uint64_t auth_size;
uint64_t data_size;
};