/*
 * Copyright 2021-2022 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2021, Intel Corporation. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*-
 * AVX512 VAES + VPCLMULQDQ support for AES GCM.
 * This file is included by cipher_aes_gcm_hw_aesni.inc
 */

#undef VAES_GCM_ENABLED
#if (defined(__x86_64) || defined(__x86_64__) || \
     defined(_M_AMD64) || defined(_M_X64))
# define VAES_GCM_ENABLED

/* Returns non-zero when AVX512F + VAES + VPCLMULQDQ combination is available */
int ossl_vaes_vpclmulqdq_capable(void);

# define OSSL_AES_GCM_UPDATE(direction)                                       \
    void ossl_aes_gcm_ ## direction ## _avx512(const void *ks,                \
                                               void *gcm128ctx,               \
                                               unsigned int *pblocklen,       \
                                               const unsigned char *in,       \
                                               size_t len,                    \
                                               unsigned char *out);

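/*
 * Declare the prototypes of the bulk update routines: expanding the macro
 * yields ossl_aes_gcm_encrypt_avx512() and ossl_aes_gcm_decrypt_avx512().
 */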
OSSL_AES_GCM_UPDATE(encrypt)
OSSL_AES_GCM_UPDATE(decrypt)

void ossl_aes_gcm_init_avx512(const void *ks, void *gcm128ctx);
void ossl_aes_gcm_setiv_avx512(const void *ks, void *gcm128ctx,
                               const unsigned char *iv, size_t ivlen);
void ossl_aes_gcm_update_aad_avx512(void *gcm128ctx, const unsigned char *aad,
                                    size_t aadlen);
void ossl_aes_gcm_finalize_avx512(void *gcm128ctx, unsigned int pblocklen);

void ossl_gcm_gmult_avx512(u64 Xi[2], const void *gcm128ctx);

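/*
 * Expand the AES key schedule and initialise the GCM context for the
 * AVX512 code path.
 */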
static int vaes_gcm_setkey(PROV_GCM_CTX *ctx, const unsigned char *key,
                           size_t keylen)
{
    GCM128_CONTEXT *gcmctx = &ctx->gcm;
    PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
    AES_KEY *ks = &actx->ks.ks;

    ctx->ks = ks;
    aesni_set_encrypt_key(key, keylen * 8, ks);
    memset(gcmctx, 0, sizeof(*gcmctx));
    gcmctx->key = ks;
    ctx->key_set = 1;

    ossl_aes_gcm_init_avx512(ks, gcmctx);

    return 1;
}

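/*
 * Reset the counter, hash and length state, then let the AVX512 code absorb
 * the caller-supplied IV.
 */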
static int vaes_gcm_setiv(PROV_GCM_CTX *ctx, const unsigned char *iv,
                          size_t ivlen)
{
    GCM128_CONTEXT *gcmctx = &ctx->gcm;

    gcmctx->Yi.u[0] = 0;              /* Current counter */
    gcmctx->Yi.u[1] = 0;
    gcmctx->Xi.u[0] = 0;              /* AAD hash */
    gcmctx->Xi.u[1] = 0;
    gcmctx->len.u[0] = 0;             /* AAD length */
    gcmctx->len.u[1] = 0;             /* Message length */
    gcmctx->ares = 0;
    gcmctx->mres = 0;

    /* IV is limited to 2^64 bits, thus 2^61 bytes */
    if (ivlen > (U64(1) << 61))
        return 0;

    ossl_aes_gcm_setiv_avx512(ctx->ks, gcmctx, iv, ivlen);

    return 1;
}

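/*
 * Absorb AAD into the GHASH state: any partial block left from a previous
 * call is completed first, full blocks are then processed in bulk by the
 * AVX512 routine, and a trailing partial block is buffered in Xi for the
 * next call or for finalisation.
 */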
static int vaes_gcm_aadupdate(PROV_GCM_CTX *ctx,
                              const unsigned char *aad,
                              size_t aad_len)
{
    GCM128_CONTEXT *gcmctx = &ctx->gcm;
    u64 alen = gcmctx->len.u[0];
    unsigned int ares;
    size_t i, lenBlks;

    /* Bad sequence: AAD update called after message processing has started */
    if (gcmctx->len.u[1] > 0)
        return 0;

    alen += aad_len;
    /* AAD is limited to 2^64 bits, thus 2^61 bytes */
    if ((alen > (U64(1) << 61)) || (alen < aad_len))
        return 0;

    gcmctx->len.u[0] = alen;

    ares = gcmctx->ares;
    /* Partial AAD block left over from previous AAD update calls */
    if (ares > 0) {
        /*
         * Fill the partial block buffer until a full block is gathered
         * (note, the hash is stored reflected)
         */
        while (ares > 0 && aad_len > 0) {
            gcmctx->Xi.c[15 - ares] ^= *(aad++);
            --aad_len;
            ares = (ares + 1) % AES_BLOCK_SIZE;
        }
        /* Full block gathered */
        if (ares == 0) {
            ossl_gcm_gmult_avx512(gcmctx->Xi.u, gcmctx);
        } else { /* no more AAD */
            gcmctx->ares = ares;
            return 1;
        }
    }

    /* Bulk AAD processing */
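    /* Round aad_len down to a whole number of AES_BLOCK_SIZE (16-byte) blocks */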
    lenBlks = aad_len & ((size_t)(-AES_BLOCK_SIZE));
    if (lenBlks > 0) {
        ossl_aes_gcm_update_aad_avx512(gcmctx, aad, lenBlks);
        aad += lenBlks;
        aad_len -= lenBlks;
    }

    /* Add remaining AAD to the hash (note, the hash is stored reflected) */
    if (aad_len > 0) {
        ares = aad_len;
        for (i = 0; i < aad_len; i++)
            gcmctx->Xi.c[15 - i] ^= aad[i];
    }

    gcmctx->ares = ares;

    return 1;
}

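/*
 * Encrypt or decrypt the payload: any pending partial AAD block is folded
 * into the hash first, then the AVX512 bulk encrypt/decrypt routine
 * processes the data.
 */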
static int vaes_gcm_cipherupdate(PROV_GCM_CTX *ctx, const unsigned char *in,
                                 size_t len, unsigned char *out)
{
    GCM128_CONTEXT *gcmctx = &ctx->gcm;
    u64 mlen = gcmctx->len.u[1];

    mlen += len;
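    /* Message is limited to 2^39 - 256 bits (2^36 - 32 bytes) by the GCM spec */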
    if (mlen > ((U64(1) << 36) - 32) || (mlen < len))
        return 0;

    gcmctx->len.u[1] = mlen;

    /* Finalize GHASH(AAD) if a partial AAD block was left unprocessed */
    if (gcmctx->ares > 0) {
        ossl_gcm_gmult_avx512(gcmctx->Xi.u, gcmctx);
        gcmctx->ares = 0;
    }

    if (ctx->enc)
        ossl_aes_gcm_encrypt_avx512(ctx->ks, gcmctx, &gcmctx->mres, in, len, out);
    else
        ossl_aes_gcm_decrypt_avx512(ctx->ks, gcmctx, &gcmctx->mres, in, len, out);

    return 1;
}

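/*
 * Finalise the authentication tag: encryption emits the computed tag,
 * decryption compares it against the caller-supplied tag in constant time.
 */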
static int vaes_gcm_cipherfinal(PROV_GCM_CTX *ctx, unsigned char *tag)
{
    GCM128_CONTEXT *gcmctx = &ctx->gcm;
    unsigned int *res = &gcmctx->mres;

    /* Finalize AAD processing */
    if (gcmctx->ares > 0)
        res = &gcmctx->ares;

    ossl_aes_gcm_finalize_avx512(gcmctx, *res);

    if (ctx->enc) {
        ctx->taglen = GCM_TAG_MAX_SIZE;
        memcpy(tag, gcmctx->Xi.c,
               ctx->taglen <= sizeof(gcmctx->Xi.c) ? ctx->taglen :
               sizeof(gcmctx->Xi.c));
        *res = 0;
    } else {
        return !CRYPTO_memcmp(gcmctx->Xi.c, tag, ctx->taglen);
    }

    return 1;
}

static const PROV_GCM_HW vaes_gcm = {
    vaes_gcm_setkey,
    vaes_gcm_setiv,
    vaes_gcm_aadupdate,
    vaes_gcm_cipherupdate,
    vaes_gcm_cipherfinal,
    ossl_gcm_one_shot
};
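
/*
 * Illustrative sketch (not part of this file): the including code is expected
 * to select this backend at run time only when the CPU reports the required
 * extensions, roughly along these lines (the fallback table name here is an
 * assumption):
 *
 *     if (ossl_vaes_vpclmulqdq_capable())
 *         hw = &vaes_gcm;
 *     else
 *         hw = &aesni_gcm;
 */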

#endif