crypto/cfb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * CFB: Cipher FeedBack mode
 *
 * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
 *
 * CFB is a stream cipher mode that is layered on top of a block
 * encryption scheme.  It works much like a one-time pad where the
 * pad is generated initially from the encrypted IV and then
 * subsequently from the encrypted previous block of ciphertext.  The
 * pad is XORed into the plaintext to get the final ciphertext.
 *
 * The scheme of CFB is best described by Wikipedia:
 *
 * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
 *
 * Note that since the pad for both encryption and decryption is
 * generated by an encryption operation, CFB never uses the block
 * decryption function.
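 *
 * As a quick sketch of the recurrence implemented below (with E_k() the
 * block cipher encryption and C[0] defined as the IV):
 *
 *	encrypt: C[i] = P[i] XOR E_k(C[i-1])
 *	decrypt: P[i] = C[i] XOR E_k(C[i-1])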
 */

#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct crypto_cfb_ctx {
        struct crypto_cipher *child;
};

static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
{
        struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_cipher *child = ctx->child;

        return crypto_cipher_blocksize(child);
}

static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
                                   const u8 *src, u8 *dst)
{
        struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_cipher_encrypt_one(ctx->child, dst, src);
}

/* the final, possibly partial, block is handled the same way for encrypt and decrypt */
static void crypto_cfb_final(struct skcipher_walk *walk,
                             struct crypto_skcipher *tfm)
{
        const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
        u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;
        unsigned int nbytes = walk->nbytes;

        crypto_cfb_encrypt_one(tfm, iv, stream);
        crypto_xor_cpy(dst, stream, src, nbytes);
}

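/*
 * Encrypt full blocks when source and destination buffers are distinct:
 * each pad block is written straight into dst, XORed with src, and the
 * freshly produced ciphertext block becomes the feedback for the next
 * iteration.  Returns the number of tail bytes that remain for
 * crypto_cfb_final().
 */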
static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
                                      struct crypto_skcipher *tfm)
{
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
                crypto_cfb_encrypt_one(tfm, iv, dst);
                crypto_xor(dst, src, bsize);
                iv = dst;

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}

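/*
 * In-place variant: the pad has to go through a stack buffer, because
 * encrypting the IV directly into src would overwrite the plaintext
 * before it could be XORed in.  The ciphertext left behind in src is
 * then used as the next feedback block.
 */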
static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
                                      struct crypto_skcipher *tfm)
{
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *iv = walk->iv;
        u8 tmp[MAX_CIPHER_BLOCKSIZE];

        do {
                crypto_cfb_encrypt_one(tfm, iv, tmp);
                crypto_xor(src, tmp, bsize);
                iv = src;

                src += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}

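/*
 * Walk the request and encrypt all full blocks, picking the in-place or
 * out-of-place helper for each step; any trailing partial block is handled
 * by crypto_cfb_final(), which is what lets the mode behave as a stream
 * cipher.
 */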
static int crypto_cfb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_walk walk;
        unsigned int bsize = crypto_cfb_bsize(tfm);
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes >= bsize) {
                if (walk.src.virt.addr == walk.dst.virt.addr)
                        err = crypto_cfb_encrypt_inplace(&walk, tfm);
                else
                        err = crypto_cfb_encrypt_segment(&walk, tfm);
                err = skcipher_walk_done(&walk, err);
        }

        if (walk.nbytes) {
                crypto_cfb_final(&walk, tfm);
                err = skcipher_walk_done(&walk, 0);
        }

        return err;
}

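/*
 * Decrypt full blocks out of place.  The pad is still produced with the
 * block *encryption* of the previous ciphertext block; the only difference
 * from encryption is that the feedback is taken from src (the ciphertext)
 * rather than from the newly written output.
 */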
static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
                                      struct crypto_skcipher *tfm)
{
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
                crypto_cfb_encrypt_one(tfm, iv, dst);
                crypto_xor(dst, src, bsize);
                iv = src;

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}

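/*
 * In-place decryption: the current ciphertext block must be saved into
 * walk->iv before it is overwritten with plaintext, since it is needed as
 * the feedback for the next block.
 */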
static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
                                      struct crypto_skcipher *tfm)
{
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 * const iv = walk->iv;
        u8 tmp[MAX_CIPHER_BLOCKSIZE];

        do {
                crypto_cfb_encrypt_one(tfm, iv, tmp);
                memcpy(iv, src, bsize);
                crypto_xor(src, tmp, bsize);
                src += bsize;
        } while ((nbytes -= bsize) >= bsize);

        return nbytes;
}

static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
                                     struct crypto_skcipher *tfm)
{
        if (walk->src.virt.addr == walk->dst.virt.addr)
                return crypto_cfb_decrypt_inplace(walk, tfm);
        else
                return crypto_cfb_decrypt_segment(walk, tfm);
}

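/*
 * Key setup is delegated to the underlying block cipher; request flags are
 * forwarded to the child, and any result flags it raises are reflected
 * back on the parent skcipher.
 */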
static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
                             unsigned int keylen)
{
        struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_cipher *child = ctx->child;
        int err;

        crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                       CRYPTO_TFM_REQ_MASK);
        err = crypto_cipher_setkey(child, key, keylen);
        crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
                                          CRYPTO_TFM_RES_MASK);
        return err;
}

static int crypto_cfb_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_walk walk;
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes >= bsize) {
                err = crypto_cfb_decrypt_blocks(&walk, tfm);
                err = skcipher_walk_done(&walk, err);
        }

        if (walk.nbytes) {
                crypto_cfb_final(&walk, tfm);
                err = skcipher_walk_done(&walk, 0);
        }

        return err;
}

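/*
 * Instantiate the underlying single-block cipher for this tfm and stash it
 * in the context; it is released again in crypto_cfb_exit_tfm().
 */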
static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
        struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_cipher *cipher;

        cipher = crypto_spawn_cipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        return 0;
}

static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
{
        struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_cipher(ctx->child);
}

static void crypto_cfb_free(struct skcipher_instance *inst)
{
        crypto_drop_skcipher(skcipher_instance_ctx(inst));
        kfree(inst);
}

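/*
 * Template constructor: wrap a plain CRYPTO_ALG_TYPE_CIPHER algorithm in a
 * CFB skcipher instance.  The instance advertises a cra_blocksize of 1 (a
 * stream cipher) while keeping the underlying block size as chunksize and
 * ivsize.
 */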
static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct skcipher_instance *inst;
        struct crypto_attr_type *algt;
        struct crypto_spawn *spawn;
        struct crypto_alg *alg;
        u32 mask;
        int err;

        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
        if (err)
                return err;

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        algt = crypto_get_attr_type(tb);
        err = PTR_ERR(algt);
        if (IS_ERR(algt))
                goto err_free_inst;

        mask = CRYPTO_ALG_TYPE_MASK |
                crypto_requires_off(algt->type, algt->mask,
                                    CRYPTO_ALG_NEED_FALLBACK);

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
        err = PTR_ERR(alg);
        if (IS_ERR(alg))
                goto err_free_inst;

        spawn = skcipher_instance_ctx(inst);
        err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto err_put_alg;

        err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
        if (err)
                goto err_drop_spawn;

        inst->alg.base.cra_priority = alg->cra_priority;
        /* we're a stream cipher, independent of the underlying cra_blocksize */
        inst->alg.base.cra_blocksize = 1;
        inst->alg.base.cra_alignmask = alg->cra_alignmask;

        /*
         * To simplify the implementation, configure the skcipher walk to only
         * give a partial block at the very end, never earlier.
         */
        inst->alg.chunksize = alg->cra_blocksize;

        inst->alg.ivsize = alg->cra_blocksize;
        inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
        inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;

        inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx);

        inst->alg.init = crypto_cfb_init_tfm;
        inst->alg.exit = crypto_cfb_exit_tfm;

        inst->alg.setkey = crypto_cfb_setkey;
        inst->alg.encrypt = crypto_cfb_encrypt;
        inst->alg.decrypt = crypto_cfb_decrypt;

        inst->free = crypto_cfb_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;
        crypto_mod_put(alg);

out:
        return err;

err_drop_spawn:
        crypto_drop_spawn(spawn);
err_put_alg:
        crypto_mod_put(alg);
err_free_inst:
        kfree(inst);
        goto out;
}

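/*
 * Once the template is registered, callers can instantiate it through the
 * regular skcipher API, e.g. (a minimal sketch, error handling omitted):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cfb(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_skcipher(tfm);
 */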
static struct crypto_template crypto_cfb_tmpl = {
        .name = "cfb",
        .create = crypto_cfb_create,
        .module = THIS_MODULE,
};

static int __init crypto_cfb_module_init(void)
{
        return crypto_register_template(&crypto_cfb_tmpl);
}

static void __exit crypto_cfb_module_exit(void)
{
        crypto_unregister_template(&crypto_cfb_tmpl);
}

module_init(crypto_cfb_module_init);
module_exit(crypto_cfb_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CFB block cipher algorithm");
MODULE_ALIAS_CRYPTO("cfb");