// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto user configuration API.
 *
 * Copyright (C) 2011 secunet Security Networks AG
 * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptouser.h>
#include <linux/sched.h>
#include <net/netlink.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/cryptouser.h>

#include "internal.h"

#define null_terminated(x)	(strnlen(x, sizeof(x)) < sizeof(x))

static DEFINE_MUTEX(crypto_cfg_mutex);

/* The crypto netlink socket */
struct sock *crypto_nlsk;

struct crypto_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};

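/*
 * Look up a registered, non-larval algorithm matching the request in @p.
 * If a driver name is given it must match exactly; otherwise the generic
 * algorithm name is compared, unless @exact is set. On success a module
 * reference is taken that the caller must drop with crypto_mod_put().
 */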
struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
	struct crypto_alg *q, *alg = NULL;

	down_read(&crypto_alg_sem);

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int match = 0;

		if (crypto_is_larval(q))
			continue;

		if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
			continue;

		if (strlen(p->cru_driver_name))
			match = !strcmp(q->cra_driver_name,
					p->cru_driver_name);
		else if (!exact)
			match = !strcmp(q->cra_name, p->cru_name);

		if (!match)
			continue;

		if (unlikely(!crypto_mod_get(q)))
			continue;

		alg = q;
		break;
	}

	up_read(&crypto_alg_sem);

	return alg;
}

static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_cipher rcipher;

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.blocksize = alg->cra_blocksize;
	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	return nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
		       sizeof(rcipher), &rcipher);
}

static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rcomp;

	memset(&rcomp, 0, sizeof(rcomp));

	strscpy(rcomp.type, "compression", sizeof(rcomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
}

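/*
 * Fill in the fixed-size crypto_user_alg header for @alg and append the
 * type-specific netlink attributes (priority, larval marker, or the
 * algorithm type's own ->report() data) to @skb.
 */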
static int crypto_report_one(struct crypto_alg *alg,
			     struct crypto_user_alg *ualg, struct sk_buff *skb)
{
	memset(ualg, 0, sizeof(*ualg));

	strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
	strscpy(ualg->cru_driver_name, alg->cra_driver_name,
		sizeof(ualg->cru_driver_name));
	strscpy(ualg->cru_module_name, module_name(alg->cra_module),
		sizeof(ualg->cru_module_name));

	ualg->cru_type = 0;
	ualg->cru_mask = 0;
	ualg->cru_flags = alg->cra_flags;
	ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);

	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
		goto nla_put_failure;
	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
		struct crypto_report_larval rl;

		memset(&rl, 0, sizeof(rl));
		strscpy(rl.type, "larval", sizeof(rl.type));
		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl))
			goto nla_put_failure;
		goto out;
	}

	if (alg->cra_type && alg->cra_type->report) {
		if (alg->cra_type->report(skb, alg))
			goto nla_put_failure;

		goto out;
	}

	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_COMPRESS:
		if (crypto_report_comp(skb, alg))
			goto nla_put_failure;

		break;
	}

out:
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int crypto_report_alg(struct crypto_alg *alg,
			     struct crypto_dump_info *info)
{
	struct sk_buff *in_skb = info->in_skb;
	struct sk_buff *skb = info->out_skb;
	struct nlmsghdr *nlh;
	struct crypto_user_alg *ualg;
	int err = 0;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
			CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
	if (!nlh) {
		err = -EMSGSIZE;
		goto out;
	}

	ualg = nlmsg_data(nlh);

	err = crypto_report_one(alg, ualg, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

out:
	return err;
}

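/*
 * CRYPTO_MSG_GETALG handler for non-dump requests: look up a single
 * matching algorithm, build a reply message and unicast it back to the
 * requesting socket.
 */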
static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
			 struct nlattr **attrs)
{
	struct crypto_user_alg *p = nlmsg_data(in_nlh);
	struct crypto_alg *alg;
	struct sk_buff *skb;
	struct crypto_dump_info info;
	int err;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 0);
	if (!alg)
		return -ENOENT;

	err = -ENOMEM;
	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		goto drop_alg;

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = in_nlh->nlmsg_seq;
	info.nlmsg_flags = 0;

	err = crypto_report_alg(alg, &info);

drop_alg:
	crypto_mod_put(alg);

	if (err) {
		/* Free the reply skb that was never handed to netlink. */
		kfree_skb(skb);
		return err;
	}

	return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}

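/*
 * Dump callback for CRYPTO_MSG_GETALG with NLM_F_DUMP: walk the global
 * algorithm list and emit one message per algorithm. cb->args[0] records
 * how far the previous pass got so the dump can resume after the skb
 * fills up (-EMSGSIZE).
 */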
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
	const size_t start_pos = cb->args[0];
	size_t pos = 0;
	struct crypto_dump_info info;
	struct crypto_alg *alg;
	int res;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	down_read(&crypto_alg_sem);
	list_for_each_entry(alg, &crypto_alg_list, cra_list) {
		if (pos >= start_pos) {
			res = crypto_report_alg(alg, &info);
			if (res == -EMSGSIZE)
				break;
			if (res)
				goto out;
		}
		pos++;
	}
	cb->args[0] = pos;
	res = skb->len;
out:
	up_read(&crypto_alg_sem);
	return res;
}

static int crypto_dump_report_done(struct netlink_callback *cb)
{
	return 0;
}

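/*
 * CRYPTO_MSG_UPDATEALG handler: only the priority of an exactly named
 * driver can be changed. Spawns of the algorithm are removed under
 * crypto_alg_sem and the dependent instances are freed afterwards via
 * crypto_remove_final().
 */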
static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
	LIST_HEAD(list);

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (priority && !strlen(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	down_write(&crypto_alg_sem);

	crypto_remove_spawns(alg, &list, NULL);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_mod_put(alg);
	crypto_remove_final(&list);

	return 0;
}

static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	int err;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	/* We cannot unregister core algorithms such as aes-generic.
	 * We would lose the reference in crypto_alg_list to this algorithm
	 * if we tried to unregister. Unregistering such an algorithm without
	 * removing the module is not possible, so we restrict ourselves to
	 * crypto instances that are built from templates. */
	err = -EINVAL;
	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
		goto drop_alg;

	err = -EBUSY;
	if (refcount_read(&alg->cra_refcnt) > 2)
		goto drop_alg;

	err = crypto_unregister_instance((struct crypto_instance *)alg);

drop_alg:
	crypto_mod_put(alg);
	return err;
}

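/*
 * CRYPTO_MSG_NEWALG handler: fail if a matching algorithm is already
 * registered, otherwise trigger a lookup (and possible module load) of
 * the requested driver or algorithm name and optionally set its priority.
 */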
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	int exact = 0;
	const char *name;
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (strlen(p->cru_driver_name))
		exact = 1;

	if (priority && !exact)
		return -EINVAL;

	alg = crypto_alg_match(p, exact);
	if (alg) {
		crypto_mod_put(alg);
		return -EEXIST;
	}

	if (strlen(p->cru_driver_name))
		name = p->cru_driver_name;
	else
		name = p->cru_name;

	alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	down_write(&crypto_alg_sem);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_mod_put(alg);

	return 0;
}

static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;
	return crypto_del_default_rng();
}

#define MSGSIZE(type) sizeof(struct type)

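/* Minimum payload length expected for each message type. */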
static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_DELRNG	- CRYPTO_MSG_BASE] = 0,
	[CRYPTO_MSG_GETSTAT	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
};

static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
	[CRYPTOCFGA_PRIORITY_VAL] = { .type = NLA_U32},
};

#undef MSGSIZE

static const struct crypto_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = { .doit = crypto_report,
						       .dump = crypto_dump_report,
						       .done = crypto_dump_report_done},
	[CRYPTO_MSG_DELRNG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
	[CRYPTO_MSG_GETSTAT	- CRYPTO_MSG_BASE] = { .doit = crypto_reportstat},
};

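/*
 * Validate and dispatch one netlink message. GETALG requests with
 * NLM_F_DUMP are handed to netlink_dump_start() with a buffer hint of
 * CRYPTO_REPORT_MAXSIZE per registered algorithm (capped at 64k - 1);
 * everything else is parsed against crypto_policy and passed to the
 * ->doit() handler from crypto_dispatch.
 */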
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *attrs[CRYPTOCFGA_MAX+1];
	const struct crypto_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > CRYPTO_MSG_MAX)
		return -EINVAL;

	type -= CRYPTO_MSG_BASE;
	link = &crypto_dispatch[type];

	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP))) {
		struct crypto_alg *alg;
		unsigned long dump_alloc = 0;

		if (link->dump == NULL)
			return -EINVAL;

		down_read(&crypto_alg_sem);
		list_for_each_entry(alg, &crypto_alg_list, cra_list)
			dump_alloc += CRYPTO_REPORT_MAXSIZE;
		up_read(&crypto_alg_sem);

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
				.min_dump_alloc = min(dump_alloc, 65535UL),
			};
			err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
		}

		return err;
	}

	err = nlmsg_parse_deprecated(nlh, crypto_msg_min[type], attrs,
				     CRYPTOCFGA_MAX, crypto_policy, extack);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

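/*
 * All configuration requests are serialized by crypto_cfg_mutex before
 * being dispatched through crypto_user_rcv_msg().
 */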
static void crypto_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&crypto_cfg_mutex);
	netlink_rcv_skb(skb, &crypto_user_rcv_msg);
	mutex_unlock(&crypto_cfg_mutex);
}

static int __init crypto_user_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.input	= crypto_netlink_rcv,
	};

	crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg);
	if (!crypto_nlsk)
		return -ENOMEM;

	return 0;
}

static void __exit crypto_user_exit(void)
{
	netlink_kernel_release(crypto_nlsk);
}

module_init(crypto_user_init);
module_exit(crypto_user_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Crypto userspace configuration API");
MODULE_ALIAS("net-pf-16-proto-21");