// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

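/*
 * Release the memory owned by a data request. req_data may hold key or
 * session material, so it is freed with kfree_sensitive() to zero it
 * before release.
 */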
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
	if (vc_req) {
		kfree_sensitive(vc_req->req_data);
		kfree(vc_req->sgs);
	}
}

static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	complete(&vc_ctrl_req->compl);
}

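/*
 * Control virtqueue interrupt handler. Callbacks stay disabled while the
 * used ring is drained and are only re-enabled once virtqueue_enable_cb()
 * confirms nothing new arrived in between, closing the race between the
 * last virtqueue_get_buf() and interrupt re-enabling. ctrl_lock is dropped
 * around each request's completion callback.
 */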
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
			virtio_crypto_ctrlq_callback(vc_ctrl_req);
			spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}

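/**
 * virtio_crypto_ctrl_vq_request - submit buffers on the control virtqueue
 * @vcrypto:     the virtio crypto device
 * @sgs:         scatterlist table describing the request
 * @out_sgs:     number of driver-to-device entries in @sgs
 * @in_sgs:      number of device-to-driver entries in @sgs
 * @vc_ctrl_req: per-request state whose completion is signalled by the
 *               control queue interrupt handler
 *
 * Sleeps until the device has consumed the buffers, so callers must be in
 * process context. Returns 0 on success or a negative errno if the buffers
 * could not be added to the virtqueue.
 *
 * A minimal caller sketch (the request/response buffer names below are
 * illustrative only; the real layout depends on the operation):
 *
 *	struct scatterlist hdr_sg, status_sg, *sgs[2];
 *
 *	sg_init_one(&hdr_sg, &req_hdr, sizeof(req_hdr));
 *	sg_init_one(&status_sg, &resp_status, sizeof(resp_status));
 *	sgs[0] = &hdr_sg;
 *	sgs[1] = &status_sg;
 *	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, 1, 1, vc_ctrl_req);
 */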
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
				  unsigned int out_sgs, unsigned int in_sgs,
				  struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	int err;
	unsigned long flags;

	init_completion(&vc_ctrl_req->compl);

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
		return err;
	}

	virtqueue_kick(vcrypto->ctrl_vq);
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

	wait_for_completion(&vc_ctrl_req->compl);

	return 0;
}

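/*
 * Data virtqueue interrupt handler. Same disable/drain/re-enable pattern
 * as the control queue, but under the per-queue lock; the lock is dropped
 * around alg_cb so the algorithm's completion path can submit follow-up
 * work without recursing on it.
 */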
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_request *vc_req;
	unsigned long flags;
	unsigned int len;
	unsigned int qid = vq->index;

	spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_unlock_irqrestore(
				&vcrypto->data_vq[qid].lock, flags);
			if (vc_req->alg_cb)
				vc_req->alg_cb(vc_req, len);
			spin_lock_irqsave(
				&vcrypto->data_vq[qid].lock, flags);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}

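/*
 * Set up all virtqueues: data queues first, the control queue last, with
 * one crypto engine bound to each data queue and the engine's queue length
 * capped at the vring size.
 */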
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
	struct device *dev = &vi->vdev->dev;

	/*
	 * We expect one data virtqueue, plus up to N-1 more in
	 * multiqueue mode, followed by the control virtqueue.
	 */
	total_vqs = vi->max_data_queues + 1;

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue */
	callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
	names[total_vqs - 1] = "controlq";

	/* Allocate/initialize parameters for data virtqueues */
	for (i = 0; i < vi->max_data_queues; i++) {
		callbacks[i] = virtcrypto_dataq_callback;
		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
			 "dataq.%d", i);
		names[i] = vi->data_vq[i].name;
	}

	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
	if (ret)
		goto err_find;

	vi->ctrl_vq = vqs[total_vqs - 1];

	for (i = 0; i < vi->max_data_queues; i++) {
		spin_lock_init(&vi->data_vq[i].lock);
		vi->data_vq[i].vq = vqs[i];
		/* Initialize crypto engine */
		vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
						virtqueue_get_vring_size(vqs[i]));
		if (!vi->data_vq[i].engine) {
			ret = -ENOMEM;
			goto err_engine;
		}
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_engine:
err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
			      GFP_KERNEL);
	if (!vi->data_vq)
		return -ENOMEM;

	return 0;
}

static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_data_queues; i++)
			virtqueue_set_affinity(vi->data_vq[i].vq, NULL);

		vi->affinity_hint_set = false;
	}
}

static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
	int i = 0;
	int cpu;

	/*
	 * In single queue mode, we don't set the cpu affinity.
	 */
	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
		virtcrypto_clean_affinity(vcrypto, -1);
		return;
	}

	/*
	 * In multiqueue mode, we make each queue private to one cpu by
	 * setting the affinity hint, which eliminates contention.
	 *
	 * TODO: add cpu hotplug support by registering a cpu notifier.
	 */
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
		if (++i >= vcrypto->max_data_queues)
			break;
	}

	vcrypto->affinity_hint_set = true;
}

static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}

static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int ret;

	/* Allocate the per-queue state */
	ret = virtcrypto_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtcrypto_find_vqs(vi);
	if (ret)
		goto err_free;

	cpus_read_lock();
	virtcrypto_set_affinity(vi);
	cpus_read_unlock();

	return 0;

err_free:
	virtcrypto_free_queues(vi);
err:
	return ret;
}

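/*
 * Re-read the device's status field and bring the device up or down to
 * match. Any status bit the driver does not know is treated as a fatal
 * host error and the device is marked broken.
 */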
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
	u32 status;
	int err;

	virtio_cread_le(vcrypto->vdev,
			struct virtio_crypto_config, status, &status);

	/*
	 * Unknown status bits would be a host error and the driver
	 * should consider the device to be broken.
	 */
	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
		dev_warn(&vcrypto->vdev->dev,
			 "Unknown status bits: 0x%x\n", status);

		virtio_break_device(vcrypto->vdev);
		return -EPERM;
	}

	if (vcrypto->status == status)
		return 0;

	vcrypto->status = status;

	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
		err = virtcrypto_dev_start(vcrypto);
		if (err) {
			dev_err(&vcrypto->vdev->dev,
				"Failed to start virtio crypto device.\n");

			return -EPERM;
		}
		dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
	} else {
		virtcrypto_dev_stop(vcrypto);
		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
	}

	return 0;
}

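/*
 * Start one crypto engine per data queue; on failure, unwind the engines
 * that were already started.
 */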
static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
	int32_t i;
	int ret;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		if (vcrypto->data_vq[i].engine) {
			ret = crypto_engine_start(vcrypto->data_vq[i].engine);
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	while (--i >= 0)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);

	return ret;
}

static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
	u32 i;

	for (i = 0; i < vcrypto->max_data_queues; i++)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);
}

static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}

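/*
 * Probe: validate VIRTIO_F_VERSION_1 and config access, reject memoryless
 * NUMA nodes, read the device configuration, register with the device
 * manager, then bring up virtqueues and crypto engines before marking the
 * device ready and reading its initial status.
 */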
static int virtcrypto_probe(struct virtio_device *vdev)
{
	int err = -EFAULT;
	struct virtio_crypto *vcrypto;
	u32 max_data_queues = 0, max_cipher_key_len = 0;
	u32 max_auth_key_len = 0;
	u64 max_size = 0;
	u32 cipher_algo_l = 0;
	u32 cipher_algo_h = 0;
	u32 hash_algo = 0;
	u32 mac_algo_l = 0;
	u32 mac_algo_h = 0;
	u32 aead_algo = 0;
	u32 akcipher_algo = 0;
	u32 crypto_services = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
			       dev_to_node(&vdev->dev));
	if (!vcrypto)
		return -ENOMEM;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_dataqueues, &max_data_queues);
	if (max_data_queues < 1)
		max_data_queues = 1;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_cipher_key_len, &max_cipher_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_auth_key_len, &max_auth_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_size, &max_size);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			crypto_services, &crypto_services);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_l, &cipher_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_h, &cipher_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			hash_algo, &hash_algo);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_l, &mac_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_h, &mac_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			aead_algo, &aead_algo);
	if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
		virtio_cread_le(vdev, struct virtio_crypto_config,
				akcipher_algo, &akcipher_algo);

	/* Add virtio crypto device to global table */
	err = virtcrypto_devmgr_add_dev(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
		goto free;
	}
	vcrypto->owner = THIS_MODULE;
	vdev->priv = vcrypto;
	vcrypto->vdev = vdev;

	spin_lock_init(&vcrypto->ctrl_lock);

	/* Use single data queue as default */
	vcrypto->curr_queue = 1;
	vcrypto->max_data_queues = max_data_queues;
	vcrypto->max_cipher_key_len = max_cipher_key_len;
	vcrypto->max_auth_key_len = max_auth_key_len;
	vcrypto->max_size = max_size;
	vcrypto->crypto_services = crypto_services;
	vcrypto->cipher_algo_l = cipher_algo_l;
	vcrypto->cipher_algo_h = cipher_algo_h;
	vcrypto->mac_algo_l = mac_algo_l;
	vcrypto->mac_algo_h = mac_algo_h;
	vcrypto->hash_algo = hash_algo;
	vcrypto->aead_algo = aead_algo;
	vcrypto->akcipher_algo = akcipher_algo;

	dev_info(&vdev->dev,
		 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
		 vcrypto->max_data_queues,
		 vcrypto->max_cipher_key_len,
		 vcrypto->max_auth_key_len,
		 vcrypto->max_size);

	err = virtcrypto_init_vqs(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
		goto free_dev;
	}

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_update_status(vcrypto);
	if (err)
		goto free_engines;

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	virtio_reset_device(vdev);
	virtcrypto_del_vqs(vcrypto);
free_dev:
	virtcrypto_devmgr_rm_dev(vcrypto);
free:
	kfree(vcrypto);
	return err;
}

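/*
 * Reclaim buffers the device never consumed. Only safe once the device
 * has been reset, which is why remove and freeze reset it first.
 */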
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
	struct virtio_crypto_request *vc_req;
	int i;
	struct virtqueue *vq;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		vq = vcrypto->data_vq[i].vq;
		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL)
			virtcrypto_clear_request(vc_req);
		cond_resched();
	}
}

static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}

static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	virtcrypto_update_status(vcrypto);
}

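/*
 * Suspend/resume: freeze quiesces the device (reset, reclaim unused
 * buffers, stop the engines, delete the virtqueues); restore rebuilds
 * the same state in probe order.
 */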
#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}

static int virtcrypto_restore(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int err;

	err = virtcrypto_init_vqs(vcrypto);
	if (err)
		return err;

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_dev_start(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
		goto free_engines;
	}

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	virtio_reset_device(vdev);
	virtcrypto_del_vqs(vcrypto);
	return err;
}
#endif

static const unsigned int features[] = {
	/* none */
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_crypto_driver = {
	.driver.name         = KBUILD_MODNAME,
	.driver.owner        = THIS_MODULE,
	.feature_table       = features,
	.feature_table_size  = ARRAY_SIZE(features),
	.id_table            = id_table,
	.probe               = virtcrypto_probe,
	.remove              = virtcrypto_remove,
	.config_changed      = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze              = virtcrypto_freeze,
	.restore             = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");