// SPDX-License-Identifier: GPL-2.0-or-later
 /* Driver for Virtio crypto device.
  *
  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
  */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
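/*
 * Free the parts of a completed request that may hold sensitive
 * material: req_data can carry keys or plaintext, so it is released
 * with kfree_sensitive() (zeroized before freeing) rather than kfree().
 */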
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
        if (vc_req) {
                kfree_sensitive(vc_req->req_data);
                kfree(vc_req->sgs);
        }
}
static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
        complete(&vc_ctrl_req->compl);
}
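/*
 * Control-queue interrupt callback: drain completed control requests
 * and wake their waiters.  The ctrl_lock is dropped around each
 * completion, and the disable_cb/enable_cb loop re-checks the queue so
 * buffers that arrive while callbacks were disabled are not missed.
 */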
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
        struct virtio_crypto *vcrypto = vq->vdev->priv;
        struct virtio_crypto_ctrl_request *vc_ctrl_req;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
                        spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
                        virtio_crypto_ctrlq_callback(vc_ctrl_req);
                        spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
                                  unsigned int out_sgs, unsigned int in_sgs,
                                  struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
        int err;
        unsigned long flags;

        init_completion(&vc_ctrl_req->compl);

        spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
                return err;
        }

        virtqueue_kick(vcrypto->ctrl_vq);
        spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

        wait_for_completion(&vc_ctrl_req->compl);

        return 0;
}
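/*
 * Data-queue interrupt callback: per-queue analogue of the control
 * path above.  Each finished request is handed to its algorithm
 * completion hook (alg_cb) with the queue lock dropped, since the hook
 * completes the corresponding crypto API request.
 */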
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
        struct virtio_crypto *vcrypto = vq->vdev->priv;
        struct virtio_crypto_request *vc_req;
        unsigned long flags;
        unsigned int len;
        unsigned int qid = vq->index;

        spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
                        spin_unlock_irqrestore(
                                &vcrypto->data_vq[qid].lock, flags);
                        vc_req->alg_cb(vc_req, len);
                        spin_lock_irqsave(
                                &vcrypto->data_vq[qid].lock, flags);
                }
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}
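/*
 * Virtqueue layout: max_data_queues data queues first, then a single
 * control queue at the last index.  Each data queue gets a crypto
 * engine whose queue length matches the vring size, so the engine does
 * not accept more requests than the ring can hold.
 */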
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
        vq_callback_t **callbacks;
        struct virtqueue **vqs;
        int ret = -ENOMEM;
        int i, total_vqs;
        const char **names;
        struct device *dev = &vi->vdev->dev;

        /*
         * We expect 1 data virtqueue, followed by
         * possible N-1 data queues used in multiqueue mode,
         * followed by control vq.
         */
        total_vqs = vi->max_data_queues + 1;

        /* Allocate space for find_vqs parameters */
        vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!vqs)
                goto err_vq;
        callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
        if (!callbacks)
                goto err_callback;
        names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
        if (!names)
                goto err_names;

        /* Parameters for control virtqueue */
        callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
        names[total_vqs - 1] = "controlq";

        /* Allocate/initialize parameters for data virtqueues */
        for (i = 0; i < vi->max_data_queues; i++) {
                callbacks[i] = virtcrypto_dataq_callback;
                snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
                         "dataq.%d", i);
                names[i] = vi->data_vq[i].name;
        }

        ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
        if (ret)
                goto err_find;

        vi->ctrl_vq = vqs[total_vqs - 1];

        for (i = 0; i < vi->max_data_queues; i++) {
                spin_lock_init(&vi->data_vq[i].lock);
                vi->data_vq[i].vq = vqs[i];
                /* Initialize crypto engine */
                vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
                                                virtqueue_get_vring_size(vqs[i]));
                if (!vi->data_vq[i].engine) {
                        ret = -ENOMEM;
                        goto err_engine;
                }
        }

        kfree(names);
        kfree(callbacks);
        kfree(vqs);

        return 0;

err_engine:
        while (--i >= 0)
                crypto_engine_exit(vi->data_vq[i].engine);
err_find:
        kfree(names);
err_names:
        kfree(callbacks);
err_callback:
        kfree(vqs);
err_vq:
        return ret;
}
static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
        vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
                              GFP_KERNEL);
        if (!vi->data_vq)
                return -ENOMEM;

        return 0;
}
static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
        int i;

        if (vi->affinity_hint_set) {
                for (i = 0; i < vi->max_data_queues; i++)
                        virtqueue_set_affinity(vi->data_vq[i].vq, NULL);

                vi->affinity_hint_set = false;
        }
}
static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
        int i = 0;
        int cpu;

        /*
         * In single queue mode, we don't set the cpu affinity.
         */
        if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
                virtcrypto_clean_affinity(vcrypto, -1);
                return;
        }

        /*
         * In multiqueue mode, we let each queue be private to one cpu
         * by setting the affinity hint to eliminate the contention.
         *
         * TODO: add cpu hotplug support by registering a cpu notifier.
         */
        for_each_online_cpu(cpu) {
                virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
                if (++i >= vcrypto->max_data_queues)
                        break;
        }

        vcrypto->affinity_hint_set = true;
}
static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
        kfree(vi->data_vq);
}
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
        int ret;

        /* Allocate send & receive queues */
        ret = virtcrypto_alloc_queues(vi);
        if (ret)
                goto err;

        ret = virtcrypto_find_vqs(vi);
        if (ret)
                goto err_free;

        cpus_read_lock();
        virtcrypto_set_affinity(vi);
        cpus_read_unlock();

        return 0;

err_free:
        virtcrypto_free_queues(vi);
err:
        return ret;
}
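/*
 * Re-read the device status field from config space and act on
 * transitions.  Any status bit the driver does not understand is
 * treated as a fatal host error and the whole device is marked broken.
 */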
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
        u32 status;
        int err;

        virtio_cread_le(vcrypto->vdev,
                        struct virtio_crypto_config, status, &status);

        /*
         * Unknown status bits would be a host error and the driver
         * should consider the device to be broken.
         */
        if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
                dev_warn(&vcrypto->vdev->dev,
                         "Unknown status bits: 0x%x\n", status);

                virtio_break_device(vcrypto->vdev);
                return -EPERM;
        }

        if (vcrypto->status == status)
                return 0;

        vcrypto->status = status;

        if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
                err = virtcrypto_dev_start(vcrypto);
                if (err) {
                        dev_err(&vcrypto->vdev->dev,
                                "Failed to start virtio crypto device.\n");
                        return -EPERM;
                }
                dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
        } else {
                virtcrypto_dev_stop(vcrypto);
                dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
        }

        return 0;
}
static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
        int32_t i;
        int ret;

        for (i = 0; i < vcrypto->max_data_queues; i++) {
                if (vcrypto->data_vq[i].engine) {
                        ret = crypto_engine_start(vcrypto->data_vq[i].engine);
                        if (ret)
                                goto err;
                }
        }

        return 0;

err:
        while (--i >= 0)
                if (vcrypto->data_vq[i].engine)
                        crypto_engine_exit(vcrypto->data_vq[i].engine);

        return ret;
}
static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
        u32 i;

        for (i = 0; i < vcrypto->max_data_queues; i++)
                if (vcrypto->data_vq[i].engine)
                        crypto_engine_exit(vcrypto->data_vq[i].engine);
}
static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
        struct virtio_device *vdev = vcrypto->vdev;

        virtcrypto_clean_affinity(vcrypto, -1);

        vdev->config->del_vqs(vdev);

        virtcrypto_free_queues(vcrypto);
}
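/*
 * Probe: validate VIRTIO_F_VERSION_1 and config access, read the
 * device's capability fields from config space, register the device,
 * bring up virtqueues and crypto engines, then let the status field
 * decide whether the accelerator is actually usable.
 */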
static int virtcrypto_probe(struct virtio_device *vdev)
{
        int err = -EFAULT;
        struct virtio_crypto *vcrypto;
        u32 max_data_queues = 0, max_cipher_key_len = 0;
        u32 max_auth_key_len = 0;
        u64 max_size = 0;
        u32 cipher_algo_l = 0;
        u32 cipher_algo_h = 0;
        u32 hash_algo = 0;
        u32 mac_algo_l = 0;
        u32 mac_algo_h = 0;
        u32 aead_algo = 0;
        u32 akcipher_algo = 0;
        u32 crypto_services = 0;

        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                return -ENODEV;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
                /*
                 * If the accelerator is connected to a node with no memory
                 * there is no point in using the accelerator since the remote
                 * memory transaction will be very slow.
                 */
                dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
                return -EINVAL;
        }

        vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
                               dev_to_node(&vdev->dev));
        if (!vcrypto)
                return -ENOMEM;

        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_dataqueues, &max_data_queues);
        if (max_data_queues < 1)
                max_data_queues = 1;

        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_cipher_key_len, &max_cipher_key_len);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_auth_key_len, &max_auth_key_len);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_size, &max_size);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        crypto_services, &crypto_services);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        cipher_algo_l, &cipher_algo_l);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        cipher_algo_h, &cipher_algo_h);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        hash_algo, &hash_algo);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        mac_algo_l, &mac_algo_l);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        mac_algo_h, &mac_algo_h);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        aead_algo, &aead_algo);
        if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
                virtio_cread_le(vdev, struct virtio_crypto_config,
                                akcipher_algo, &akcipher_algo);

        /* Add virtio crypto device to global table */
        err = virtcrypto_devmgr_add_dev(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
                goto free;
        }
        vcrypto->owner = THIS_MODULE;
        vdev->priv = vcrypto;
        vcrypto->vdev = vdev;

        spin_lock_init(&vcrypto->ctrl_lock);

        /* Use single data queue as default */
        vcrypto->curr_queue = 1;
        vcrypto->max_data_queues = max_data_queues;
        vcrypto->max_cipher_key_len = max_cipher_key_len;
        vcrypto->max_auth_key_len = max_auth_key_len;
        vcrypto->max_size = max_size;
        vcrypto->crypto_services = crypto_services;
        vcrypto->cipher_algo_l = cipher_algo_l;
        vcrypto->cipher_algo_h = cipher_algo_h;
        vcrypto->mac_algo_l = mac_algo_l;
        vcrypto->mac_algo_h = mac_algo_h;
        vcrypto->hash_algo = hash_algo;
        vcrypto->aead_algo = aead_algo;
        vcrypto->akcipher_algo = akcipher_algo;

        dev_info(&vdev->dev,
                 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
                 vcrypto->max_data_queues,
                 vcrypto->max_cipher_key_len,
                 vcrypto->max_auth_key_len,
                 vcrypto->max_size);

        err = virtcrypto_init_vqs(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to initialize vqs.\n");
                goto free_dev;
        }

        err = virtcrypto_start_crypto_engines(vcrypto);
        if (err)
                goto free_vqs;

        virtio_device_ready(vdev);

        err = virtcrypto_update_status(vcrypto);
        if (err)
                goto free_engines;

        return 0;

free_engines:
        virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
        virtio_reset_device(vdev);
        virtcrypto_del_vqs(vcrypto);
free_dev:
        virtcrypto_devmgr_rm_dev(vcrypto);
free:
        kfree(vcrypto);
        return err;
}
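/*
 * After the device has been reset, detach and free any requests still
 * sitting on the data queues so their memory is not leaked.  Safe only
 * once the device can no longer use the buffers.
 */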
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
        struct virtio_crypto_request *vc_req;
        int i;
        struct virtqueue *vq;

        for (i = 0; i < vcrypto->max_data_queues; i++) {
                vq = vcrypto->data_vq[i].vq;
                while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
                        kfree(vc_req->req_data);
                        kfree(vc_req->sgs);
                }
                cond_resched();
        }
}
static void virtcrypto_remove(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);
        virtio_reset_device(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        virtcrypto_devmgr_rm_dev(vcrypto);
        kfree(vcrypto);
}
static void virtcrypto_config_changed(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        virtcrypto_update_status(vcrypto);
}
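/*
 * PM sleep hooks: freeze mirrors remove (quiesce the device, reclaim
 * buffers, tear down engines and vqs) but keeps the device registered;
 * restore rebuilds the vqs and engines and restarts the device.
 */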
#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        virtio_reset_device(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);

        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        return 0;
}
static int virtcrypto_restore(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;
        int err;

        err = virtcrypto_init_vqs(vcrypto);
        if (err)
                return err;

        err = virtcrypto_start_crypto_engines(vcrypto);
        if (err)
                goto free_vqs;

        virtio_device_ready(vdev);

        err = virtcrypto_dev_start(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
                goto free_engines;
        }

        return 0;

free_engines:
        virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
        virtio_reset_device(vdev);
        virtcrypto_del_vqs(vcrypto);
        return err;
}
#endif
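/*
 * The driver negotiates no optional virtio feature bits;
 * VIRTIO_F_VERSION_1 is checked directly in probe, so the feature
 * table is intentionally empty.
 */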
static const unsigned int features[] = {
        /* none */
};
static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
        { 0 },
};
static struct virtio_driver virtio_crypto_driver = {
        .driver.name         = KBUILD_MODNAME,
        .driver.owner        = THIS_MODULE,
        .feature_table       = features,
        .feature_table_size  = ARRAY_SIZE(features),
        .id_table            = id_table,
        .probe               = virtcrypto_probe,
        .remove              = virtcrypto_remove,
        .config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtcrypto_freeze,
        .restore = virtcrypto_restore,
#endif
};
module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");