// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA networking device simulator.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <linux/sysfs.h>
#include <linux/file.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>
#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator"
#define DRV_LICENSE  "GPL v2"
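/*
 * Per-virtqueue state: the vringh instance used to access the ring
 * through the device IOTLB, the ring addresses programmed by the
 * driver, and the driver-supplied interrupt callback.
 */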
struct vdpasim_virtqueue {
	struct vringh vring;
	struct vringh_kiov iov;
	unsigned short head;
	bool ready;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u32 num;
	void *private;
	irqreturn_t (*cb)(void *data);
};
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_DEVICE_ID 0x1
#define VDPASIM_VENDOR_ID 0
#define VDPASIM_VQ_NUM 0x2
#define VDPASIM_NAME "vdpasim-netdev"
static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
			      (1ULL << VIRTIO_F_VERSION_1)  |
			      (1ULL << VIRTIO_F_IOMMU_PLATFORM);
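/*
 * Note: VIRTIO_F_IOMMU_PLATFORM is mandatory here. Every descriptor
 * access goes through the simulator's software IOTLB, so a driver that
 * does not negotiate it is rejected in vdpasim_set_features().
 */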
/* State of each vdpasim device */
struct vdpasim {
	struct vdpa_device vdpa;
	struct vdpasim_virtqueue vqs[2];
	struct work_struct work;
	/* spinlock to synchronize virtqueue state */
	spinlock_t lock;
	struct virtio_net_config config;
	struct vhost_iotlb *iommu;
	void *buffer;
	u32 status;
	u32 generation;
	u64 features;
};
static struct vdpasim *vdpasim_dev;
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}
static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}
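/*
 * (Re)build the vringh view of a virtqueue from the guest addresses the
 * driver programmed via .set_vq_address(), so the workqueue can walk the
 * descriptor, available and used rings through the IOTLB.
 */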
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	int ret;

	ret = vringh_init_iotlb(&vq->vring, vdpasim_features,
				VDPASIM_QUEUE_MAX, false,
				(struct vring_desc *)(uintptr_t)vq->desc_addr,
				(struct vring_avail *)
				(uintptr_t)vq->driver_addr,
				(struct vring_used *)
				(uintptr_t)vq->device_addr);
}
static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
			  false, NULL, NULL, NULL);
}
static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < VDPASIM_VQ_NUM; i++)
		vdpasim_vq_reset(&vdpasim->vqs[i]);

	vhost_iotlb_reset(vdpasim->iommu);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}
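/*
 * Datapath: a simple TX->RX loopback. Each pass pulls one buffer chain
 * off the TX queue (vqs[1]), copies it through the bounce buffer, and
 * pushes it onto the RX queue (vqs[0]), then fires the driver callbacks.
 * The work rearms itself after a small batch to avoid hogging the CPU.
 */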
static void vdpasim_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
	ssize_t read, write;
	size_t total_write;
	int pkts = 0;
	int err;

	spin_lock(&vdpasim->lock);

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	if (!txq->ready || !rxq->ready)
		goto out;

	while (true) {
		total_write = 0;
		err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
					   &rxq->head, GFP_ATOMIC);
		if (err <= 0) {
			vringh_complete_iotlb(&txq->vring, txq->head, 0);
			break;
		}

		while (true) {
			read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
						     vdpasim->buffer,
						     PAGE_SIZE);
			if (read <= 0)
				break;

			write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
						      vdpasim->buffer, read);
			if (write <= 0)
				break;

			total_write += write;
		}

		/* Make sure data is written before advancing index */
		smp_wmb();

		vringh_complete_iotlb(&txq->vring, txq->head, 0);
		vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);

		/* Make sure used is visible before raising the interrupt. */
		smp_wmb();

		local_bh_disable();
		if (txq->cb)
			txq->cb(txq->private);
		if (rxq->cb)
			rxq->cb(rxq->private);
		local_bh_enable();

		if (++pkts > 4) {
			schedule_work(&vdpasim->work);
			goto out;
		}
	}

out:
	spin_unlock(&vdpasim->lock);
}
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}
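/*
 * dma_map_ops backed by the software IOTLB. There is no real hardware,
 * so "mapping" just records an identity PA->IOVA translation that the
 * vringh IOTLB helpers consult when the device touches memory.
 */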
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
	int ret, perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	/* For simplicity, use identical mapping to avoid e.g. an IOVA
	 * allocator.
	 */
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
				    pa, perm);
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)(pa);
}
static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
}
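/*
 * Coherent allocations come straight from the kernel heap; the physical
 * address doubles as the DMA address and is recorded read/write in the
 * IOTLB so the datapath can reach it.
 */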
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	void *addr = kmalloc(size, flag);
	int ret;

	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
					    (u64)pa + size - 1,
					    pa, VHOST_MAP_RW);
		if (ret) {
			*dma_addr = DMA_MAPPING_ERROR;
			kfree(addr);
			addr = NULL;
		} else {
			*dma_addr = (dma_addr_t)pa;
		}
	}

	return addr;
}
static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	kfree(phys_to_virt((uintptr_t)dma_addr));
}
static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};
static const struct vdpa_config_ops vdpasim_net_config_ops;
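/*
 * Allocate, initialize and register the single simulator instance: wire
 * up the loopback work, install the IOTLB-backed dma_map_ops so the
 * vDPA device can act as its own DMA device, and populate the
 * virtio-net config space with a random MAC and link-up status.
 */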
static struct vdpasim *vdpasim_create(void)
{
	struct virtio_net_config *config;
	struct vdpasim *vdpasim;
	struct device *dev;
	int ret = -ENOMEM;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL,
				    &vdpasim_net_config_ops);
	if (!vdpasim)
		goto err_alloc;

	INIT_WORK(&vdpasim->work, vdpasim_work);
	spin_lock_init(&vdpasim->lock);

	dev = &vdpasim->vdpa.dev;
	dev->coherent_dma_mask = DMA_BIT_MASK(64);
	set_dma_ops(dev, &vdpasim_dma_ops);

	vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	config = &vdpasim->config;
	config->mtu = 1500;
	config->status = VIRTIO_NET_S_LINK_UP;
	eth_random_addr(config->mac);

	vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
	vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);

	vdpasim->vdpa.dma_dev = dev;
	ret = vdpa_register_device(&vdpasim->vdpa);
	if (ret)
		goto err_iommu;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
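/*
 * vdpa_config_ops implementation: the entry points invoked by the vDPA
 * bus drivers (e.g. virtio_vdpa or vhost_vdpa) on behalf of the driver.
 */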
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}
static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}
static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}
static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}
static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}
static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state;
	spin_unlock(&vdpasim->lock);

	return 0;
}
static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	return vrh->last_avail_idx;
}
static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}
static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	return vdpasim_features;
}
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim_features;

	return 0;
}
static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}
static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}
static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	return VDPASIM_DEVICE_ID;
}
static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}
static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}
static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}
static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* Copy byte-wise and allow reads up to the end of config space */
	if (offset + len <= sizeof(struct virtio_net_config))
		memcpy(buf, (u8 *)&vdpasim->config + offset, len);
}
static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	/* No writable config supported by vdpasim */
}
static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}
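/*
 * Replace the device IOTLB wholesale with the translations supplied by
 * the caller (typically an upper layer such as vhost-vdpa pushing down
 * a complete mapping). On failure the IOTLB is left empty rather than
 * half-populated.
 */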
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	return ret;
}
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vhost_iotlb_add_range(vdpasim->iommu, iova,
				     iova + size - 1, pa, perm);
}
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);

	return 0;
}
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	cancel_work_sync(&vdpasim->work);
	kfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
}
static const struct vdpa_config_ops vdpasim_net_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.set_map = vdpasim_set_map,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};
static int __init vdpasim_dev_init(void)
{
	vdpasim_dev = vdpasim_create();

	if (!IS_ERR(vdpasim_dev))
		return 0;

	return PTR_ERR(vdpasim_dev);
}
static void __exit vdpasim_dev_exit(void)
{
	struct vdpa_device *vdpa = &vdpasim_dev->vdpa;

	vdpa_unregister_device(vdpa);
}
module_init(vdpasim_dev_init)
module_exit(vdpasim_dev_exit)
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);