// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA networking device simulator.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <linux/sysfs.h>
#include <linux/file.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator"
#define DRV_LICENSE  "GPL v2"

struct vdpasim_virtqueue {
	struct vringh vring;
	struct vringh_kiov iov;
	unsigned short head;
	bool ready;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u32 num;
	void *private;
	irqreturn_t (*cb)(void *data);
};

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_DEVICE_ID 0x1
#define VDPASIM_VENDOR_ID 0
#define VDPASIM_VQ_NUM 0x2
#define VDPASIM_NAME "vdpasim-netdev"

static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
			      (1ULL << VIRTIO_F_VERSION_1)  |
			      (1ULL << VIRTIO_F_IOMMU_PLATFORM);

/* State of each vdpasim device */
struct vdpasim {
	struct vdpa_device vdpa;
	struct vdpasim_virtqueue vqs[2];
	struct work_struct work;
	/* spinlock to synchronize virtqueue state */
	spinlock_t lock;
	struct virtio_net_config config;
	struct vhost_iotlb *iommu;
	void *buffer;
	u32 status;
	u32 generation;
	u64 features;
};

static struct vdpasim *vdpasim_dev;

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

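/* Bring up a virtqueue once the driver marks it ready: bind a vringh
 * instance to the guest-supplied descriptor, available and used ring
 * addresses so the simulator can access the ring through its IOTLB.
 */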
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);
}

static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
			  false, NULL, NULL, NULL);
}

static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < VDPASIM_VQ_NUM; i++)
		vdpasim_vq_reset(&vdpasim->vqs[i]);

	vhost_iotlb_reset(vdpasim->iommu);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

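/* Datapath of the simulated NIC: drain buffers from the TX virtqueue
 * (vqs[1]) and loop them back into the RX virtqueue (vqs[0]) through a
 * single page-sized bounce buffer, rescheduling itself after a small
 * batch so the work item does not monopolize the CPU.
 */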
static void vdpasim_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
	ssize_t read, write;
	size_t total_write;
	int err;
	int pkts = 0;

	spin_lock(&vdpasim->lock);

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	if (!txq->ready || !rxq->ready)
		goto out;

	while (true) {
		total_write = 0;
		err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
					   &rxq->head, GFP_ATOMIC);
		if (err <= 0) {
			vringh_complete_iotlb(&txq->vring, txq->head, 0);
			break;
		}

		while (true) {
			read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
						     vdpasim->buffer,
						     PAGE_SIZE);
			if (read <= 0)
				break;

			write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
						      vdpasim->buffer, read);
			if (write <= 0)
				break;

			total_write += write;
		}

		/* Make sure data is written before advancing index */
		smp_wmb();

		vringh_complete_iotlb(&txq->vring, txq->head, 0);
		vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);

		/* Make sure used is visible before raising the interrupt. */
		smp_wmb();

		local_bh_disable();
		if (txq->cb)
			txq->cb(txq->private);
		if (rxq->cb)
			rxq->cb(rxq->private);
		local_bh_enable();

		if (++pkts > 4) {
			schedule_work(&vdpasim->work);
			goto out;
		}
	}

out:
	spin_unlock(&vdpasim->lock);
}

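/* The simulator acts as its own DMA device: the dma_map_ops below do no
 * real mapping, they only record identity (PA == IOVA) translations in
 * the software IOTLB so that vringh can resolve buffer addresses later.
 */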
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
	int ret, perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	/* For simplicity, use an identity mapping to avoid e.g. an IOVA
	 * allocator.
	 */
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
				    pa, perm);
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)(pa);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	void *addr = kmalloc(size, flag);
	int ret;

	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
					    (u64)pa + size - 1,
					    pa, VHOST_MAP_RW);
		if (ret) {
			*dma_addr = DMA_MAPPING_ERROR;
			kfree(addr);
			addr = NULL;
		} else {
			*dma_addr = (dma_addr_t)pa;
		}
	}

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	kfree(phys_to_virt((uintptr_t)dma_addr));
}

static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_net_config_ops;

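/* Allocate and register a single simulated vDPA network device: install
 * the DMA ops and software IOTLB, seed the virtio-net config space with
 * a random MAC address, and register the device on the vDPA bus.
 */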
static struct vdpasim *vdpasim_create(void)
{
	struct virtio_net_config *config;
	struct vdpasim *vdpasim;
	struct device *dev;
	int ret = -ENOMEM;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL,
				    &vdpasim_net_config_ops);
	if (!vdpasim)
		goto err_alloc;

	INIT_WORK(&vdpasim->work, vdpasim_work);
	spin_lock_init(&vdpasim->lock);

	dev = &vdpasim->vdpa.dev;
	dev->coherent_dma_mask = DMA_BIT_MASK(64);
	set_dma_ops(dev, &vdpasim_dma_ops);

	vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	config = &vdpasim->config;
	config->mtu = 1500;
	config->status = VIRTIO_NET_S_LINK_UP;
	eth_random_addr(config->mac);

	vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
	vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);

	vdpasim->vdpa.dma_dev = dev;
	ret = vdpa_register_device(&vdpasim->vdpa);
	if (ret)
		goto err_iommu;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	return vrh->last_avail_idx;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	return vdpasim_features;
}

static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim_features;

	return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	return VDPASIM_DEVICE_ID;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len <= sizeof(struct virtio_net_config))
		memcpy(buf, (u8 *)&vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	/* No writable config supported by vdpasim */
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

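/* Replace the device IOTLB wholesale with the translations provided by
 * the caller.
 */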
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vhost_iotlb_add_range(vdpasim->iommu, iova,
				     iova + size - 1, pa, perm);
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	cancel_work_sync(&vdpasim->work);
	kfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
}

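/* vDPA config operations exposed by the simulator. */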
static const struct vdpa_config_ops vdpasim_net_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.set_map = vdpasim_set_map,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static int __init vdpasim_dev_init(void)
{
	vdpasim_dev = vdpasim_create();

	if (!IS_ERR(vdpasim_dev))
		return 0;

	return PTR_ERR(vdpasim_dev);
}

static void __exit vdpasim_dev_exit(void)
{
	struct vdpa_device *vdpa = &vdpasim_dev->vdpa;

	vdpa_unregister_device(vdpa);
}

module_init(vdpasim_dev_init)
module_exit(vdpasim_dev_exit)

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);