// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA networking device simulator.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <linux/sysfs.h>
#include <linux/file.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>

#define DRV_VERSION "0.1"
#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC "vDPA Device Simulator"
#define DRV_LICENSE "GPL v2"

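/*
 * Per-virtqueue state of the simulator: the vringh instance used to access
 * the ring through the device IOTLB, the ring addresses programmed by the
 * driver and the callback used to signal used buffers.
 */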
struct vdpasim_virtqueue {
	struct vringh vring;
	struct vringh_kiov iov;
	unsigned short head;
	bool ready;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u32 num;
	void *private;
	irqreturn_t (*cb)(void *data);
};

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_DEVICE_ID 0x1
#define VDPASIM_VENDOR_ID 0
#define VDPASIM_VQ_NUM 0x2
#define VDPASIM_NAME "vdpasim-netdev"

static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
			      (1ULL << VIRTIO_F_VERSION_1) |
			      (1ULL << VIRTIO_F_IOMMU_PLATFORM);

/* State of each vdpasim device */
struct vdpasim {
	struct vdpa_device vdpa;
	struct vdpasim_virtqueue vqs[2];
	struct work_struct work;
	/* spinlock to synchronize virtqueue state */
	spinlock_t lock;
	struct virtio_net_config config;
	struct vhost_iotlb *iommu;
	void *buffer;
	u32 status;
	u32 generation;
	u64 features;
};

static struct vdpasim *vdpasim_dev;

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

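/*
 * (Re)initialize the vringh instance of a queue from the addresses the
 * driver programmed; ring contents are accessed through the device IOTLB.
 */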
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	int ret;

	ret = vringh_init_iotlb(&vq->vring, vdpasim_features,
				VDPASIM_QUEUE_MAX, false,
				(struct vring_desc *)(uintptr_t)vq->desc_addr,
				(struct vring_avail *)
				(uintptr_t)vq->driver_addr,
				(struct vring_used *)
				(uintptr_t)vq->device_addr);
}

static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
			  false, NULL, NULL, NULL);
}

static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < VDPASIM_VQ_NUM; i++)
		vdpasim_vq_reset(&vdpasim->vqs[i]);

	vhost_iotlb_reset(vdpasim->iommu);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

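/*
 * Datapath worker: copy buffers from the TX queue (vqs[1]) back into the RX
 * queue (vqs[0]) through a single page-sized bounce buffer. At most four
 * buffers are handled per invocation before the work is rescheduled, so the
 * queues are drained without monopolizing the CPU.
 */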
static void vdpasim_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
	ssize_t read, write;
	size_t total_write;
	int err;
	int pkts = 0;

	spin_lock(&vdpasim->lock);

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	if (!txq->ready || !rxq->ready)
		goto out;

	while (true) {
		total_write = 0;
		err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
					   &rxq->head, GFP_ATOMIC);
		if (err <= 0) {
			vringh_complete_iotlb(&txq->vring, txq->head, 0);
			break;
		}

		while (true) {
			read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
						     vdpasim->buffer,
						     PAGE_SIZE);
			if (read <= 0)
				break;

			write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
						      vdpasim->buffer, read);
			if (write <= 0)
				break;

			total_write += write;
		}

		/* Make sure data is written before advancing index */
		smp_wmb();

		vringh_complete_iotlb(&txq->vring, txq->head, 0);
		vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);

		/* Make sure used is visible before raising the interrupt. */
		smp_wmb();

		local_bh_disable();
		if (txq->cb)
			txq->cb(txq->private);
		if (rxq->cb)
			rxq->cb(rxq->private);
		local_bh_enable();

		if (++pkts > 4) {
			schedule_work(&vdpasim->work);
			goto out;
		}
	}

out:
	spin_unlock(&vdpasim->lock);
}

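/*
 * The simulator acts as its own DMA device: the dma_map_ops below simply
 * record identity (PA -> PA) mappings in the simulator's IOTLB so that the
 * vringh_*_iotlb() helpers can translate buffer addresses.
 */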
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
	int ret, perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	/* For simplicity, use an identity mapping to avoid e.g. an IOVA
	 * allocator.
	 */
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1, pa, perm);
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)(pa);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	void *addr = kmalloc(size, flag);
	int ret;

	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
					    (u64)pa + size - 1,
					    pa, VHOST_MAP_RW);
		if (ret) {
			*dma_addr = DMA_MAPPING_ERROR;
			kfree(addr);
			addr = NULL;
		} else {
			*dma_addr = (dma_addr_t)pa;
		}
	}

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	kfree(phys_to_virt((uintptr_t)dma_addr));
}

static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_net_config_ops;

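/*
 * Allocate and register the simulated vDPA networking device: set up its DMA
 * ops, IOTLB and bounce buffer, generate a random MAC address and report the
 * link as up.
 */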
static struct vdpasim *vdpasim_create(void)
{
	struct virtio_net_config *config;
	struct vdpasim *vdpasim;
	struct device *dev;
	int ret = -ENOMEM;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL,
				    &vdpasim_net_config_ops);
	if (!vdpasim)
		goto err_alloc;

	INIT_WORK(&vdpasim->work, vdpasim_work);
	spin_lock_init(&vdpasim->lock);

	dev = &vdpasim->vdpa.dev;
	dev->coherent_dma_mask = DMA_BIT_MASK(64);
	set_dma_ops(dev, &vdpasim_dma_ops);

	vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	config = &vdpasim->config;
	config->mtu = 1500;
	config->status = VIRTIO_NET_S_LINK_UP;
	eth_random_addr(config->mac);

	vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
	vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);

	vdpasim->vdpa.dma_dev = dev;
	ret = vdpa_register_device(&vdpasim->vdpa);
	if (ret)
		goto err_iommu;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}

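/* vdpa_config_ops implementation: virtqueue and device-level callbacks. */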
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	return vrh->last_avail_idx;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	return vdpasim_features;
}

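/*
 * VIRTIO_F_IOMMU_PLATFORM is mandatory: all buffer addresses must be
 * translated through the simulator's IOTLB, so a driver that does not
 * negotiate the feature cannot work.
 */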
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim_features;

	return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	return VDPASIM_DEVICE_ID;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}

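/*
 * Read from the net config space; accesses that would run past the end of
 * the structure are ignored.
 */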
static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len <= sizeof(struct virtio_net_config))
		memcpy(buf, (u8 *)&vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	/* No writable config supported by vdpasim */
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

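/*
 * Replace the simulator's IOTLB with the mappings described by the IOTLB
 * passed in from the vDPA bus; on failure the IOTLB is left empty.
 */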
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vhost_iotlb_add_range(vdpasim->iommu, iova,
				     iova + size - 1, pa, perm);
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	cancel_work_sync(&vdpasim->work);
	kfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
}

static const struct vdpa_config_ops vdpasim_net_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.set_map = vdpasim_set_map,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

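/*
 * A single simulator instance is created at module load and unregistered at
 * module unload.
 */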
static int __init vdpasim_dev_init(void)
{
	vdpasim_dev = vdpasim_create();

	if (!IS_ERR(vdpasim_dev))
		return 0;

	return PTR_ERR(vdpasim_dev);
}

static void __exit vdpasim_dev_exit(void)
{
	struct vdpa_device *vdpa = &vdpasim_dev->vdpa;

	vdpa_unregister_device(vdpa);
}

module_init(vdpasim_dev_init)
module_exit(vdpasim_dev_exit)

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);