/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "mock_dmabuf.h"

9 | static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, | |
10 | enum dma_data_direction dir) | |
11 | { | |
12 | struct mock_dmabuf *mock = to_mock(attachment->dmabuf); | |
13 | struct sg_table *st; | |
14 | struct scatterlist *sg; | |
15 | int i, err; | |
16 | ||
17 | st = kmalloc(sizeof(*st), GFP_KERNEL); | |
18 | if (!st) | |
19 | return ERR_PTR(-ENOMEM); | |
20 | ||
21 | err = sg_alloc_table(st, mock->npages, GFP_KERNEL); | |
22 | if (err) | |
23 | goto err_free; | |
24 | ||
25 | sg = st->sgl; | |
26 | for (i = 0; i < mock->npages; i++) { | |
27 | sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0); | |
28 | sg = sg_next(sg); | |
29 | } | |
30 | ||
31 | if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { | |
32 | err = -ENOMEM; | |
33 | goto err_st; | |
34 | } | |
35 | ||
36 | return st; | |
37 | ||
38 | err_st: | |
39 | sg_free_table(st); | |
40 | err_free: | |
41 | kfree(st); | |
42 | return ERR_PTR(err); | |
43 | } | |
44 | ||
45 | static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment, | |
46 | struct sg_table *st, | |
47 | enum dma_data_direction dir) | |
48 | { | |
49 | dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir); | |
50 | sg_free_table(st); | |
51 | kfree(st); | |
52 | } | |
53 | ||
54 | static void mock_dmabuf_release(struct dma_buf *dma_buf) | |
55 | { | |
56 | struct mock_dmabuf *mock = to_mock(dma_buf); | |
57 | int i; | |
58 | ||
59 | for (i = 0; i < mock->npages; i++) | |
60 | put_page(mock->pages[i]); | |
61 | ||
62 | kfree(mock); | |
63 | } | |
64 | ||
65 | static void *mock_dmabuf_vmap(struct dma_buf *dma_buf) | |
66 | { | |
67 | struct mock_dmabuf *mock = to_mock(dma_buf); | |
68 | ||
d4efd79a | 69 | return vm_map_ram(mock->pages, mock->npages, 0); |
6cca22ed CW |
70 | } |
71 | ||
72 | static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) | |
73 | { | |
74 | struct mock_dmabuf *mock = to_mock(dma_buf); | |
75 | ||
76 | vm_unmap_ram(vaddr, mock->npages); | |
77 | } | |
78 | ||
6cca22ed CW |
79 | static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) |
80 | { | |
81 | return -ENODEV; | |
82 | } | |
83 | ||
84 | static const struct dma_buf_ops mock_dmabuf_ops = { | |
85 | .map_dma_buf = mock_map_dma_buf, | |
86 | .unmap_dma_buf = mock_unmap_dma_buf, | |
87 | .release = mock_dmabuf_release, | |
6cca22ed CW |
88 | .mmap = mock_dmabuf_mmap, |
89 | .vmap = mock_dmabuf_vmap, | |
90 | .vunmap = mock_dmabuf_vunmap, | |
91 | }; | |
92 | ||
93 | static struct dma_buf *mock_dmabuf(int npages) | |
94 | { | |
95 | struct mock_dmabuf *mock; | |
96 | DEFINE_DMA_BUF_EXPORT_INFO(exp_info); | |
97 | struct dma_buf *dmabuf; | |
98 | int i; | |
99 | ||
100 | mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *), | |
101 | GFP_KERNEL); | |
102 | if (!mock) | |
103 | return ERR_PTR(-ENOMEM); | |
104 | ||
105 | mock->npages = npages; | |
106 | for (i = 0; i < npages; i++) { | |
107 | mock->pages[i] = alloc_page(GFP_KERNEL); | |
108 | if (!mock->pages[i]) | |
109 | goto err; | |
110 | } | |
111 | ||
112 | exp_info.ops = &mock_dmabuf_ops; | |
113 | exp_info.size = npages * PAGE_SIZE; | |
114 | exp_info.flags = O_CLOEXEC; | |
115 | exp_info.priv = mock; | |
116 | ||
117 | dmabuf = dma_buf_export(&exp_info); | |
118 | if (IS_ERR(dmabuf)) | |
119 | goto err; | |
120 | ||
121 | return dmabuf; | |
122 | ||
123 | err: | |
124 | while (i--) | |
125 | put_page(mock->pages[i]); | |
126 | kfree(mock); | |
127 | return ERR_PTR(-ENOMEM); | |
128 | } |