/* mlx5 core buffer / doorbell allocation helpers */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 | ||
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"
43 | ||
b47bd6ea DJ |
/* One DMA-coherent page of doorbell records, carved into
 * cache-line-sized slots.  Pages are linked on dev->priv.pgdir_list;
 * 'bitmap' tracks free slots (a set bit means the slot is free).
 */
struct mlx5_db_pgdir {
	struct list_head	list;      /* link on dev->priv.pgdir_list */
	unsigned long	       *bitmap;    /* free-slot mask, one bit per record */
	__be32		       *db_page;   /* CPU address of the doorbell page */
	dma_addr_t		db_dma;    /* bus address of db_page */
};
50 | ||
/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.
 */
54 | ||
311c7c71 SM |
/* Allocate a zeroed DMA-coherent buffer, preferring memory on @node.
 *
 * dma_alloc_coherent() takes its NUMA placement hint from the device,
 * so the device's node is temporarily overridden and then restored.
 * priv->alloc_mutex serializes the override so concurrent callers
 * cannot observe (or permanently leak) each other's temporary node.
 *
 * Returns the CPU virtual address and fills *dma_handle with the bus
 * address, or NULL on allocation failure.
 */
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   size_t size, dma_addr_t *dma_handle,
					   int node)
{
	struct mlx5_priv *priv = &dev->priv;
	int original_node;
	void *cpu_handle;

	mutex_lock(&priv->alloc_mutex);
	original_node = dev_to_node(&dev->pdev->dev);
	set_dev_node(&dev->pdev->dev, node);
	cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
					GFP_KERNEL);
	set_dev_node(&dev->pdev->dev, original_node);
	mutex_unlock(&priv->alloc_mutex);
	return cpu_handle;
}
72 | ||
73 | int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, | |
388ca8be | 74 | struct mlx5_frag_buf *buf, int node) |
e126ba97 EC |
75 | { |
76 | dma_addr_t t; | |
77 | ||
78 | buf->size = size; | |
64ffaa21 AV |
79 | buf->npages = 1; |
80 | buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; | |
388ca8be YC |
81 | |
82 | buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL); | |
83 | if (!buf->frags) | |
64ffaa21 | 84 | return -ENOMEM; |
e126ba97 | 85 | |
388ca8be YC |
86 | buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size, |
87 | &t, node); | |
88 | if (!buf->frags->buf) | |
89 | goto err_out; | |
90 | ||
91 | buf->frags->map = t; | |
e126ba97 | 92 | |
64ffaa21 AV |
93 | while (t & ((1 << buf->page_shift) - 1)) { |
94 | --buf->page_shift; | |
95 | buf->npages *= 2; | |
96 | } | |
e126ba97 | 97 | |
64ffaa21 | 98 | return 0; |
388ca8be YC |
99 | err_out: |
100 | kfree(buf->frags); | |
101 | return -ENOMEM; | |
e126ba97 | 102 | } |
311c7c71 | 103 | |
388ca8be YC |
/* As mlx5_buf_alloc_node(), using the device's home NUMA node. */
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf)
{
	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
}
EXPORT_SYMBOL(mlx5_buf_alloc);
e126ba97 | 110 | |
388ca8be | 111 | void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) |
e126ba97 | 112 | { |
388ca8be YC |
113 | dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf, |
114 | buf->frags->map); | |
115 | ||
116 | kfree(buf->frags); | |
e126ba97 EC |
117 | } |
118 | EXPORT_SYMBOL_GPL(mlx5_buf_free); | |
119 | ||
1c1b5228 TT |
/* Allocate @size bytes as a list of page-sized DMA-coherent fragments
 * on @node; only the last fragment may be shorter than PAGE_SIZE.
 *
 * Each fragment's DMA address must be PAGE_SIZE-aligned; a misaligned
 * mapping is treated as a hard failure.
 *
 * Returns 0 on success or -ENOMEM.  Pair with mlx5_frag_buf_free().
 */
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node)
{
	int i;

	buf->size = size;
	buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf->page_shift = PAGE_SHIFT;
	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
			     GFP_KERNEL);
	if (!buf->frags)
		goto err_out;

	for (i = 0; i < buf->npages; i++) {
		struct mlx5_buf_list *frag = &buf->frags[i];
		int frag_sz = min_t(int, size, PAGE_SIZE); /* last frag may be short */

		frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
							  &frag->map, node);
		if (!frag->buf)
			goto err_free_buf;
		if (frag->map & ((1 << buf->page_shift) - 1)) {
			/* free this frag here (with its real size) before
			 * falling into the common unwind path below
			 */
			dma_free_coherent(&dev->pdev->dev, frag_sz,
					  buf->frags[i].buf, buf->frags[i].map);
			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
				       &frag->map, buf->page_shift);
			goto err_free_buf;
		}
		size -= frag_sz;
	}

	return 0;

err_free_buf:
	/* frags 0..i-1 are always full pages: only the final frag can be
	 * short, and it cannot precede the failing one -- so PAGE_SIZE is
	 * the correct size to free here
	 */
	while (i--)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
				  buf->frags[i].map);
	kfree(buf->frags);
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);
1c1b5228 TT |
162 | |
163 | void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) | |
164 | { | |
165 | int size = buf->size; | |
166 | int i; | |
167 | ||
168 | for (i = 0; i < buf->npages; i++) { | |
169 | int frag_sz = min_t(int, size, PAGE_SIZE); | |
170 | ||
171 | dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf, | |
172 | buf->frags[i].map); | |
173 | size -= frag_sz; | |
174 | } | |
175 | kfree(buf->frags); | |
176 | } | |
388ca8be | 177 | EXPORT_SYMBOL_GPL(mlx5_frag_buf_free); |
1c1b5228 | 178 | |
311c7c71 SM |
179 | static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, |
180 | int node) | |
e126ba97 | 181 | { |
b47bd6ea | 182 | u32 db_per_page = PAGE_SIZE / cache_line_size(); |
e126ba97 EC |
183 | struct mlx5_db_pgdir *pgdir; |
184 | ||
185 | pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); | |
186 | if (!pgdir) | |
187 | return NULL; | |
188 | ||
214fa1c4 | 189 | pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL); |
b47bd6ea DJ |
190 | if (!pgdir->bitmap) { |
191 | kfree(pgdir); | |
192 | return NULL; | |
193 | } | |
194 | ||
195 | bitmap_fill(pgdir->bitmap, db_per_page); | |
311c7c71 SM |
196 | |
197 | pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, | |
198 | &pgdir->db_dma, node); | |
e126ba97 | 199 | if (!pgdir->db_page) { |
214fa1c4 | 200 | bitmap_free(pgdir->bitmap); |
e126ba97 EC |
201 | kfree(pgdir); |
202 | return NULL; | |
203 | } | |
204 | ||
205 | return pgdir; | |
206 | } | |
207 | ||
208 | static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, | |
209 | struct mlx5_db *db) | |
210 | { | |
b47bd6ea | 211 | u32 db_per_page = PAGE_SIZE / cache_line_size(); |
e126ba97 EC |
212 | int offset; |
213 | int i; | |
214 | ||
b47bd6ea DJ |
215 | i = find_first_bit(pgdir->bitmap, db_per_page); |
216 | if (i >= db_per_page) | |
e126ba97 EC |
217 | return -ENOMEM; |
218 | ||
219 | __clear_bit(i, pgdir->bitmap); | |
220 | ||
221 | db->u.pgdir = pgdir; | |
222 | db->index = i; | |
b47bd6ea | 223 | offset = db->index * cache_line_size(); |
e126ba97 EC |
224 | db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); |
225 | db->dma = pgdir->db_dma + offset; | |
226 | ||
b812b544 SM |
227 | db->db[0] = 0; |
228 | db->db[1] = 0; | |
229 | ||
e126ba97 EC |
230 | return 0; |
231 | } | |
232 | ||
311c7c71 | 233 | int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node) |
e126ba97 EC |
234 | { |
235 | struct mlx5_db_pgdir *pgdir; | |
236 | int ret = 0; | |
237 | ||
238 | mutex_lock(&dev->priv.pgdir_mutex); | |
239 | ||
240 | list_for_each_entry(pgdir, &dev->priv.pgdir_list, list) | |
241 | if (!mlx5_alloc_db_from_pgdir(pgdir, db)) | |
242 | goto out; | |
243 | ||
311c7c71 | 244 | pgdir = mlx5_alloc_db_pgdir(dev, node); |
e126ba97 EC |
245 | if (!pgdir) { |
246 | ret = -ENOMEM; | |
247 | goto out; | |
248 | } | |
249 | ||
250 | list_add(&pgdir->list, &dev->priv.pgdir_list); | |
251 | ||
252 | /* This should never fail -- we just allocated an empty page: */ | |
253 | WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db)); | |
254 | ||
255 | out: | |
256 | mutex_unlock(&dev->priv.pgdir_mutex); | |
257 | ||
258 | return ret; | |
259 | } | |
311c7c71 SM |
260 | EXPORT_SYMBOL_GPL(mlx5_db_alloc_node); |
261 | ||
/* As mlx5_db_alloc_node(), using the device's home NUMA node. */
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
267 | ||
/* Return a doorbell record to its page directory; once every record on
 * the page is free again, the whole page directory is torn down.
 */
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	u32 db_per_page = PAGE_SIZE / cache_line_size();

	mutex_lock(&dev->priv.pgdir_mutex);

	/* mark this record's slot free again (set bit == free) */
	__set_bit(db->index, db->u.pgdir->bitmap);

	/* last record handed back: release the page, its bitmap, and
	 * drop the pgdir from the device list
	 */
	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		bitmap_free(db->u.pgdir->bitmap);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
287 | ||
388ca8be | 288 | void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas) |
e126ba97 EC |
289 | { |
290 | u64 addr; | |
291 | int i; | |
292 | ||
293 | for (i = 0; i < buf->npages; i++) { | |
388ca8be | 294 | addr = buf->frags->map + (i << buf->page_shift); |
e126ba97 EC |
295 | |
296 | pas[i] = cpu_to_be64(addr); | |
297 | } | |
298 | } | |
299 | EXPORT_SYMBOL_GPL(mlx5_fill_page_array); | |
1c1b5228 TT |
300 | |
301 | void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas) | |
302 | { | |
303 | int i; | |
304 | ||
305 | for (i = 0; i < buf->npages; i++) | |
306 | pas[i] = cpu_to_be64(buf->frags[i].map); | |
307 | } | |
308 | EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array); |