/*
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
#include "trace.h"

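/*
 * Per-registration state: an mmu_notifier attached to the given mm feeds
 * an interval RB tree of buffer nodes, with an LRU list for eviction and
 * a deferred-deletion list drained by del_work on the caller's workqueue.
 */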
struct mmu_rb_handler {
        struct mmu_notifier mn;
        struct rb_root_cached root;
        void *ops_arg;
        spinlock_t lock;        /* protect the RB tree */
        struct mmu_rb_ops *ops;
        struct mm_struct *mm;
        struct list_head lru_list;
        struct work_struct del_work;
        struct list_head del_list;
        struct workqueue_struct *wq;
};

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
                                    const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
                                           unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
                      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
        .invalidate_range_start = mmu_notifier_range_start,
};

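/*
 * Generate the static interval tree helpers (__mmu_int_rb_insert(),
 * __mmu_int_rb_remove(), __mmu_int_rb_iter_first() and
 * __mmu_int_rb_iter_next()) over struct mmu_rb_node, keyed by each
 * node's page-aligned [mmu_node_start(), mmu_node_last()] range.
 */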
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
                     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
        return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
        return PAGE_ALIGN(node->addr + node->len) - 1;
}

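/**
 * hfi1_mmu_rb_register() - allocate a handler and attach it to an mm
 * @ops_arg: opaque pointer passed back to every @ops callback
 * @mm: address space whose invalidations this handler will observe
 * @ops: caller-supplied insert/remove/evict/invalidate/filter callbacks
 * @wq: workqueue on which deferred removals (handle_remove()) run
 * @handler: on success, set to the newly allocated handler
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * from mmu_notifier_register().
 */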
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
                         struct mmu_rb_ops *ops,
                         struct workqueue_struct *wq,
                         struct mmu_rb_handler **handler)
{
        struct mmu_rb_handler *handlr;
        int ret;

        handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
        if (!handlr)
                return -ENOMEM;

        handlr->root = RB_ROOT_CACHED;
        handlr->ops = ops;
        handlr->ops_arg = ops_arg;
        INIT_HLIST_NODE(&handlr->mn.hlist);
        spin_lock_init(&handlr->lock);
        handlr->mn.ops = &mn_opts;
        handlr->mm = mm;
        INIT_WORK(&handlr->del_work, handle_remove);
        INIT_LIST_HEAD(&handlr->del_list);
        INIT_LIST_HEAD(&handlr->lru_list);
        handlr->wq = wq;

        ret = mmu_notifier_register(&handlr->mn, handlr->mm);
        if (ret) {
                kfree(handlr);
                return ret;
        }

        *handler = handlr;
        return 0;
}

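/**
 * hfi1_mmu_rb_unregister() - shut down and free a handler
 * @handler: handler returned by hfi1_mmu_rb_register()
 *
 * Detaches the mmu notifier, flushes any pending del_work, then empties
 * the tree, running the remove callback on every remaining node before
 * freeing the handler.
 */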
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
        struct mmu_rb_node *rbnode;
        struct rb_node *node;
        unsigned long flags;
        struct list_head del_list;

        /* Unregister first so we don't get any more notifications. */
        mmu_notifier_unregister(&handler->mn, handler->mm);

        /*
         * Make sure the wq delete handler is finished running. It will not
         * be triggered once the MMU notifier is unregistered above.
         */
        flush_work(&handler->del_work);

        INIT_LIST_HEAD(&del_list);

        spin_lock_irqsave(&handler->lock, flags);
        while ((node = rb_first_cached(&handler->root))) {
                rbnode = rb_entry(node, struct mmu_rb_node, node);
                rb_erase_cached(node, &handler->root);
                /* move from LRU list to delete list */
                list_move(&rbnode->list, &del_list);
        }
        spin_unlock_irqrestore(&handler->lock, flags);

        do_remove(handler, &del_list);

        kfree(handler);
}

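/**
 * hfi1_mmu_rb_insert() - add a node to the interval tree and LRU list
 * @handler: target handler
 * @mnode: node describing the [addr, addr + len) range to track
 *
 * The caller's insert callback runs under the handler lock; if it
 * fails, the node is unwound from the tree and LRU list again.
 *
 * Return: 0 on success, -EINVAL if __mmu_rb_search() already finds a
 * node for this range, or the insert callback's error.
 */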
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
                       struct mmu_rb_node *mnode)
{
        struct mmu_rb_node *node;
        unsigned long flags;
        int ret = 0;

        trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
        spin_lock_irqsave(&handler->lock, flags);
        node = __mmu_rb_search(handler, mnode->addr, mnode->len);
        if (node) {
                ret = -EINVAL;
                goto unlock;
        }
        __mmu_int_rb_insert(mnode, &handler->root);
        list_add(&mnode->list, &handler->lru_list);

        ret = handler->ops->insert(handler->ops_arg, mnode);
        if (ret) {
                __mmu_int_rb_remove(mnode, &handler->root);
                list_del(&mnode->list); /* remove from LRU list */
        }
unlock:
        spin_unlock_irqrestore(&handler->lock, flags);
        return ret;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
                                           unsigned long addr,
                                           unsigned long len)
{
        struct mmu_rb_node *node = NULL;

        trace_hfi1_mmu_rb_search(addr, len);
        if (!handler->ops->filter) {
                node = __mmu_int_rb_iter_first(&handler->root, addr,
                                               (addr + len) - 1);
        } else {
                for (node = __mmu_int_rb_iter_first(&handler->root, addr,
                                                    (addr + len) - 1);
                     node;
                     node = __mmu_int_rb_iter_next(node, addr,
                                                   (addr + len) - 1)) {
                        if (handler->ops->filter(node, addr, len))
                                return node;
                }
        }
        return node;
}

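/**
 * hfi1_mmu_rb_remove_unless_exact() - detach a node unless it matches exactly
 * @handler: target handler
 * @addr: start of the range to look up
 * @len: length of the range to look up
 * @rb_node: set to the node the search found, or NULL
 *
 * A node the search finds that does not exactly match [@addr, @len] is
 * removed from the tree and LRU list; an exact match is left in place.
 *
 * Return: true if a node was removed, false otherwise.
 */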
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
                                     unsigned long addr, unsigned long len,
                                     struct mmu_rb_node **rb_node)
{
        struct mmu_rb_node *node;
        unsigned long flags;
        bool ret = false;

        spin_lock_irqsave(&handler->lock, flags);
        node = __mmu_rb_search(handler, addr, len);
        if (node) {
                if (node->addr == addr && node->len == len)
                        goto unlock;
                __mmu_int_rb_remove(node, &handler->root);
                list_del(&node->list); /* remove from LRU list */
                ret = true;
        }
unlock:
        spin_unlock_irqrestore(&handler->lock, flags);
        *rb_node = node;
        return ret;
}

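/**
 * hfi1_mmu_rb_evict() - evict nodes, oldest first, from the LRU list
 * @handler: target handler
 * @evict_arg: opaque argument passed through to the evict callback
 *
 * Walks the LRU list from oldest to newest, detaching each node the
 * evict callback approves until the callback sets its stop flag, then
 * runs the remove callback on the detached nodes outside the lock.
 */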
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
        struct mmu_rb_node *rbnode, *ptr;
        struct list_head del_list;
        unsigned long flags;
        bool stop = false;

        INIT_LIST_HEAD(&del_list);

        spin_lock_irqsave(&handler->lock, flags);
        list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
                                         list) {
                if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
                                        &stop)) {
                        __mmu_int_rb_remove(rbnode, &handler->root);
                        /* move from LRU list to delete list */
                        list_move(&rbnode->list, &del_list);
                }
                if (stop)
                        break;
        }
        spin_unlock_irqrestore(&handler->lock, flags);

        while (!list_empty(&del_list)) {
                rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
                list_del(&rbnode->list);
                handler->ops->remove(handler->ops_arg, rbnode);
        }
}

/*
 * It is up to the caller to ensure that this function does not race with the
 * mmu invalidate notifier which may be calling the user's remove callback on
 * 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
                        struct mmu_rb_node *node)
{
        unsigned long flags;

        /* Validity of handler and node pointers has been checked by caller. */
        trace_hfi1_mmu_rb_remove(node->addr, node->len);
        spin_lock_irqsave(&handler->lock, flags);
        __mmu_int_rb_remove(node, &handler->root);
        list_del(&node->list); /* remove from LRU list */
        spin_unlock_irqrestore(&handler->lock, flags);

        handler->ops->remove(handler->ops_arg, node);
}

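/*
 * Invalidation callback. For every tree node intersecting the invalidated
 * range that the invalidate callback agrees to drop, detach the node under
 * the handler lock and queue it on del_list; handle_remove() later runs
 * the remove callbacks from the workqueue, where sleeping is allowed.
 */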
static int mmu_notifier_range_start(struct mmu_notifier *mn,
                                    const struct mmu_notifier_range *range)
{
        struct mmu_rb_handler *handler =
                container_of(mn, struct mmu_rb_handler, mn);
        struct rb_root_cached *root = &handler->root;
        struct mmu_rb_node *node, *ptr = NULL;
        unsigned long flags;
        bool added = false;

        spin_lock_irqsave(&handler->lock, flags);
        for (node = __mmu_int_rb_iter_first(root, range->start,
                                            range->end - 1);
             node; node = ptr) {
                /* Guard against node removal. */
                ptr = __mmu_int_rb_iter_next(node, range->start,
                                             range->end - 1);
                trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
                if (handler->ops->invalidate(handler->ops_arg, node)) {
                        __mmu_int_rb_remove(node, root);
                        /* move from LRU list to delete list */
                        list_move(&node->list, &handler->del_list);
                        added = true;
                }
        }
        spin_unlock_irqrestore(&handler->lock, flags);

        if (added)
                queue_work(handler->wq, &handler->del_work);

        return 0;
}

/*
 * Call the remove function for the given handler and the list. This
 * is expected to be called with a delete list extracted from the
 * handler. The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
                      struct list_head *del_list)
{
        struct mmu_rb_node *node;

        while (!list_empty(del_list)) {
                node = list_first_entry(del_list, struct mmu_rb_node, list);
                list_del(&node->list);
                handler->ops->remove(handler->ops_arg, node);
        }
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed. The key feature is that mm->mmap_lock is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
        struct mmu_rb_handler *handler = container_of(work,
                                                      struct mmu_rb_handler,
                                                      del_work);
        struct list_head del_list;
        unsigned long flags;

        /* remove anything that is queued to get removed */
        spin_lock_irqsave(&handler->lock, flags);
        list_replace_init(&handler->del_list, &del_list);
        spin_unlock_irqrestore(&handler->lock, flags);

        do_remove(handler, &del_list);
}