/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UVERBS_MAJOR       = 231,
	IB_UVERBS_BASE_MINOR  = 192,
	IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
	IB_UVERBS_NUM_FIXED_MINOR = 32,
	IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);

/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
	/*
	 * We do not hold the hw_destroy_rwsem lock for this flow, instead
	 * srcu is used. It does not matter if someone races this with
	 * get_context, we get NULL or valid ucontext.
	 */
	struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

	if (!srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu))
		return ERR_PTR(-EIO);

	if (!ucontext)
		return ERR_PTR(-EINVAL);

	return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
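
/*
 * Destroy a memory window. On success, drop the reference the MW held on its
 * protection domain, balancing the usecnt taken when the MW was allocated.
 */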
int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;
	int ret;

	ret = mw->device->ops.dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);
	return ret;
}

static void ib_uverbs_release_dev(struct device *device)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);

	uverbs_destroy_api(dev->uapi);
	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}

static void ib_uverbs_release_async_event_file(struct kref *ref)
{
	struct ib_uverbs_async_event_file *file =
		container_of(ref, struct ib_uverbs_async_event_file, ref);

	kfree(file);
}
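
/*
 * Free all completion and async events still queued against a CQ uobject
 * that is being destroyed. ev_file may be NULL when the CQ was created
 * without a completion channel.
 */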
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_completion_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj);
	}

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_detach_umcast(struct ib_qp *qp,
			     struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}

static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}

void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	release_ufile_idr_uobject(file);

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	put_device(&file->device->dev);
	kfree(file);
}
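
/*
 * Copy a single queued event of size eventsz to userspace. Blocks until an
 * event is queued unless the file was opened O_NONBLOCK; returns -EIO once
 * the device has been disassociated and no further events can arrive.
 */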
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
				    struct ib_uverbs_file *uverbs_file,
				    struct file *filp, char __user *buf,
				    size_t count, loff_t *pos,
				    size_t eventsz)
{
	struct ib_uverbs_event *event;
	int ret = 0;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
					      /* The barriers built into wait_event_interruptible()
					       * and wake_up() guarantee this will see the null set
					       * without using RCU
					       */
					      !uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If device was disassociated and no event exists, set an error */
		if (list_empty(&ev_queue->event_list) &&
		    !uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

	if (eventsz > count) {
		ret = -EINVAL;
		event = NULL;
	} else {
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);

	return ret;
}

static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
					  size_t count, loff_t *pos)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
}

static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_read(&comp_ev_file->ev_queue,
				    comp_ev_file->uobj.ufile, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_comp_event_desc));
}

static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
				     struct file *filp,
				     struct poll_table_struct *wait)
{
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static __poll_t ib_uverbs_async_event_poll(struct file *filp,
					   struct poll_table_struct *wait)
{
	return ib_uverbs_event_poll(filp->private_data, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}

static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_queue *ev_queue = filp->private_data;

	return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}

static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;
	struct ib_uverbs_file *uverbs_file = file->uverbs_file;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	mutex_lock(&uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->ev_queue.lock);
	closed_already = file->ev_queue.is_closed;
	file->ev_queue.is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);
	if (!closed_already) {
		list_del(&file->list);
		ib_unregister_event_handler(&uverbs_file->event_handler);
	}
	mutex_unlock(&uverbs_file->device->lists_mutex);

	kref_put(&uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_async_event_file);

	return 0;
}

static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct ib_uverbs_completion_event_file *file = container_of(
		uobj, struct ib_uverbs_completion_event_file, uobj);
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	file->ev_queue.is_closed = 1;
	spin_unlock_irq(&file->ev_queue.lock);

	uverbs_close_fd(filp);

	return 0;
}

const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_comp_event_read,
	.poll	 = ib_uverbs_comp_event_poll,
	.release = ib_uverbs_comp_event_close,
	.fasync	 = ib_uverbs_comp_event_fasync,
	.llseek	 = no_llseek,
};

static const struct file_operations uverbs_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_async_event_read,
	.poll	 = ib_uverbs_async_event_poll,
	.release = ib_uverbs_async_event_close,
	.fasync	 = ib_uverbs_async_event_fasync,
	.llseek	 = no_llseek,
};
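
/*
 * CQ completion callback: queue an event on the completion channel attached
 * to the CQ and wake up any readers. May run in atomic context, hence the
 * GFP_ATOMIC allocation and irqsave locking.
 */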
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_queue *ev_queue = cq_context;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!ev_queue)
		return;

	spin_lock_irqsave(&ev_queue->lock, flags);
	if (ev_queue->is_closed) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
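
/*
 * Queue an asynchronous event on the file's async event queue and notify
 * readers. When non-NULL, obj_list and counter tie the event to the uobject
 * it refers to, so pending events can be reclaimed if the uobject is
 * destroyed before they are read.
 */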
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
	if (file->async_file->ev_queue.is_closed) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter		     = counter;

	list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

	wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
	kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
						     struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	uobj = container_of(event->element.srq->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}

void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
	file->async_file = NULL;
}

void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	ev_queue->is_closed   = 0;
	ev_queue->async_queue = NULL;
}
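
/*
 * Create the anonymous inode backing the async event FD for a uverbs file.
 * The event file and the uverbs file each hold a kref on the other, so both
 * stay alive until the last of the two FDs is closed.
 */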
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
					      struct ib_device *ib_dev)
{
	struct ib_uverbs_async_event_file *ev_file;
	struct file *filp;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	ib_uverbs_init_event_queue(&ev_file->ev_queue);
	ev_file->uverbs_file = uverbs_file;
	kref_get(&ev_file->uverbs_file->ref);
	kref_init(&ev_file->ref);
	filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
				  ev_file, O_RDONLY);
	if (IS_ERR(filp))
		goto err_put_refs;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	WARN_ON(uverbs_file->async_file);
	uverbs_file->async_file = ev_file;
	kref_get(&uverbs_file->async_file->ref);
	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
			      ib_dev,
			      ib_uverbs_event_handler);
	ib_register_event_handler(&uverbs_file->event_handler);
	/* At this point the async event file is fully set up */

	return filp;

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
	return filp;
}
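
/*
 * Validate a command header against the method's declared sizes. Legacy
 * commands count the whole transfer in 4-byte words; extended commands count
 * the payload (after both headers) in 8-byte words and carry an explicit
 * response pointer, which is range-checked here with access_ok().
 */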
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
			  struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
			  const struct uverbs_api_write_method *method_elm)
{
	if (method_elm->is_ex) {
		count -= sizeof(*hdr) + sizeof(*ex_hdr);

		if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
			return -EINVAL;

		if (hdr->in_words * 8 < method_elm->req_size)
			return -ENOSPC;

		if (ex_hdr->cmd_hdr_reserved)
			return -EINVAL;

		if (ex_hdr->response) {
			if (!hdr->out_words && !ex_hdr->provider_out_words)
				return -EINVAL;

			if (hdr->out_words * 8 < method_elm->resp_size)
				return -ENOSPC;

			if (!access_ok(u64_to_user_ptr(ex_hdr->response),
				       (hdr->out_words + ex_hdr->provider_out_words) * 8))
				return -EFAULT;
		} else {
			if (hdr->out_words || ex_hdr->provider_out_words)
				return -EINVAL;
		}

		return 0;
	}

	/* not extended command */
	if (hdr->in_words * 4 != count)
		return -EINVAL;

	if (count < method_elm->req_size + sizeof(hdr)) {
		/*
		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
		 * with a 16 byte write instead of 24. Old kernels didn't
		 * check the size so they allowed this. Now that the size is
		 * checked provide a compatibility work around to not break
		 * those userspaces.
		 */
		if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
		    count == 16) {
			hdr->in_words = 6;
			return 0;
		}
		return -ENOSPC;
	}
	if (hdr->out_words * 4 < method_elm->resp_size)
		return -ENOSPC;

	return 0;
}
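
/*
 * Entry point for the legacy write() command path. Parses the command
 * header(s), looks up the method in the uapi registry, validates the sizes
 * via verify_hdr(), splits the user buffer into core and driver-private
 * udata, and dispatches the handler under the disassociate SRCU read lock.
 */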
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	const struct uverbs_api_write_method *method_elm;
	struct uverbs_api *uapi = file->device->uapi;
	struct ib_uverbs_ex_cmd_hdr ex_hdr;
	struct ib_uverbs_cmd_hdr hdr;
	struct uverbs_attr_bundle bundle;
	int srcu_key;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	method_elm = uapi_get_method(uapi, hdr.command);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);

	if (method_elm->is_ex) {
		if (count < (sizeof(hdr) + sizeof(ex_hdr)))
			return -EINVAL;
		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
			return -EFAULT;
	}

	ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
	if (ret)
		return ret;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

	buf += sizeof(hdr);

	bundle.ufile = file;
	if (!method_elm->is_ex) {
		size_t in_len = hdr.in_words * 4 - sizeof(hdr);
		size_t out_len = hdr.out_words * 4;
		u64 response = 0;

		if (method_elm->has_udata) {
			bundle.driver_udata.inlen =
				in_len - method_elm->req_size;
			in_len = method_elm->req_size;
			if (bundle.driver_udata.inlen)
				bundle.driver_udata.inbuf = buf + in_len;
			else
				bundle.driver_udata.inbuf = NULL;
		} else {
			memset(&bundle.driver_udata, 0,
			       sizeof(bundle.driver_udata));
		}

		if (method_elm->has_resp) {
			/*
			 * The macros check that if has_resp is set
			 * then the command request structure starts
			 * with a '__aligned u64 response' member.
			 */
			ret = get_user(response, (const u64 __user *)buf);
			if (ret)
				goto out_unlock;

			if (method_elm->has_udata) {
				bundle.driver_udata.outlen =
					out_len - method_elm->resp_size;
				out_len = method_elm->resp_size;
				if (bundle.driver_udata.outlen)
					bundle.driver_udata.outbuf =
						u64_to_user_ptr(response +
								out_len);
				else
					bundle.driver_udata.outbuf = NULL;
			}
		} else {
			bundle.driver_udata.outlen = 0;
			bundle.driver_udata.outbuf = NULL;
		}

		ib_uverbs_init_udata_buf_or_null(
			&bundle.ucore, buf, u64_to_user_ptr(response),
			in_len, out_len);
	} else {
		buf += sizeof(ex_hdr);

		ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
					u64_to_user_ptr(ex_hdr.response),
					hdr.in_words * 8, hdr.out_words * 8);

		ib_uverbs_init_udata_buf_or_null(
			&bundle.driver_udata, buf + bundle.ucore.inlen,
			u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
			ex_hdr.provider_in_words * 8,
			ex_hdr.provider_out_words * 8);
	}

	ret = method_elm->handler(&bundle);
out_unlock:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return (ret) ? : count;
}

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_ucontext *ucontext;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(file);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto out;
	}

	ret = ucontext->device->ops.mmap(ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}

/*
 * Each time we map IO memory into user space this keeps track of the mapping.
 * When the device is hot-unplugged we 'zap' the mmaps in user space to point
 * to the zero page and allow the hot unplug to proceed.
 *
 * This is necessary for cases like PCI physical hot unplug as the actual BAR
 * memory may vanish after this and access to it from userspace could MCE.
 *
 * RDMA drivers supporting disassociation must have their user space designed
 * to cope in some way with their IO pages going to the zero page.
 */
struct rdma_umap_priv {
	struct vm_area_struct *vma;
	struct list_head list;
};

static const struct vm_operations_struct rdma_umap_ops;

static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
				struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;

	priv->vma = vma;
	vma->vm_private_data = priv;
	vma->vm_ops = &rdma_umap_ops;

	mutex_lock(&ufile->umap_lock);
	list_add(&priv->list, &ufile->umaps);
	mutex_unlock(&ufile->umap_lock);
}

/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *opriv = vma->vm_private_data;
	struct rdma_umap_priv *priv;

	if (!opriv)
		return;

	/* We are racing with disassociation */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		goto out_zap;
	/*
	 * Disassociation already completed, the VMA should already be zapped.
	 */
	if (!ufile->ucontext)
		goto out_unlock;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_unlock;
	rdma_umap_priv_init(priv, vma);

	up_read(&ufile->hw_destroy_rwsem);
	return;

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
out_zap:
	/*
	 * We can't allow the VMA to be created with the actual IO pages, that
	 * would break our API contract, and it can't be stopped at this
	 * point, so zap it.
	 */
	vma->vm_private_data = NULL;
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void rdma_umap_close(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *priv = vma->vm_private_data;

	if (!priv)
		return;

	/*
	 * The vma holds a reference on the struct file that created it, which
	 * in turn means that the ib_uverbs_file is guaranteed to exist at
	 * this point.
	 */
	mutex_lock(&ufile->umap_lock);
	list_del(&priv->list);
	mutex_unlock(&ufile->umap_lock);
	kfree(priv);
}

static const struct vm_operations_struct rdma_umap_ops = {
	.open = rdma_umap_open,
	.close = rdma_umap_close,
};

static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
						 struct vm_area_struct *vma,
						 unsigned long size)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	struct rdma_umap_priv *priv;

	if (vma->vm_end - vma->vm_start != size)
		return ERR_PTR(-EINVAL);

	/* Driver is using this wrong, must be called by ib_uverbs_mmap */
	if (WARN_ON(!vma->vm_file ||
		    vma->vm_file->private_data != ufile))
		return ERR_PTR(-EINVAL);
	lockdep_assert_held(&ufile->device->disassociate_srcu);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	return priv;
}

/*
 * Map IO memory into a process. This is to be called by drivers as part of
 * their mmap() functions if they wish to send something like PCI-E BAR memory
 * to userspace.
 */
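/*
 * A minimal sketch of how a driver's ->mmap() might use this helper to
 * expose a doorbell BAR page (the names below are illustrative only, not a
 * real driver):
 *
 *	static int hypothetical_mmap(struct ib_ucontext *uctx,
 *				     struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = hypothetical_db_bar_pfn(uctx);
 *
 *		return rdma_user_mmap_io(uctx, vma, pfn, PAGE_SIZE,
 *					 pgprot_noncached(vma->vm_page_prot));
 *	}
 */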
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	vma->vm_page_prot = prot;
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);

/*
 * The page case is here for a slightly different reason, the driver expects
 * to be able to free the page it is sharing to user space when it destroys
 * its ucontext, which means we need to zap the user space references.
 *
 * We could handle this differently by providing an API to allocate a shared
 * page and then only freeing the shared page when the last ufile is
 * destroyed.
 */
int rdma_user_mmap_page(struct ib_ucontext *ucontext,
			struct vm_area_struct *vma, struct page *page,
			unsigned long size)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
			    vma->vm_page_prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_page);
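
/*
 * Zap every IO mapping this ufile has handed out. Called with
 * hw_destroy_rwsem held during disassociation; the list is cleaned one mm at
 * a time because umap_lock nests under mmap_sem.
 */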
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
	struct rdma_umap_priv *priv, *next_priv;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);

	while (1) {
		struct mm_struct *mm = NULL;

		/* Get an arbitrary mm pointer that hasn't been cleaned yet */
		mutex_lock(&ufile->umap_lock);
		if (!list_empty(&ufile->umaps)) {
			mm = list_first_entry(&ufile->umaps,
					      struct rdma_umap_priv, list)
					 ->vma->vm_mm;
			mmget(mm);
		}
		mutex_unlock(&ufile->umap_lock);
		if (!mm)
			return;

		/*
		 * The umap_lock is nested under mmap_sem since it is used
		 * within the vma_ops callbacks, so we have to clean the list
		 * one mm at a time to get the lock ordering right. Typically
		 * there will only be one mm, so no big deal.
		 */
		down_write(&mm->mmap_sem);
		mutex_lock(&ufile->umap_lock);
		list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
					  list) {
			struct vm_area_struct *vma = priv->vma;

			if (vma->vm_mm != mm)
				continue;
			list_del_init(&priv->list);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);
			vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
		}
		mutex_unlock(&ufile->umap_lock);
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
}

/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately fail with -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	get_device(&dev->dev);
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* If the IB device supports disassociating the ucontext, there is no
	 * hard module dependency between the uverbs device and its low-level
	 * device.
	 */
	module_dependent = !(ib_dev->ops.disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device	 = dev;
	kref_init(&file->ref);
	mutex_init(&file->ucontext_lock);

	spin_lock_init(&file->uobjects_lock);
	INIT_LIST_HEAD(&file->uobjects);
	init_rwsem(&file->hw_destroy_rwsem);
	mutex_init(&file->umap_lock);
	INIT_LIST_HEAD(&file->umaps);

	filp->private_data = file;
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	setup_ufile_idr_uobject(file);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	put_device(&dev->dev);
	return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;

	uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

	mutex_lock(&file->device->lists_mutex);
	list_del_init(&file->list);
	mutex_unlock(&file->device->lists_mutex);

	if (file->async_file)
		kref_put(&file->async_file->ref,
			 ib_uverbs_release_async_event_file);

	kref_put(&file->ref, ib_uverbs_release_file);

	return 0;
}

static const struct file_operations uverbs_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};

static const struct file_operations uverbs_mmap_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};

static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};

static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t abi_version_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(abi_version);

static struct attribute *ib_dev_attrs[] = {
	&dev_attr_abi_version.attr,
	&dev_attr_ibdev.attr,
	NULL,
};

static const struct attribute_group dev_attr_group = {
	.attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));

static int ib_uverbs_create_uapi(struct ib_device *device,
				 struct ib_uverbs_device *uverbs_dev)
{
	struct uverbs_api *uapi;

	uapi = uverbs_alloc_api(device);
	if (IS_ERR(uapi))
		return PTR_ERR(uapi);

	uverbs_dev->uapi = uapi;
	return 0;
}
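
/*
 * ib_client ->add callback: create the ib_uverbs_device for a new IB device,
 * allocate a minor (the 32 fixed minors first, then the dynamic range),
 * build the uapi description, and register the char device.
 */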
static void ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	if (!device->ops.alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
	if (!uverbs_dev)
		return;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return;
	}

	device_initialize(&uverbs_dev->dev);
	uverbs_dev->dev.class = uverbs_class;
	uverbs_dev->dev.parent = device->dev.parent;
	uverbs_dev->dev.release = ib_uverbs_release_dev;
	uverbs_dev->groups[0] = &dev_attr_group;
	uverbs_dev->dev.groups = uverbs_dev->groups;
	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
			       GFP_KERNEL);
	if (devnum < 0)
		goto err;
	uverbs_dev->devnum = devnum;
	if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
		base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
	else
		base = IB_UVERBS_BASE_DEV + devnum;

	if (ib_uverbs_create_uapi(device, uverbs_dev))
		goto err_uapi;

	uverbs_dev->dev.devt = base;
	dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

	cdev_init(&uverbs_dev->cdev,
		  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
	uverbs_dev->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
	if (ret)
		goto err_uapi;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);
	return;

err_uapi:
	ida_free(&uverbs_ida, devnum);
err:
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	put_device(&uverbs_dev->dev);
	return;
}
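
/*
 * Disassociate all open files from the hardware: terminate in-flight
 * commands, deliver IB_EVENT_DEVICE_FATAL to every file, destroy their HW
 * objects, and mark every event queue closed so readers wake up and see
 * -EIO.
 */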
1270
036b1063
YH
1271static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
1272 struct ib_device *ib_dev)
1273{
1274 struct ib_uverbs_file *file;
1e7710f3 1275 struct ib_uverbs_async_event_file *event_file;
036b1063
YH
1276 struct ib_event event;
1277
1278 /* Pending running commands to terminate */
9ed3e5f4 1279 uverbs_disassociate_api_pre(uverbs_dev);
036b1063
YH
1280 event.event = IB_EVENT_DEVICE_FATAL;
1281 event.element.port_num = 0;
1282 event.device = ib_dev;
1283
1284 mutex_lock(&uverbs_dev->lists_mutex);
1285 while (!list_empty(&uverbs_dev->uverbs_file_list)) {
036b1063
YH
1286 file = list_first_entry(&uverbs_dev->uverbs_file_list,
1287 struct ib_uverbs_file, list);
6ebce447 1288 list_del_init(&file->list);
036b1063 1289 kref_get(&file->ref);
d1e09f30 1290
e951747a
JG
1291 /* We must release the mutex before going ahead and calling
1292 * uverbs_cleanup_ufile, as it might end up indirectly calling
1293 * uverbs_close, for example due to freeing the resources (e.g
1294 * mmput).
d1e09f30 1295 */
e951747a 1296 mutex_unlock(&uverbs_dev->lists_mutex);
036b1063 1297
e951747a
JG
1298 ib_uverbs_event_handler(&file->event_handler, &event);
1299 uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
036b1063 1300 kref_put(&file->ref, ib_uverbs_release_file);
e951747a
JG
1301
1302 mutex_lock(&uverbs_dev->lists_mutex);
036b1063
YH
1303 }
1304
1305 while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
1306 event_file = list_first_entry(&uverbs_dev->
1307 uverbs_events_file_list,
1e7710f3 1308 struct ib_uverbs_async_event_file,
036b1063 1309 list);
db1b5ddd
MB
1310 spin_lock_irq(&event_file->ev_queue.lock);
1311 event_file->ev_queue.is_closed = 1;
1312 spin_unlock_irq(&event_file->ev_queue.lock);
036b1063
YH
1313
1314 list_del(&event_file->list);
1e7710f3
MB
1315 ib_unregister_event_handler(
1316 &event_file->uverbs_file->event_handler);
1317 event_file->uverbs_file->event_handler.device =
1318 NULL;
036b1063 1319
db1b5ddd
MB
1320 wake_up_interruptible(&event_file->ev_queue.poll_wait);
1321 kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
036b1063
YH
1322 }
1323 mutex_unlock(&uverbs_dev->lists_mutex);
9ed3e5f4
JG
1324
1325 uverbs_disassociate_api(uverbs_dev->uapi);
036b1063
YH
1326}
1327

static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
	ida_free(&uverbs_ida, uverbs_dev->devnum);

	if (device->ops.disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see an EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid any more.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);

	put_device(&uverbs_dev->dev);
}

static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
				     IB_UVERBS_NUM_FIXED_MINOR,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
				  IB_UVERBS_NUM_DYNAMIC_MINOR,
				  "infiniband_verbs");
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);

out:
	return ret;
}

static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);