/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
	IB_UVERBS_MAJOR			= 231,
	IB_UVERBS_BASE_MINOR		= 192,
	IB_UVERBS_MAX_DEVICES		= RDMA_MAX_PORTS,
	IB_UVERBS_NUM_FIXED_MINOR	= 32,
	IB_UVERBS_NUM_DYNAMIC_MINOR	= IB_UVERBS_MAX_DEVICES -
					  IB_UVERBS_NUM_FIXED_MINOR,
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
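/*
 * Device-number layout, derived from the constants above: the first 32
 * uverbs devices use fixed minors 192..223 under major 231, starting at
 * IB_UVERBS_BASE_DEV; any further devices fall into the dynamically
 * allocated region reserved in ib_uverbs_init(). For example, uverbs0 is
 * MKDEV(231, 192) and uverbs31 is MKDEV(231, 223), while uverbs32 maps to
 * dynamic_uverbs_dev + 0.
 */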
static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
	/*
	 * We do not hold the hw_destroy_rwsem lock for this flow, instead
	 * srcu is used. It does not matter if someone races this with
	 * get_context, we get NULL or valid ucontext.
	 */
	struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

	if (!srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu))
		return ERR_PTR(-EIO);

	if (!ucontext)
		return ERR_PTR(-EINVAL);

	return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
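/*
 * A sketch of the expected caller pattern for ib_uverbs_get_ucontext_file()
 * (see ib_uverbs_mmap() below for the in-tree example):
 *
 *	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
 *	ucontext = ib_uverbs_get_ucontext_file(file);
 *	if (!IS_ERR(ucontext))
 *		... use ucontext; it stays valid until the unlock ...
 *	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
 */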
int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;
	int ret;

	ret = mw->device->ops.dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);
	return ret;
}
static void ib_uverbs_release_dev(struct device *device)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);

	uverbs_destroy_api(dev->uapi);
	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}
static void ib_uverbs_release_async_event_file(struct kref *ref)
{
	struct ib_uverbs_async_event_file *file =
		container_of(ref, struct ib_uverbs_async_event_file, ref);

	kfree(file);
}
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_completion_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj);
	}

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}
void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}
void ib_uverbs_detach_umcast(struct ib_qp *qp,
			     struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}
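/*
 * dev->refcount counts open handles plus one reference owned by device
 * registration. Whoever drops the count to zero fires dev->comp, which
 * ib_uverbs_remove_one() may be blocked on in wait_for_completion(), so
 * device teardown cannot finish while any uverbs file is still open.
 */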
void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	release_ufile_idr_uobject(file);

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	put_device(&file->device->dev);
	kfree(file);
}
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
				    struct ib_uverbs_file *uverbs_file,
				    struct file *filp, char __user *buf,
				    size_t count, loff_t *pos,
				    size_t eventsz)
{
	struct ib_uverbs_event *event;
	int ret = 0;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
			/* The barriers built into wait_event_interruptible()
			 * and wake_up() guarantee this will see the null set
			 * without using RCU
			 */
					      !uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If device was disassociated and no event exists set an error */
		if (list_empty(&ev_queue->event_list) &&
		    !uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

	if (eventsz > count) {
		ret   = -EINVAL;
		event = NULL;
	} else {
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);

	return ret;
}
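/*
 * Both the async-event and completion-event file descriptors funnel their
 * read() through ib_uverbs_event_read() above: a blocking read sleeps on
 * ev_queue->poll_wait until an event arrives or the device disassociates
 * (-EIO), O_NONBLOCK readers get -EAGAIN, and each dequeued event bumps the
 * per-object "events reported" counter so the destroy paths can reconcile it.
 */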
static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
					  size_t count, loff_t *pos)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
}
static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_read(&comp_ev_file->ev_queue,
				    comp_ev_file->uobj.ufile, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_comp_event_desc));
}
static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
				     struct file *filp,
				     struct poll_table_struct *wait)
{
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}
static __poll_t ib_uverbs_async_event_poll(struct file *filp,
					   struct poll_table_struct *wait)
{
	return ib_uverbs_event_poll(filp->private_data, filp, wait);
}
static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}
static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_queue *ev_queue = filp->private_data;

	return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}
static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}
static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;
	struct ib_uverbs_file *uverbs_file = file->uverbs_file;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	mutex_lock(&uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->ev_queue.lock);
	closed_already = file->ev_queue.is_closed;
	file->ev_queue.is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);
	if (!closed_already) {
		list_del(&file->list);
		ib_unregister_event_handler(&uverbs_file->event_handler);
	}
	mutex_unlock(&uverbs_file->device->lists_mutex);

	kref_put(&uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_async_event_file);

	return 0;
}
static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct ib_uverbs_completion_event_file *file = container_of(
		uobj, struct ib_uverbs_completion_event_file, uobj);
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	file->ev_queue.is_closed = 1;
	spin_unlock_irq(&file->ev_queue.lock);

	uverbs_close_fd(filp);

	return 0;
}
const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_comp_event_read,
	.poll    = ib_uverbs_comp_event_poll,
	.release = ib_uverbs_comp_event_close,
	.fasync  = ib_uverbs_comp_event_fasync,
	.llseek	 = no_llseek,
};
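/*
 * uverbs_event_fops is deliberately non-static: completion channels are
 * created elsewhere in the uverbs code as anonymous-inode files that reuse
 * these operations, while the async-event fops below stay local to this
 * file.
 */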
static const struct file_operations uverbs_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_async_event_read,
	.poll    = ib_uverbs_async_event_poll,
	.release = ib_uverbs_async_event_close,
	.fasync  = ib_uverbs_async_event_fasync,
	.llseek	 = no_llseek,
};
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_queue *ev_queue = cq_context;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!ev_queue)
		return;

	spin_lock_irqsave(&ev_queue->lock, flags);
	if (ev_queue->is_closed) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
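/*
 * Note the GFP_ATOMIC allocation above: this handler runs in the CQ
 * completion path (potentially hard-IRQ context), hence the irqsave locking
 * and the refusal to sleep; if the allocation fails the event is simply
 * dropped.
 */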
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
	if (file->async_file->ev_queue.is_closed) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter               = counter;

	list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

	wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
	kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
						     struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	uobj = container_of(event->element.srq->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}
void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
	file->async_file = NULL;
}
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	ev_queue->is_closed   = 0;
	ev_queue->async_queue = NULL;
}
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
					      struct ib_device *ib_dev)
{
	struct ib_uverbs_async_event_file *ev_file;
	struct file *filp;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	ib_uverbs_init_event_queue(&ev_file->ev_queue);
	ev_file->uverbs_file = uverbs_file;
	kref_get(&ev_file->uverbs_file->ref);
	kref_init(&ev_file->ref);
	filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
				  ev_file, O_RDONLY);
	if (IS_ERR(filp))
		goto err_put_refs;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	WARN_ON(uverbs_file->async_file);
	uverbs_file->async_file = ev_file;
	kref_get(&uverbs_file->async_file->ref);
	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
			      ib_dev,
			      ib_uverbs_event_handler);
	ib_register_event_handler(&uverbs_file->event_handler);
	/* At that point async file stuff was fully set */

	return filp;

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
	return filp;
}
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
			  struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
			  const struct uverbs_api_write_method *method_elm)
{
	if (method_elm->is_ex) {
		count -= sizeof(*hdr) + sizeof(*ex_hdr);

		if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
			return -EINVAL;

		if (hdr->in_words * 8 < method_elm->req_size)
			return -ENOSPC;

		if (ex_hdr->cmd_hdr_reserved)
			return -EINVAL;

		if (ex_hdr->response) {
			if (!hdr->out_words && !ex_hdr->provider_out_words)
				return -EINVAL;

			if (hdr->out_words * 8 < method_elm->resp_size)
				return -ENOSPC;

			if (!access_ok(VERIFY_WRITE,
				       u64_to_user_ptr(ex_hdr->response),
				       (hdr->out_words + ex_hdr->provider_out_words) * 8))
				return -EFAULT;
		} else {
			if (hdr->out_words || ex_hdr->provider_out_words)
				return -EINVAL;
		}

		return 0;
	}

	/* not extended command */
	if (hdr->in_words * 4 != count)
		return -EINVAL;

	if (count < method_elm->req_size + sizeof(hdr)) {
		/*
		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
		 * with a 16 byte write instead of 24. Old kernels didn't
		 * check the size so they allowed this. Now that the size is
		 * checked provide a compatibility work around to not break
		 * those userspaces.
		 */
		if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
		    count == 16) {
			hdr->in_words = 6;
			return 0;
		}
		return -ENOSPC;
	}
	if (hdr->out_words * 4 < method_elm->resp_size)
		return -ENOSPC;

	return 0;
}
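/*
 * A worked example of the size math above: extended commands count in_words
 * and out_words in 8-byte units and exclude both headers, so a request of
 * "count" bytes must satisfy (in_words + provider_in_words) * 8 ==
 * count - sizeof(hdr) - sizeof(ex_hdr). Legacy commands count in 4-byte
 * units and *include* the header, hence the in_words * 4 == count check.
 */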
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	const struct uverbs_api_write_method *method_elm;
	struct uverbs_api *uapi = file->device->uapi;
	struct ib_uverbs_ex_cmd_hdr ex_hdr;
	struct ib_uverbs_cmd_hdr hdr;
	struct uverbs_attr_bundle bundle;
	int srcu_key;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	method_elm = uapi_get_method(uapi, hdr.command);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);

	if (method_elm->is_ex) {
		if (count < (sizeof(hdr) + sizeof(ex_hdr)))
			return -EINVAL;
		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
			return -EFAULT;
	}

	ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
	if (ret)
		return ret;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

	buf += sizeof(hdr);

	bundle.ufile = file;
	if (!method_elm->is_ex) {
		size_t in_len = hdr.in_words * 4 - sizeof(hdr);
		size_t out_len = hdr.out_words * 4;
		u64 response = 0;

		if (method_elm->has_udata) {
			bundle.driver_udata.inlen =
				in_len - method_elm->req_size;
			in_len = method_elm->req_size;
			if (bundle.driver_udata.inlen)
				bundle.driver_udata.inbuf = buf + in_len;
			else
				bundle.driver_udata.inbuf = NULL;
		} else {
			memset(&bundle.driver_udata, 0,
			       sizeof(bundle.driver_udata));
		}

		if (method_elm->has_resp) {
			/*
			 * The macros check that if has_resp is set
			 * then the command request structure starts
			 * with a '__aligned u64 response' member.
			 */
			ret = get_user(response, (const u64 __user *)buf);
			if (ret)
				goto out_unlock;

			if (method_elm->has_udata) {
				bundle.driver_udata.outlen =
					out_len - method_elm->resp_size;
				out_len = method_elm->resp_size;
				if (bundle.driver_udata.outlen)
					bundle.driver_udata.outbuf =
						u64_to_user_ptr(response +
								out_len);
				else
					bundle.driver_udata.outbuf = NULL;
			}
		} else {
			bundle.driver_udata.outlen = 0;
			bundle.driver_udata.outbuf = NULL;
		}

		ib_uverbs_init_udata_buf_or_null(
			&bundle.ucore, buf, u64_to_user_ptr(response),
			in_len, out_len);
	} else {
		buf += sizeof(ex_hdr);

		ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
					u64_to_user_ptr(ex_hdr.response),
					hdr.in_words * 8, hdr.out_words * 8);

		ib_uverbs_init_udata_buf_or_null(
			&bundle.driver_udata, buf + bundle.ucore.inlen,
			u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
			ex_hdr.provider_in_words * 8,
			ex_hdr.provider_out_words * 8);
	}

	ret = method_elm->handler(&bundle);
out_unlock:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return (ret) ? : count;
}
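/*
 * For legacy commands with driver private data, ib_uverbs_write() splits the
 * user buffer at method_elm->req_size / resp_size: the leading portion is
 * the common verbs structure (bundle.ucore) and whatever the process wrote
 * beyond it becomes the driver-specific trailer (bundle.driver_udata).
 * Extended commands encode that split explicitly via provider_in_words /
 * provider_out_words in the ex header instead.
 */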
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_ucontext *ucontext;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(file);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto out;
	}

	ret = ucontext->device->ops.mmap(ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}
/*
 * Each time we map IO memory into user space this keeps track of the mapping.
 * When the device is hot-unplugged we 'zap' the mmaps in user space to point
 * to the zero page and allow the hot unplug to proceed.
 *
 * This is necessary for cases like PCI physical hot unplug as the actual BAR
 * memory may vanish after this and access to it from userspace could MCE.
 *
 * RDMA drivers supporting disassociation must have their user space designed
 * to cope in some way with their IO pages going to the zero page.
 */
struct rdma_umap_priv {
	struct vm_area_struct *vma;
	struct list_head list;
};

static const struct vm_operations_struct rdma_umap_ops;
static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
				struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;

	priv->vma = vma;
	vma->vm_private_data = priv;
	vma->vm_ops = &rdma_umap_ops;

	mutex_lock(&ufile->umap_lock);
	list_add(&priv->list, &ufile->umaps);
	mutex_unlock(&ufile->umap_lock);
}
/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *opriv = vma->vm_private_data;
	struct rdma_umap_priv *priv;

	if (!opriv)
		return;

	/* We are racing with disassociation */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		goto out_zap;
	/*
	 * Disassociation already completed, the VMA should already be zapped.
	 */
	if (!ufile->ucontext)
		goto out_unlock;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_unlock;
	rdma_umap_priv_init(priv, vma);

	up_read(&ufile->hw_destroy_rwsem);
	return;

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
out_zap:
	/*
	 * We can't allow the VMA to be created with the actual IO pages, that
	 * would break our API contract, and it can't be stopped at this
	 * point, so zap it.
	 */
	vma->vm_private_data = NULL;
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
static void rdma_umap_close(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *priv = vma->vm_private_data;

	if (!priv)
		return;

	/*
	 * The vma holds a reference on the struct file that created it, which
	 * in turn means that the ib_uverbs_file is guaranteed to exist at
	 * this point.
	 */
	mutex_lock(&ufile->umap_lock);
	list_del(&priv->list);
	mutex_unlock(&ufile->umap_lock);

	kfree(priv);
}
static const struct vm_operations_struct rdma_umap_ops = {
	.open = rdma_umap_open,
	.close = rdma_umap_close,
};
static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
						 struct vm_area_struct *vma,
						 unsigned long size)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	struct rdma_umap_priv *priv;

	if (vma->vm_end - vma->vm_start != size)
		return ERR_PTR(-EINVAL);

	/* Driver is using this wrong, must be called by ib_uverbs_mmap */
	if (WARN_ON(!vma->vm_file ||
		    vma->vm_file->private_data != ufile))
		return ERR_PTR(-EINVAL);
	lockdep_assert_held(&ufile->device->disassociate_srcu);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	return priv;
}
/*
 * Map IO memory into a process. This is to be called by drivers as part of
 * their mmap() functions if they wish to send something like PCI-E BAR memory
 * to userspace.
 */
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	vma->vm_page_prot = prot;
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);
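/*
 * A sketch of how a driver's mmap handler might hand a BAR page to userspace
 * with rdma_user_mmap_io(). The names here (foo_ucontext, to_foo_ucontext,
 * uar_pfn) are illustrative only, not taken from any in-tree driver:
 *
 *	static int foo_mmap(struct ib_ucontext *ucontext,
 *			    struct vm_area_struct *vma)
 *	{
 *		struct foo_ucontext *ctx = to_foo_ucontext(ucontext);
 *
 *		return rdma_user_mmap_io(ucontext, vma, ctx->uar_pfn,
 *					 PAGE_SIZE,
 *					 pgprot_noncached(vma->vm_page_prot));
 *	}
 */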
/*
 * The page case is here for a slightly different reason, the driver expects
 * to be able to free the page it is sharing to user space when it destroys
 * its ucontext, which means we need to zap the user space references.
 *
 * We could handle this differently by providing an API to allocate a shared
 * page and then only freeing the shared page when the last ufile is
 * destroyed.
 */
int rdma_user_mmap_page(struct ib_ucontext *ucontext,
			struct vm_area_struct *vma, struct page *page,
			unsigned long size)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
			    vma->vm_page_prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_page);
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
	struct rdma_umap_priv *priv, *next_priv;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);

	while (1) {
		struct mm_struct *mm = NULL;

		/* Get an arbitrary mm pointer that hasn't been cleaned yet */
		mutex_lock(&ufile->umap_lock);
		if (!list_empty(&ufile->umaps)) {
			mm = list_first_entry(&ufile->umaps,
					      struct rdma_umap_priv, list)
					 ->vma->vm_mm;
			mmget(mm);
		}
		mutex_unlock(&ufile->umap_lock);
		if (!mm)
			return;

		/*
		 * The umap_lock is nested under mmap_sem since it used within
		 * the vma_ops callbacks, so we have to clean the list one mm
		 * at a time to get the lock ordering right. Typically there
		 * will only be one mm, so no big deal.
		 */
		down_write(&mm->mmap_sem);
		mutex_lock(&ufile->umap_lock);
		list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
					  list) {
			struct vm_area_struct *vma = priv->vma;

			if (vma->vm_mm != mm)
				continue;
			list_del_init(&priv->list);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);
			vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
		}
		mutex_unlock(&ufile->umap_lock);
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
}
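/*
 * After the zap above, VM_SHARED and VM_MAYSHARE are cleared so that any
 * later fault on the range is served as a private anonymous mapping (the
 * zero page) rather than attempting to re-establish a shared mapping of IO
 * memory that may no longer exist, which is the contract described above
 * struct rdma_umap_priv.
 */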
/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately fail with -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	get_device(&dev->dev);
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* In case IB device supports disassociate ucontext, there is no hard
	 * dependency between uverbs device and its low level device.
	 */
	module_dependent = !(ib_dev->ops.disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device	 = dev;
	kref_init(&file->ref);
	mutex_init(&file->ucontext_lock);

	spin_lock_init(&file->uobjects_lock);
	INIT_LIST_HEAD(&file->uobjects);
	init_rwsem(&file->hw_destroy_rwsem);
	mutex_init(&file->umap_lock);
	INIT_LIST_HEAD(&file->umaps);

	filp->private_data = file;
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	setup_ufile_idr_uobject(file);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	put_device(&dev->dev);
	return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;

	uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

	mutex_lock(&file->device->lists_mutex);
	list_del_init(&file->list);
	mutex_unlock(&file->device->lists_mutex);

	if (file->async_file)
		kref_put(&file->async_file->ref,
			 ib_uverbs_release_async_event_file);

	kref_put(&file->ref, ib_uverbs_release_file);

	return 0;
}
static const struct file_operations uverbs_fops = {
	.owner		 = THIS_MODULE,
	.write		 = ib_uverbs_write,
	.open		 = ib_uverbs_open,
	.release	 = ib_uverbs_close,
	.llseek		 = no_llseek,
	.unlocked_ioctl  = ib_uverbs_ioctl,
	.compat_ioctl    = ib_uverbs_ioctl,
};
static const struct file_operations uverbs_mmap_fops = {
	.owner		 = THIS_MODULE,
	.write		 = ib_uverbs_write,
	.mmap		 = ib_uverbs_mmap,
	.open		 = ib_uverbs_open,
	.release	 = ib_uverbs_close,
	.llseek		 = no_llseek,
	.unlocked_ioctl  = ib_uverbs_ioctl,
	.compat_ioctl    = ib_uverbs_ioctl,
};
static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};
static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(ibdev);
static ssize_t abi_version_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(abi_version);
static struct attribute *ib_dev_attrs[] = {
	&dev_attr_abi_version.attr,
	&dev_attr_ibdev.attr,
	NULL,
};

static const struct attribute_group dev_attr_group = {
	.attrs = ib_dev_attrs,
};
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));
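/*
 * This class attribute appears as /sys/class/infiniband_verbs/abi_version,
 * alongside the per-device ibdev and abi_version attributes defined above.
 */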
static int ib_uverbs_create_uapi(struct ib_device *device,
				 struct ib_uverbs_device *uverbs_dev)
{
	struct uverbs_api *uapi;

	uapi = uverbs_alloc_api(device);
	if (IS_ERR(uapi))
		return PTR_ERR(uapi);

	uverbs_dev->uapi = uapi;
	return 0;
}
*device
)
1203 struct ib_uverbs_device
*uverbs_dev
;
1206 if (!device
->ops
.alloc_ucontext
)
1209 uverbs_dev
= kzalloc(sizeof(*uverbs_dev
), GFP_KERNEL
);
1213 ret
= init_srcu_struct(&uverbs_dev
->disassociate_srcu
);
1219 device_initialize(&uverbs_dev
->dev
);
1220 uverbs_dev
->dev
.class = uverbs_class
;
1221 uverbs_dev
->dev
.parent
= device
->dev
.parent
;
1222 uverbs_dev
->dev
.release
= ib_uverbs_release_dev
;
1223 uverbs_dev
->groups
[0] = &dev_attr_group
;
1224 uverbs_dev
->dev
.groups
= uverbs_dev
->groups
;
1225 atomic_set(&uverbs_dev
->refcount
, 1);
1226 init_completion(&uverbs_dev
->comp
);
1227 uverbs_dev
->xrcd_tree
= RB_ROOT
;
1228 mutex_init(&uverbs_dev
->xrcd_tree_mutex
);
1229 mutex_init(&uverbs_dev
->lists_mutex
);
1230 INIT_LIST_HEAD(&uverbs_dev
->uverbs_file_list
);
1231 INIT_LIST_HEAD(&uverbs_dev
->uverbs_events_file_list
);
1232 rcu_assign_pointer(uverbs_dev
->ib_dev
, device
);
1233 uverbs_dev
->num_comp_vectors
= device
->num_comp_vectors
;
1235 devnum
= ida_alloc_max(&uverbs_ida
, IB_UVERBS_MAX_DEVICES
- 1,
1239 uverbs_dev
->devnum
= devnum
;
1240 if (devnum
>= IB_UVERBS_NUM_FIXED_MINOR
)
1241 base
= dynamic_uverbs_dev
+ devnum
- IB_UVERBS_NUM_FIXED_MINOR
;
1243 base
= IB_UVERBS_BASE_DEV
+ devnum
;
1245 if (ib_uverbs_create_uapi(device
, uverbs_dev
))
1248 uverbs_dev
->dev
.devt
= base
;
1249 dev_set_name(&uverbs_dev
->dev
, "uverbs%d", uverbs_dev
->devnum
);
1251 cdev_init(&uverbs_dev
->cdev
,
1252 device
->ops
.mmap
? &uverbs_mmap_fops
: &uverbs_fops
);
1253 uverbs_dev
->cdev
.owner
= THIS_MODULE
;
1255 ret
= cdev_device_add(&uverbs_dev
->cdev
, &uverbs_dev
->dev
);
1259 ib_set_client_data(device
, &uverbs_client
, uverbs_dev
);
1263 ida_free(&uverbs_ida
, devnum
);
1265 if (atomic_dec_and_test(&uverbs_dev
->refcount
))
1266 ib_uverbs_comp_dev(uverbs_dev
);
1267 wait_for_completion(&uverbs_dev
->comp
);
1268 put_device(&uverbs_dev
->dev
);
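/*
 * The resulting character device is named uverbsN (dev_set_name() above) and,
 * via uverbs_devnode() below, shows up as /dev/infiniband/uverbsN. Devices
 * whose driver implements ops.mmap get uverbs_mmap_fops; the rest get the
 * mmap-less uverbs_fops.
 */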
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;
	struct ib_uverbs_async_event_file *event_file;
	struct ib_event event;

	/* Pending running commands to terminate */
	uverbs_disassociate_api_pre(uverbs_dev);
	event.event = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
	event.device = ib_dev;

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		list_del_init(&file->list);
		kref_get(&file->ref);

		/* We must release the mutex before going ahead and calling
		 * uverbs_cleanup_ufile, as it might end up indirectly calling
		 * uverbs_close, for example due to freeing the resources (e.g
		 * mmput).
		 */
		mutex_unlock(&uverbs_dev->lists_mutex);

		ib_uverbs_event_handler(&file->event_handler, &event);
		uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
		kref_put(&file->ref, ib_uverbs_release_file);

		mutex_lock(&uverbs_dev->lists_mutex);
	}

	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
		event_file = list_first_entry(&uverbs_dev->
					      uverbs_events_file_list,
					      struct ib_uverbs_async_event_file,
					      list);
		spin_lock_irq(&event_file->ev_queue.lock);
		event_file->ev_queue.is_closed = 1;
		spin_unlock_irq(&event_file->ev_queue.lock);

		list_del(&event_file->list);
		ib_unregister_event_handler(
			&event_file->uverbs_file->event_handler);
		event_file->uverbs_file->event_handler.device =
			NULL;

		wake_up_interruptible(&event_file->ev_queue.poll_wait);
		kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);

	uverbs_disassociate_api(uverbs_dev->uapi);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
	ida_free(&uverbs_ida, uverbs_dev->devnum);

	if (device->ops.disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see an EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid anymore.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);

	put_device(&uverbs_dev->dev);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
				     IB_UVERBS_NUM_FIXED_MINOR,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
				  IB_UVERBS_NUM_DYNAMIC_MINOR,
				  "infiniband_verbs");
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);

out:
	return ret;
}
static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);