/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_ucaps.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
        IB_UVERBS_MAJOR             = 231,
        IB_UVERBS_BASE_MINOR        = 192,
        IB_UVERBS_MAX_DEVICES       = RDMA_MAX_PORTS,
        IB_UVERBS_NUM_FIXED_MINOR   = 32,
        IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES -
                                      IB_UVERBS_NUM_FIXED_MINOR,
};
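
/*
 * Char device numbering, as reconstructed from the enum above and from
 * ib_uverbs_add_one() below: the first IB_UVERBS_NUM_FIXED_MINOR (32) devices
 * use the fixed region starting at MKDEV(231, 192), e.g. uverbs0 -> 231:192
 * and uverbs1 -> 231:193; any further devices fall into the dynamically
 * allocated region reserved via alloc_chrdev_region() in ib_uverbs_init().
 */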
#define IB_UVERBS_BASE_DEV      MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static dev_t dynamic_uverbs_dev;

static DEFINE_IDA(uverbs_ida);
static int ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
static struct ib_client uverbs_client;
static char *uverbs_devnode(const struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static const struct class uverbs_class = {
        .name    = "infiniband_verbs",
        .devnode = uverbs_devnode,
};
/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
        /*
         * We do not hold the hw_destroy_rwsem lock for this flow, instead
         * srcu is used. It does not matter if someone races this with
         * get_context, we get NULL or valid ucontext.
         */
        struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

        if (!srcu_dereference(ufile->device->ib_dev,
                              &ufile->device->disassociate_srcu))
                return ERR_PTR(-EIO);

        if (!ucontext)
                return ERR_PTR(-EINVAL);

        return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
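
/*
 * Illustrative sketch (not part of the original file): the expected call
 * pattern for ib_uverbs_get_ucontext_file(), mirroring what ib_uverbs_mmap()
 * below does.  The SRCU read lock must cover every use of the returned
 * ucontext; "use_ucontext()" is a hypothetical placeholder:
 *
 *      int srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
 *      struct ib_ucontext *ucontext = ib_uverbs_get_ucontext_file(ufile);
 *
 *      if (!IS_ERR(ucontext))
 *              use_ucontext(ucontext);
 *      srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
 */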
int uverbs_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd = mw->pd;
        int ret;

        ret = mw->device->ops.dealloc_mw(mw);
        if (ret)
                return ret;

        atomic_dec(&pd->usecnt);
        return ret;
}
static void ib_uverbs_release_dev(struct device *device)
{
        struct ib_uverbs_device *dev =
                container_of(device, struct ib_uverbs_device, dev);

        uverbs_destroy_api(dev->uapi);
        cleanup_srcu_struct(&dev->disassociate_srcu);
        mutex_destroy(&dev->lists_mutex);
        mutex_destroy(&dev->xrcd_tree_mutex);
        kfree(dev);
}
void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
                           struct ib_ucq_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        if (ev_file) {
                spin_lock_irq(&ev_file->ev_queue.lock);
                list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                        list_del(&evt->list);
                        kfree(evt);
                }
                spin_unlock_irq(&ev_file->ev_queue.lock);

                uverbs_uobject_put(&ev_file->uobj);
        }

        ib_uverbs_release_uevent(&uobj->uevent);
}

void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
{
        struct ib_uverbs_async_event_file *async_file = uobj->event_file;
        struct ib_uverbs_event *evt, *tmp;

        if (!async_file)
                return;

        spin_lock_irq(&async_file->ev_queue.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&async_file->ev_queue.lock);
        uverbs_uobject_put(&async_file->uobj);
}

void ib_uverbs_detach_umcast(struct ib_qp *qp,
                             struct ib_uqp_object *uobj)
{
        struct ib_uverbs_mcast_entry *mcast, *tmp;

        list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
                ib_detach_mcast(qp, &mcast->gid, mcast->lid);
                list_del(&mcast->list);
                kfree(mcast);
        }
}
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
        complete(&dev->comp);
}
void ib_uverbs_release_file(struct kref *ref)
{
        struct ib_uverbs_file *file =
                container_of(ref, struct ib_uverbs_file, ref);
        struct ib_device *ib_dev;
        int srcu_key;

        release_ufile_idr_uobject(file);

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ib_dev = srcu_dereference(file->device->ib_dev,
                                  &file->device->disassociate_srcu);
        if (ib_dev && !ib_dev->ops.disassociate_ucontext)
                module_put(ib_dev->ops.owner);
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

        if (refcount_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);

        if (file->default_async_file)
                uverbs_uobject_put(&file->default_async_file->uobj);
        put_device(&file->device->dev);

        if (file->disassociate_page)
                __free_pages(file->disassociate_page, 0);
        mutex_destroy(&file->disassociation_lock);
        mutex_destroy(&file->umap_lock);
        mutex_destroy(&file->ucontext_lock);
        kfree(file);
}
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
                                    struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos,
                                    size_t eventsz)
{
        struct ib_uverbs_event *event;
        ssize_t ret = 0;

        spin_lock_irq(&ev_queue->lock);

        while (list_empty(&ev_queue->event_list)) {
                if (ev_queue->is_closed) {
                        spin_unlock_irq(&ev_queue->lock);
                        return -EIO;
                }

                spin_unlock_irq(&ev_queue->lock);
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(ev_queue->poll_wait,
                                             (!list_empty(&ev_queue->event_list) ||
                                              ev_queue->is_closed)))
                        return -ERESTARTSYS;

                spin_lock_irq(&ev_queue->lock);
        }

        event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

        if (eventsz > count) {
                ret   = -EINVAL;
                event = NULL;
        } else {
                list_del(ev_queue->event_list.next);
                if (event->counter) {
                        ++(*event->counter);
                        list_del(&event->obj_list);
                }
                ret = eventsz;
        }

        spin_unlock_irq(&ev_queue->lock);

        if (event) {
                if (copy_to_user(buf, event, eventsz))
                        ret = -EFAULT;

                kfree(event);
        }

        return ret;
}
static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
                                          size_t count, loff_t *pos)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
                                    sizeof(struct ib_uverbs_async_event_desc));
}

static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
                                         size_t count, loff_t *pos)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
                                    pos,
                                    sizeof(struct ib_uverbs_comp_event_desc));
}
static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
                                     struct file *filp,
                                     struct poll_table_struct *wait)
{
        __poll_t pollflags = 0;

        poll_wait(filp, &ev_queue->poll_wait, wait);

        spin_lock_irq(&ev_queue->lock);
        if (!list_empty(&ev_queue->event_list))
                pollflags = EPOLLIN | EPOLLRDNORM;
        else if (ev_queue->is_closed)
                pollflags = EPOLLERR;
        spin_unlock_irq(&ev_queue->lock);

        return pollflags;
}
static __poll_t ib_uverbs_async_event_poll(struct file *filp,
                                           struct poll_table_struct *wait)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
                                          struct poll_table_struct *wait)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}

static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}
const struct file_operations uverbs_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_comp_event_read,
        .poll    = ib_uverbs_comp_event_poll,
        .release = uverbs_uobject_fd_release,
        .fasync  = ib_uverbs_comp_event_fasync,
};

const struct file_operations uverbs_async_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_async_event_read,
        .poll    = ib_uverbs_async_event_poll,
        .release = uverbs_async_event_release,
        .fasync  = ib_uverbs_async_event_fasync,
};
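
/*
 * Background note (not in the original file): uverbs_event_fops backs the
 * completion-channel file descriptors that userspace obtains through the
 * CREATE_COMP_CHANNEL command (e.g. libibverbs' ibv_create_comp_channel()),
 * while uverbs_async_event_fops backs the asynchronous event FD tied to a
 * uverbs file.  Both are read()/poll()-driven queues fed by the handlers
 * below.
 */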
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct ib_uverbs_event_queue *ev_queue = cq_context;
        struct ib_ucq_object *uobj;
        struct ib_uverbs_event *entry;
        unsigned long flags;

        if (!ev_queue)
                return;

        spin_lock_irqsave(&ev_queue->lock, flags);
        if (ev_queue->is_closed) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        uobj = cq->uobject;

        entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle;
        entry->counter             = &uobj->comp_events_reported;

        list_add_tail(&entry->list, &ev_queue->event_list);
        list_add_tail(&entry->obj_list, &uobj->comp_list);
        spin_unlock_irqrestore(&ev_queue->lock, flags);

        wake_up_interruptible(&ev_queue->poll_wait);
        kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
                             __u64 element, __u64 event,
                             struct list_head *obj_list, u32 *counter)
{
        struct ib_uverbs_event *entry;
        unsigned long flags;

        if (!async_file)
                return;

        spin_lock_irqsave(&async_file->ev_queue.lock, flags);
        if (async_file->ev_queue.is_closed) {
                spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
                return;
        }

        entry->desc.async.element    = element;
        entry->desc.async.event_type = event;
        entry->desc.async.reserved   = 0;
        entry->counter               = counter;

        list_add_tail(&entry->list, &async_file->ev_queue.event_list);
        if (obj_list)
                list_add_tail(&entry->obj_list, obj_list);
        spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);

        wake_up_interruptible(&async_file->ev_queue.poll_wait);
        kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
static void uverbs_uobj_event(struct ib_uevent_object *eobj,
                              struct ib_event *event)
{
        ib_uverbs_async_handler(eobj->event_file,
                                eobj->uobject.user_handle, event->event,
                                &eobj->event_list, &eobj->events_reported);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
        uverbs_uobj_event(&event->element.cq->uobject->uevent, event);
}
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
        /* for XRC target qp's, check that qp is live */
        if (!event->element.qp->uobject)
                return;

        uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
}
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
        uverbs_uobj_event(&event->element.wq->uobject->uevent, event);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
        uverbs_uobj_event(&event->element.srq->uobject->uevent, event);
}
static void ib_uverbs_event_handler(struct ib_event_handler *handler,
                                    struct ib_event *event)
{
        ib_uverbs_async_handler(
                container_of(handler, struct ib_uverbs_async_event_file,
                             event_handler),
                event->element.port_num, event->event, NULL, NULL);
}
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
        spin_lock_init(&ev_queue->lock);
        INIT_LIST_HEAD(&ev_queue->event_list);
        init_waitqueue_head(&ev_queue->poll_wait);
        ev_queue->is_closed   = 0;
        ev_queue->async_queue = NULL;
}
void ib_uverbs_init_async_event_file(
        struct ib_uverbs_async_event_file *async_file)
{
        struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile;
        struct ib_device *ib_dev = async_file->uobj.context->device;

        ib_uverbs_init_event_queue(&async_file->ev_queue);

        /* The first async_event_file becomes the default one for the file. */
        mutex_lock(&uverbs_file->ucontext_lock);
        if (!uverbs_file->default_async_file) {
                /* Pairs with the put in ib_uverbs_release_file */
                uverbs_uobject_get(&async_file->uobj);
                smp_store_release(&uverbs_file->default_async_file,
                                  async_file);
        }
        mutex_unlock(&uverbs_file->ucontext_lock);

        INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
                              ib_uverbs_event_handler);
        ib_register_event_handler(&async_file->event_handler);
}
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
                          struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
                          const struct uverbs_api_write_method *method_elm)
{
        if (method_elm->is_ex) {
                count -= sizeof(*hdr) + sizeof(*ex_hdr);

                if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
                        return -EINVAL;

                if (hdr->in_words * 8 < method_elm->req_size)
                        return -ENOSPC;

                if (ex_hdr->cmd_hdr_reserved)
                        return -EINVAL;

                if (ex_hdr->response) {
                        if (!hdr->out_words && !ex_hdr->provider_out_words)
                                return -EINVAL;

                        if (hdr->out_words * 8 < method_elm->resp_size)
                                return -ENOSPC;

                        if (!access_ok(u64_to_user_ptr(ex_hdr->response),
                                       (hdr->out_words + ex_hdr->provider_out_words) * 8))
                                return -EFAULT;
                } else {
                        if (hdr->out_words || ex_hdr->provider_out_words)
                                return -EINVAL;
                }

                return 0;
        }

        /* not extended command */
        if (hdr->in_words * 4 != count)
                return -EINVAL;

        if (count < method_elm->req_size + sizeof(*hdr)) {
                /*
                 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
                 * with a 16 byte write instead of 24. Old kernels didn't
                 * check the size so they allowed this. Now that the size is
                 * checked provide a compatibility work around to not break
                 * those userspaces.
                 */
                if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
                    count == 16) {
                        hdr->in_words = 6;
                        return 0;
                }
                return -ENOSPC;
        }

        if (hdr->out_words * 4 < method_elm->resp_size)
                return -ENOSPC;

        return 0;
}
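
/*
 * Illustrative sketch (not in the original file): a legacy write() command is
 * laid out as struct ib_uverbs_cmd_hdr immediately followed by the
 * command-specific request, so in_words/out_words count 4-byte units of the
 * whole transfer.  For commands with has_resp set, the request begins with an
 * __aligned_u64 response pointer, as in include/uapi/rdma/ib_user_verbs.h:
 *
 *      struct ib_uverbs_cmd_hdr {
 *              __u32 command;
 *              __u16 in_words;
 *              __u16 out_words;
 *      };
 *      // e.g. struct ib_uverbs_create_cq starts with "__aligned_u64 response;"
 */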
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                               size_t count, loff_t *pos)
{
        struct ib_uverbs_file *file = filp->private_data;
        const struct uverbs_api_write_method *method_elm;
        struct uverbs_api *uapi = file->device->uapi;
        struct ib_uverbs_ex_cmd_hdr ex_hdr;
        struct ib_uverbs_cmd_hdr hdr;
        struct uverbs_attr_bundle bundle;
        int srcu_key;
        ssize_t ret;

        if (!ib_safe_file_access(filp)) {
                pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
                            task_tgid_vnr(current), current->comm);
                return -EACCES;
        }

        if (count < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        method_elm = uapi_get_method(uapi, hdr.command);
        if (IS_ERR(method_elm))
                return PTR_ERR(method_elm);

        if (method_elm->is_ex) {
                if (count < (sizeof(hdr) + sizeof(ex_hdr)))
                        return -EINVAL;
                if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
                        return -EFAULT;
        }

        ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
        if (ret)
                return ret;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

        buf += sizeof(hdr);

        memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
        bundle.ufile = file;
        bundle.context = NULL; /* only valid if bundle has uobject */
        bundle.uobject = NULL;
        if (!method_elm->is_ex) {
                size_t in_len = hdr.in_words * 4 - sizeof(hdr);
                size_t out_len = hdr.out_words * 4;
                u64 response = 0;

                if (method_elm->has_udata) {
                        bundle.driver_udata.inlen =
                                in_len - method_elm->req_size;
                        in_len = method_elm->req_size;
                        if (bundle.driver_udata.inlen)
                                bundle.driver_udata.inbuf = buf + in_len;
                        else
                                bundle.driver_udata.inbuf = NULL;
                } else {
                        memset(&bundle.driver_udata, 0,
                               sizeof(bundle.driver_udata));
                }

                if (method_elm->has_resp) {
                        /*
                         * The macros check that if has_resp is set
                         * then the command request structure starts
                         * with a '__aligned u64 response' member.
                         */
                        ret = get_user(response, (const u64 __user *)buf);
                        if (ret)
                                goto out_unlock;

                        if (method_elm->has_udata) {
                                bundle.driver_udata.outlen =
                                        out_len - method_elm->resp_size;
                                out_len = method_elm->resp_size;
                                if (bundle.driver_udata.outlen)
                                        bundle.driver_udata.outbuf =
                                                u64_to_user_ptr(response +
                                                                out_len);
                                else
                                        bundle.driver_udata.outbuf = NULL;
                        }
                } else {
                        bundle.driver_udata.outlen = 0;
                        bundle.driver_udata.outbuf = NULL;
                }

                ib_uverbs_init_udata_buf_or_null(
                        &bundle.ucore, buf, u64_to_user_ptr(response),
                        in_len, out_len);
        } else {
                buf += sizeof(ex_hdr);

                ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
                                        u64_to_user_ptr(ex_hdr.response),
                                        hdr.in_words * 8, hdr.out_words * 8);

                ib_uverbs_init_udata_buf_or_null(
                        &bundle.driver_udata, buf + bundle.ucore.inlen,
                        u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
                        ex_hdr.provider_in_words * 8,
                        ex_hdr.provider_out_words * 8);
        }

        ret = method_elm->handler(&bundle);
        if (bundle.uobject)
                uverbs_finalize_object(bundle.uobject, UVERBS_ACCESS_NEW, true,
                                       !ret, &bundle);
out_unlock:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return (ret) ? : count;
}
static const struct vm_operations_struct rdma_umap_ops;

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_ucontext *ucontext;
        int ret = 0;
        int srcu_key;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ucontext = ib_uverbs_get_ucontext_file(file);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto out;
        }

        mutex_lock(&file->disassociation_lock);

        vma->vm_ops = &rdma_umap_ops;
        ret = ucontext->device->ops.mmap(ucontext, vma);

        mutex_unlock(&file->disassociation_lock);
out:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return ret;
}
/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *opriv = vma->vm_private_data;
        struct rdma_umap_priv *priv;

        if (!opriv)
                return;

        /* We are racing with disassociation */
        if (!down_read_trylock(&ufile->hw_destroy_rwsem))
                goto out_zap;
        mutex_lock(&ufile->disassociation_lock);

        /*
         * Disassociation already completed, the VMA should already be zapped.
         */
        if (!ufile->ucontext)
                goto out_unlock;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto out_unlock;
        rdma_umap_priv_init(priv, vma, opriv->entry);

        mutex_unlock(&ufile->disassociation_lock);
        up_read(&ufile->hw_destroy_rwsem);
        return;

out_unlock:
        mutex_unlock(&ufile->disassociation_lock);
        up_read(&ufile->hw_destroy_rwsem);
out_zap:
        /*
         * We can't allow the VMA to be created with the actual IO pages, that
         * would break our API contract, and it can't be stopped at this
         * point, so zap it.
         */
        vma->vm_private_data = NULL;
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
static void rdma_umap_close(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *priv = vma->vm_private_data;

        if (!priv)
                return;

        /*
         * The vma holds a reference on the struct file that created it, which
         * in turn means that the ib_uverbs_file is guaranteed to exist at
         * this point.
         */
        mutex_lock(&ufile->umap_lock);
        if (priv->entry)
                rdma_user_mmap_entry_put(priv->entry);

        list_del(&priv->list);
        mutex_unlock(&ufile->umap_lock);
        kfree(priv);
}
/*
 * Once zap_vma_ptes() has been called, touches to the VMA will come here and
 * we return a dummy writable zero page for all the pfns.
 */
static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
{
        struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
        struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
        vm_fault_t ret = 0;

        if (!priv)
                return VM_FAULT_SIGBUS;

        /* Read only pages can just use the system zero page. */
        if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
                vmf->page = ZERO_PAGE(vmf->address);
                get_page(vmf->page);
                return 0;
        }

        mutex_lock(&ufile->umap_lock);
        if (!ufile->disassociate_page)
                ufile->disassociate_page =
                        alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);

        if (ufile->disassociate_page) {
                /*
                 * This VMA is forced to always be shared so this doesn't have
                 * to worry about COW.
                 */
                vmf->page = ufile->disassociate_page;
                get_page(vmf->page);
        } else {
                ret = VM_FAULT_SIGBUS;
        }
        mutex_unlock(&ufile->umap_lock);

        return ret;
}
static const struct vm_operations_struct rdma_umap_ops = {
        .open  = rdma_umap_open,
        .close = rdma_umap_close,
        .fault = rdma_umap_fault,
};
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
        struct rdma_umap_priv *priv, *next_priv;

        mutex_lock(&ufile->disassociation_lock);

        while (1) {
                struct mm_struct *mm = NULL;

                /* Get an arbitrary mm pointer that hasn't been cleaned yet */
                mutex_lock(&ufile->umap_lock);
                while (!list_empty(&ufile->umaps)) {
                        int ret;

                        priv = list_first_entry(&ufile->umaps,
                                                struct rdma_umap_priv, list);
                        mm = priv->vma->vm_mm;
                        ret = mmget_not_zero(mm);
                        if (!ret) {
                                list_del_init(&priv->list);
                                if (priv->entry) {
                                        rdma_user_mmap_entry_put(priv->entry);
                                        priv->entry = NULL;
                                }
                                mm = NULL;
                                continue;
                        }
                        break;
                }
                mutex_unlock(&ufile->umap_lock);
                if (!mm) {
                        mutex_unlock(&ufile->disassociation_lock);
                        return;
                }

                /*
                 * The umap_lock is nested under mmap_lock since it is used
                 * within the vma_ops callbacks, so we have to clean the list
                 * one mm at a time to get the lock ordering right. Typically
                 * there will only be one mm, so no big deal.
                 */
                mmap_read_lock(mm);
                mutex_lock(&ufile->umap_lock);
                list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
                                          list) {
                        struct vm_area_struct *vma = priv->vma;

                        if (vma->vm_mm != mm)
                                continue;
                        list_del_init(&priv->list);

                        zap_vma_ptes(vma, vma->vm_start,
                                     vma->vm_end - vma->vm_start);

                        if (priv->entry) {
                                rdma_user_mmap_entry_put(priv->entry);
                                priv->entry = NULL;
                        }
                }
                mutex_unlock(&ufile->umap_lock);
                mmap_read_unlock(mm);
                mmput(mm);
        }

        mutex_unlock(&ufile->disassociation_lock);
}
/**
 * rdma_user_mmap_disassociate() - Revoke mmaps for a device
 * @device: device to revoke
 *
 * This function should be called by drivers that need to disable mmaps for the
 * device, for instance because it is going to be reset.
 */
void rdma_user_mmap_disassociate(struct ib_device *device)
{
        struct ib_uverbs_device *uverbs_dev =
                ib_get_client_data(device, &uverbs_client);
        struct ib_uverbs_file *ufile;

        mutex_lock(&uverbs_dev->lists_mutex);
        list_for_each_entry(ufile, &uverbs_dev->uverbs_file_list, list) {
                if (ufile->ucontext)
                        uverbs_user_mmap_disassociate(ufile);
        }
        mutex_unlock(&uverbs_dev->lists_mutex);
}
EXPORT_SYMBOL(rdma_user_mmap_disassociate);
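
/*
 * Illustrative sketch (not part of the original file): a driver about to hard
 * reset its hardware could revoke existing user mappings roughly like this,
 * where "my_dev" and "my_hw_reset()" are hypothetical driver objects:
 *
 *      rdma_user_mmap_disassociate(&my_dev->ib_dev);
 *      my_hw_reset(my_dev);
 *
 * After the call, faults on the revoked VMAs are served by rdma_umap_fault()
 * with zero pages instead of touching the device BAR.
 */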
/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either fail immediately with -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_device *dev;
        struct ib_uverbs_file *file;
        struct ib_device *ib_dev;
        int ret;
        int module_dependent;
        int srcu_key;

        dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
        if (!refcount_inc_not_zero(&dev->refcount))
                return -ENXIO;

        get_device(&dev->dev);
        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        mutex_lock(&dev->lists_mutex);
        ib_dev = srcu_dereference(dev->ib_dev,
                                  &dev->disassociate_srcu);
        if (!ib_dev) {
                ret = -EIO;
                goto err;
        }

        if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
                ret = -EPERM;
                goto err;
        }

        /* In case IB device supports disassociate ucontext, there is no hard
         * dependency between uverbs device and its low level device.
         */
        module_dependent = !(ib_dev->ops.disassociate_ucontext);

        if (module_dependent) {
                if (!try_module_get(ib_dev->ops.owner)) {
                        ret = -ENODEV;
                        goto err;
                }
        }

        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file) {
                ret = -ENOMEM;
                if (module_dependent)
                        goto err_module;

                goto err;
        }

        file->device = dev;
        kref_init(&file->ref);
        mutex_init(&file->ucontext_lock);

        spin_lock_init(&file->uobjects_lock);
        INIT_LIST_HEAD(&file->uobjects);
        init_rwsem(&file->hw_destroy_rwsem);
        mutex_init(&file->umap_lock);
        INIT_LIST_HEAD(&file->umaps);

        mutex_init(&file->disassociation_lock);

        filp->private_data = file;
        list_add_tail(&file->list, &dev->uverbs_file_list);
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        setup_ufile_idr_uobject(file);

        return stream_open(inode, filp);

err_module:
        module_put(ib_dev->ops.owner);

err:
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
        if (refcount_dec_and_test(&dev->refcount))
                ib_uverbs_comp_dev(dev);

        put_device(&dev->dev);
        return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_file *file = filp->private_data;

        uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

        mutex_lock(&file->device->lists_mutex);
        list_del_init(&file->list);
        mutex_unlock(&file->device->lists_mutex);

        kref_put(&file->ref, ib_uverbs_release_file);

        return 0;
}
static const struct file_operations uverbs_fops = {
        .owner          = THIS_MODULE,
        .write          = ib_uverbs_write,
        .open           = ib_uverbs_open,
        .release        = ib_uverbs_close,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
};

static const struct file_operations uverbs_mmap_fops = {
        .owner          = THIS_MODULE,
        .write          = ib_uverbs_write,
        .mmap           = ib_uverbs_mmap,
        .open           = ib_uverbs_open,
        .release        = ib_uverbs_close,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
};
static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data,
                                 struct ib_client_nl_info *res)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int ret;

        if (res->port != -1)
                return -EINVAL;

        res->abi = ibdev->ops.uverbs_abi_ver;
        res->cdev = &uverbs_dev->dev;

        /*
         * To support DRIVER_ID binding in userspace some of the driver need
         * upgrading to expose their PCI dependent revision information
         * through get_context instead of relying on modalias matching. When
         * the drivers are fixed they can drop this flag.
         */
        if (!ibdev->ops.uverbs_no_driver_id_binding) {
                ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID,
                                  ibdev->ops.driver_id);
                if (ret)
                        return ret;
        }
        return 0;
}

static struct ib_client uverbs_client = {
        .name           = "uverbs",
        .no_kverbs_req  = true,
        .add            = ib_uverbs_add_one,
        .remove         = ib_uverbs_remove_one,
        .get_nl_info    = ib_uverbs_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("uverbs");
static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct ib_uverbs_device *dev =
                container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sysfs_emit(buf, "%s\n", dev_name(&ib_dev->dev));
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t abi_version_show(struct device *device,
                                struct device_attribute *attr, char *buf)
{
        struct ib_uverbs_device *dev =
                container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sysfs_emit(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(abi_version);

static struct attribute *ib_dev_attrs[] = {
        &dev_attr_abi_version.attr,
        &dev_attr_ibdev.attr,
        NULL,
};

static const struct attribute_group dev_attr_group = {
        .attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
                         __stringify(IB_USER_VERBS_ABI_VERSION));
static int ib_uverbs_create_uapi(struct ib_device *device,
                                 struct ib_uverbs_device *uverbs_dev)
{
        struct uverbs_api *uapi;

        uapi = uverbs_alloc_api(device);
        if (IS_ERR(uapi))
                return PTR_ERR(uapi);

        uverbs_dev->uapi = uapi;
        return 0;
}
static int ib_uverbs_add_one(struct ib_device *device)
{
        int devnum;
        dev_t base;
        struct ib_uverbs_device *uverbs_dev;
        int ret;

        if (!device->ops.alloc_ucontext ||
            device->type == RDMA_DEVICE_TYPE_SMI)
                return -EOPNOTSUPP;

        uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
        if (!uverbs_dev)
                return -ENOMEM;

        ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
        if (ret) {
                kfree(uverbs_dev);
                return ret;
        }

        device_initialize(&uverbs_dev->dev);
        uverbs_dev->dev.class = &uverbs_class;
        uverbs_dev->dev.parent = device->dev.parent;
        uverbs_dev->dev.release = ib_uverbs_release_dev;
        uverbs_dev->groups[0] = &dev_attr_group;
        uverbs_dev->dev.groups = uverbs_dev->groups;
        refcount_set(&uverbs_dev->refcount, 1);
        init_completion(&uverbs_dev->comp);
        uverbs_dev->xrcd_tree = RB_ROOT;
        mutex_init(&uverbs_dev->xrcd_tree_mutex);
        mutex_init(&uverbs_dev->lists_mutex);
        INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
        rcu_assign_pointer(uverbs_dev->ib_dev, device);
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;

        devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
                               GFP_KERNEL);
        if (devnum < 0) {
                ret = -ENOMEM;
                goto err;
        }
        uverbs_dev->devnum = devnum;
        if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
                base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
        else
                base = IB_UVERBS_BASE_DEV + devnum;

        ret = ib_uverbs_create_uapi(device, uverbs_dev);
        if (ret)
                goto err_uapi;

        uverbs_dev->dev.devt = base;
        dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

        cdev_init(&uverbs_dev->cdev,
                  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
        uverbs_dev->cdev.owner = THIS_MODULE;

        ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
        if (ret)
                goto err_uapi;

        ib_set_client_data(device, &uverbs_client, uverbs_dev);
        return 0;

err_uapi:
        ida_free(&uverbs_ida, devnum);
err:
        if (refcount_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        wait_for_completion(&uverbs_dev->comp);
        put_device(&uverbs_dev->dev);
        return ret;
}
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                                        struct ib_device *ib_dev)
{
        struct ib_uverbs_file *file;

        /* Pending running commands to terminate */
        uverbs_disassociate_api_pre(uverbs_dev);

        mutex_lock(&uverbs_dev->lists_mutex);
        while (!list_empty(&uverbs_dev->uverbs_file_list)) {
                file = list_first_entry(&uverbs_dev->uverbs_file_list,
                                        struct ib_uverbs_file, list);
                list_del_init(&file->list);
                kref_get(&file->ref);

                /* We must release the mutex before going ahead and calling
                 * uverbs_cleanup_ufile, as it might end up indirectly calling
                 * uverbs_close, for example due to freeing the resources (e.g
                 * mmput).
                 */
                mutex_unlock(&uverbs_dev->lists_mutex);

                uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
                kref_put(&file->ref, ib_uverbs_release_file);

                mutex_lock(&uverbs_dev->lists_mutex);
        }
        mutex_unlock(&uverbs_dev->lists_mutex);

        uverbs_disassociate_api(uverbs_dev->uapi);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int wait_clients = 1;

        cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
        ida_free(&uverbs_ida, uverbs_dev->devnum);

        if (device->ops.disassociate_ucontext) {
                /* We disassociate HW resources and immediately return.
                 * Userspace will see a EIO errno for all future access.
                 * Upon returning, ib_device may be freed internally and is not
                 * valid any more.
                 * uverbs_device is still available until all clients close
                 * their files, then the uverbs device ref count will be zero
                 * and its resources will be freed.
                 * Note: At this point no more files can be opened since the
                 * cdev was deleted, however active clients can still issue
                 * commands and close their open files.
                 */
                ib_uverbs_free_hw_resources(uverbs_dev, device);
                wait_clients = 0;
        }

        if (refcount_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        if (wait_clients)
                wait_for_completion(&uverbs_dev->comp);

        put_device(&uverbs_dev->dev);
}
static int __init ib_uverbs_init(void)
{
        int ret;

        ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
                                     IB_UVERBS_NUM_FIXED_MINOR,
                                     "infiniband_verbs");
        if (ret) {
                pr_err("user_verbs: couldn't register device number\n");
                goto out;
        }

        ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
                                  IB_UVERBS_NUM_DYNAMIC_MINOR,
                                  "infiniband_verbs");
        if (ret) {
                pr_err("couldn't register dynamic device number\n");
                goto out_alloc;
        }

        ret = class_register(&uverbs_class);
        if (ret) {
                pr_err("user_verbs: couldn't create class infiniband_verbs\n");
                goto out_chrdev;
        }

        ret = class_create_file(&uverbs_class, &class_attr_abi_version.attr);
        if (ret) {
                pr_err("user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
        }

        ret = ib_register_client(&uverbs_client);
        if (ret) {
                pr_err("user_verbs: couldn't register client\n");
                goto out_class;
        }

        return 0;

out_class:
        class_unregister(&uverbs_class);

out_chrdev:
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);

out:
        return ret;
}
static void __exit ib_uverbs_cleanup(void)
{
        ib_unregister_client(&uverbs_client);
        class_unregister(&uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);
        mmu_notifier_synchronize();
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);