// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Userspace block device - block device whose IO is handled from userspace
 *
 * Takes full advantage of the io_uring passthrough command for communicating
 * with the ublk userspace daemon (ublksrvd) to handle basic IO requests.
 *
 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
 *
 * (part of code stolen from loop.c)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/io_uring.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/task_work.h>
#include <linux/namei.h>
#include <uapi/linux/ublk_cmd.h>

#define UBLK_MINORS	(1U << MINORBITS)

/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
		| UBLK_F_URING_CMD_COMP_IN_TASK \
		| UBLK_F_NEED_GET_DATA \
		| UBLK_F_USER_RECOVERY \
		| UBLK_F_USER_RECOVERY_REISSUE \
		| UBLK_F_UNPRIVILEGED_DEV)

/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
		UBLK_PARAM_TYPE_DISCARD | UBLK_PARAM_TYPE_DEVT)

struct ublk_rq_data {
	struct llist_node node;
	struct callback_head work;
};

struct ublk_uring_cmd_pdu {
	struct ublk_queue *ubq;
};

/*
 * io command is active: sqe cmd is received, and its cqe isn't done
 *
 * If the flag is set, the io command is owned by the ublk driver, and is
 * waiting for an incoming blk-mq request from the ublk block device.
 *
 * If the flag is cleared, the io command will be completed, and owned by
 * ublksrv.
 */
#define UBLK_IO_FLAG_ACTIVE	0x01

/*
 * IO command is completed via cqe, and it is being handled by ublksrv, and
 * not committed yet.
 *
 * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE.
 */
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02

/*
 * IO command is aborted, so this flag is set in case of
 * !UBLK_IO_FLAG_ACTIVE.
 *
 * After this flag is observed, any pending or new incoming request
 * associated with this io command will be failed immediately.
 */
#define UBLK_IO_FLAG_ABORTED 0x04

/*
 * UBLK_IO_FLAG_NEED_GET_DATA is set because the IO command requires
 * getting the data buffer address from ublksrv.
 *
 * Then, bio data can be copied into this data buffer for a WRITE request
 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
 */
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
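
/*
 * Lifecycle sketch of one io slot, as implied by the flags above (the
 * transitions summarize the handlers below; timing is illustrative, not
 * normative):
 *
 *   ublksrv sends FETCH_REQ            -> ACTIVE set
 *   blk-mq request arrives, cqe posted -> ACTIVE cleared, OWNED_BY_SRV set
 *   ublksrv sends COMMIT_AND_FETCH_REQ -> OWNED_BY_SRV cleared, ACTIVE set
 *   daemon dies while !ACTIVE          -> ABORTED set, request failed
 */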
struct ublk_io {
	/* userspace buffer address from io cmd */
	__u64	addr;
	unsigned int flags;
	int res;

	struct io_uring_cmd *cmd;
};

struct ublk_queue {
	int q_id;
	int q_depth;

	unsigned long flags;
	struct task_struct	*ubq_daemon;
	char *io_cmd_buf;

	struct llist_head	io_cmds;

	unsigned long io_addr;	/* mapped vm address */
	unsigned int max_io_sz;
	bool force_abort;
	unsigned short nr_io_ready;	/* how many ios setup */
	struct ublk_device *dev;
	struct ublk_io ios[];
};

#define UBLK_DAEMON_MONITOR_PERIOD	(5 * HZ)

struct ublk_device {
	struct gendisk		*ub_disk;

	char	*__queues;

	unsigned int	queue_size;
	struct ublksrv_ctrl_dev_info	dev_info;

	struct blk_mq_tag_set	tag_set;

	struct cdev		cdev;
	struct device		cdev_dev;

#define UB_STATE_OPEN		0
#define UB_STATE_USED		1
#define UB_STATE_DELETED	2
	unsigned long		state;
	int			ub_number;

	struct mutex		mutex;

	spinlock_t		mm_lock;
	struct mm_struct	*mm;

	struct ublk_params	params;

	struct completion	completion;
	unsigned int		nr_queues_ready;
	unsigned int		nr_privileged_daemon;

	/*
	 * Our ubq->daemon may be killed without any notification, so
	 * monitor each queue's daemon periodically
	 */
	struct delayed_work	monitor_work;
	struct work_struct	quiesce_work;
	struct work_struct	stop_work;
};

/* header of ublk_params */
struct ublk_params_header {
	__u32	len;
	__u32	types;
};

static dev_t ublk_chr_devt;
static struct class *ublk_chr_class;

static DEFINE_IDR(ublk_index_idr);
static DEFINE_SPINLOCK(ublk_idr_lock);
static wait_queue_head_t ublk_idr_wq;	/* wait until one idr is freed */

static DEFINE_MUTEX(ublk_ctl_mutex);

/*
 * Max number of ublk devices allowed to add
 *
 * It can be extended to one per-user limit in future or even controlled
 * by cgroup.
 */
static unsigned int ublks_max = 64;
static unsigned int ublks_added;	/* protected by ublk_ctl_mutex */

static struct miscdevice ublk_misc;

static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
	struct request_queue *q = ub->ub_disk->queue;
	const struct ublk_param_basic *p = &ub->params.basic;

	blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
	blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
	blk_queue_io_min(q, 1 << p->io_min_shift);
	blk_queue_io_opt(q, 1 << p->io_opt_shift);

	blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
			p->attrs & UBLK_ATTR_FUA);
	if (p->attrs & UBLK_ATTR_ROTATIONAL)
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	blk_queue_max_hw_sectors(q, p->max_sectors);
	blk_queue_chunk_sectors(q, p->chunk_sectors);
	blk_queue_virt_boundary(q, p->virt_boundary_mask);

	if (p->attrs & UBLK_ATTR_READ_ONLY)
		set_disk_ro(ub->ub_disk, true);

	set_capacity(ub->ub_disk, p->dev_sectors);
}

static void ublk_dev_param_discard_apply(struct ublk_device *ub)
{
	struct request_queue *q = ub->ub_disk->queue;
	const struct ublk_param_discard *p = &ub->params.discard;

	q->limits.discard_alignment = p->discard_alignment;
	q->limits.discard_granularity = p->discard_granularity;
	blk_queue_max_discard_sectors(q, p->max_discard_sectors);
	blk_queue_max_write_zeroes_sectors(q,
			p->max_write_zeroes_sectors);
	blk_queue_max_discard_segments(q, p->max_discard_segments);
}

static int ublk_validate_params(const struct ublk_device *ub)
{
	/* basic param is the only one which must be set */
	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
		const struct ublk_param_basic *p = &ub->params.basic;

		if (p->logical_bs_shift > PAGE_SHIFT)
			return -EINVAL;

		if (p->logical_bs_shift > p->physical_bs_shift)
			return -EINVAL;

		if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
			return -EINVAL;
	} else
		return -EINVAL;

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
		const struct ublk_param_discard *p = &ub->params.discard;

		/* So far, only support single segment discard */
		if (p->max_discard_sectors && p->max_discard_segments != 1)
			return -EINVAL;

		if (!p->discard_granularity)
			return -EINVAL;
	}

	/* dev_t is read-only */
	if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
		return -EINVAL;

	return 0;
}

static int ublk_apply_params(struct ublk_device *ub)
{
	if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
		return -EINVAL;

	ublk_dev_param_basic_apply(ub);

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
		ublk_dev_param_discard_apply(ub);

	return 0;
}

static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
{
	if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
			!(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK))
		return true;
	return false;
}

static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
{
	if (ubq->flags & UBLK_F_NEED_GET_DATA)
		return true;
	return false;
}

static struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
	if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
		return ub;
	return NULL;
}

static void ublk_put_device(struct ublk_device *ub)
{
	put_device(&ub->cdev_dev);
}

static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
		int qid)
{
	return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
}

static inline bool ublk_rq_has_data(const struct request *rq)
{
	return bio_has_data(rq->bio);
}

static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
		int tag)
{
	return (struct ublksrv_io_desc *)
		&(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
}

static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
	return ublk_get_queue(ub, q_id)->io_cmd_buf;
}

static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);

	return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
			PAGE_SIZE);
}
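
/*
 * Worked example (illustrative numbers): with a queue depth of 128 and a
 * 24-byte struct ublksrv_io_desc, the raw descriptor array is 3072 bytes,
 * which round_up() pads to one 4096-byte page so that the whole buffer can
 * be mmap()ed to ublksrv at page granularity.
 */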

static inline bool ublk_queue_can_use_recovery_reissue(
		struct ublk_queue *ubq)
{
	if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
			(ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
		return true;
	return false;
}

static inline bool ublk_queue_can_use_recovery(
		struct ublk_queue *ubq)
{
	if (ubq->flags & UBLK_F_USER_RECOVERY)
		return true;
	return false;
}

static inline bool ublk_can_use_recovery(struct ublk_device *ub)
{
	if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
		return true;
	return false;
}

static void ublk_free_disk(struct gendisk *disk)
{
	struct ublk_device *ub = disk->private_data;

	clear_bit(UB_STATE_USED, &ub->state);
	put_device(&ub->cdev_dev);
}

static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
		unsigned int *owner_gid)
{
	kuid_t uid;
	kgid_t gid;

	current_uid_gid(&uid, &gid);

	*owner_uid = from_kuid(&init_user_ns, uid);
	*owner_gid = from_kgid(&init_user_ns, gid);
}

static int ublk_open(struct block_device *bdev, fmode_t mode)
{
	struct ublk_device *ub = bdev->bd_disk->private_data;

	if (capable(CAP_SYS_ADMIN))
		return 0;

	/*
	 * If it is an unprivileged device, only the owner can open
	 * the disk. Otherwise it could be a trap made by an
	 * evil user who grants this disk's privileges to other
	 * users deliberately.
	 *
	 * This is reasonable too, given that anyone can create an
	 * unprivileged device without needing anyone else's grant.
	 */
	if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
		unsigned int curr_uid, curr_gid;

		ublk_store_owner_uid_gid(&curr_uid, &curr_gid);

		if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
				ub->dev_info.owner_gid)
			return -EPERM;
	}

	return 0;
}

static const struct block_device_operations ub_fops = {
	.owner =	THIS_MODULE,
	.open =		ublk_open,
	.free_disk =	ublk_free_disk,
};

#define UBLK_MAX_PIN_PAGES	32

struct ublk_map_data {
	const struct ublk_queue *ubq;
	const struct request *rq;
	const struct ublk_io *io;
	unsigned max_bytes;
};

struct ublk_io_iter {
	struct page *pages[UBLK_MAX_PIN_PAGES];
	unsigned pg_off;	/* offset in the 1st page in pages */
	int nr_pages;		/* how many page pointers in pages */
	struct bio *bio;
	struct bvec_iter iter;
};

static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
		unsigned max_bytes, bool to_vm)
{
	const unsigned total = min_t(unsigned, max_bytes,
			PAGE_SIZE - data->pg_off +
			((data->nr_pages - 1) << PAGE_SHIFT));
	unsigned done = 0;
	unsigned pg_idx = 0;

	while (done < total) {
		struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
		const unsigned int bytes = min3(bv.bv_len, total - done,
				(unsigned)(PAGE_SIZE - data->pg_off));
		void *bv_buf = bvec_kmap_local(&bv);
		void *pg_buf = kmap_local_page(data->pages[pg_idx]);

		if (to_vm)
			memcpy(pg_buf + data->pg_off, bv_buf, bytes);
		else
			memcpy(bv_buf, pg_buf + data->pg_off, bytes);

		kunmap_local(pg_buf);
		kunmap_local(bv_buf);

		/* advance page array */
		data->pg_off += bytes;
		if (data->pg_off == PAGE_SIZE) {
			pg_idx += 1;
			data->pg_off = 0;
		}

		done += bytes;

		/* advance bio */
		bio_advance_iter_single(data->bio, &data->iter, bytes);
		if (!data->iter.bi_size) {
			data->bio = data->bio->bi_next;
			if (data->bio == NULL)
				break;
			data->iter = data->bio->bi_iter;
		}
	}

	return done;
}

static inline int ublk_copy_user_pages(struct ublk_map_data *data,
		bool to_vm)
{
	const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
	const unsigned long start_vm = data->io->addr;
	unsigned int done = 0;
	struct ublk_io_iter iter = {
		.pg_off	= start_vm & (PAGE_SIZE - 1),
		.bio	= data->rq->bio,
		.iter	= data->rq->bio->bi_iter,
	};
	const unsigned int nr_pages = round_up(data->max_bytes +
			(start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;

	while (done < nr_pages) {
		const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
				nr_pages - done);
		unsigned i, len;

		iter.nr_pages = get_user_pages_fast(start_vm +
				(done << PAGE_SHIFT), to_pin, gup_flags,
				iter.pages);
		if (iter.nr_pages <= 0)
			return done == 0 ? iter.nr_pages : done;
		len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
		for (i = 0; i < iter.nr_pages; i++) {
			if (to_vm && len)
				set_page_dirty(iter.pages[i]);
			put_page(iter.pages[i]);
		}
		data->max_bytes -= len;
		done += iter.nr_pages;
	}

	return done;
}

static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
		struct ublk_io *io)
{
	const unsigned int rq_bytes = blk_rq_bytes(req);
	/*
	 * no zero copy, we delay copying WRITE request data into the ublksrv
	 * context, and the big benefit is that pinning pages in the current
	 * context is pretty fast, see ublk_copy_user_pages()
	 */
	if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
		return rq_bytes;

	if (ublk_rq_has_data(req)) {
		struct ublk_map_data data = {
			.ubq	=	ubq,
			.rq	=	req,
			.io	=	io,
			.max_bytes =	rq_bytes,
		};

		ublk_copy_user_pages(&data, true);

		return rq_bytes - data.max_bytes;
	}
	return rq_bytes;
}

static int ublk_unmap_io(const struct ublk_queue *ubq,
		const struct request *req,
		struct ublk_io *io)
{
	const unsigned int rq_bytes = blk_rq_bytes(req);

	if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
		struct ublk_map_data data = {
			.ubq	=	ubq,
			.rq	=	req,
			.io	=	io,
			.max_bytes =	io->res,
		};

		WARN_ON_ONCE(io->res > rq_bytes);

		ublk_copy_user_pages(&data, false);

		return io->res - data.max_bytes;
	}
	return rq_bytes;
}

static inline unsigned int ublk_req_build_flags(struct request *req)
{
	unsigned flags = 0;

	if (req->cmd_flags & REQ_FAILFAST_DEV)
		flags |= UBLK_IO_F_FAILFAST_DEV;

	if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
		flags |= UBLK_IO_F_FAILFAST_TRANSPORT;

	if (req->cmd_flags & REQ_FAILFAST_DRIVER)
		flags |= UBLK_IO_F_FAILFAST_DRIVER;

	if (req->cmd_flags & REQ_META)
		flags |= UBLK_IO_F_META;

	if (req->cmd_flags & REQ_FUA)
		flags |= UBLK_IO_F_FUA;

	if (req->cmd_flags & REQ_NOUNMAP)
		flags |= UBLK_IO_F_NOUNMAP;

	if (req->cmd_flags & REQ_SWAP)
		flags |= UBLK_IO_F_SWAP;

	return flags;
}

static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
	struct ublk_io *io = &ubq->ios[req->tag];
	u32 ublk_op;

	switch (req_op(req)) {
	case REQ_OP_READ:
		ublk_op = UBLK_IO_OP_READ;
		break;
	case REQ_OP_WRITE:
		ublk_op = UBLK_IO_OP_WRITE;
		break;
	case REQ_OP_FLUSH:
		ublk_op = UBLK_IO_OP_FLUSH;
		break;
	case REQ_OP_DISCARD:
		ublk_op = UBLK_IO_OP_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		ublk_op = UBLK_IO_OP_WRITE_ZEROES;
		break;
	default:
		return BLK_STS_IOERR;
	}

	/* need to translate since kernel may change */
	iod->op_flags = ublk_op | ublk_req_build_flags(req);
	iod->nr_sectors = blk_rq_sectors(req);
	iod->start_sector = blk_rq_pos(req);
	iod->addr = io->addr;

	return BLK_STS_OK;
}

static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}

static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
	return ubq->ubq_daemon->flags & PF_EXITING;
}

/* todo: handle partial completion */
static void ublk_complete_rq(struct request *req)
{
	struct ublk_queue *ubq = req->mq_hctx->driver_data;
	struct ublk_io *io = &ubq->ios[req->tag];
	unsigned int unmapped_bytes;
	blk_status_t res = BLK_STS_OK;

	/* fail a read IO if nothing is read */
	if (!io->res && req_op(req) == REQ_OP_READ)
		io->res = -EIO;

	if (io->res < 0) {
		res = errno_to_blk_status(io->res);
		goto exit;
	}

	/*
	 * FLUSH, DISCARD or WRITE_ZEROES usually won't return valid data
	 * bytes, so end them directly.
	 *
	 * Neither of them needs to be unmapped.
	 */
	if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE)
		goto exit;

	/* for READ request, writing data in iod->addr to rq buffers */
	unmapped_bytes = ublk_unmap_io(ubq, req, io);

	/*
	 * Extremely unlikely since we got data filled in just before.
	 *
	 * Re-read simply for this unlikely case.
	 */
	if (unlikely(unmapped_bytes < io->res))
		io->res = unmapped_bytes;

	if (blk_update_request(req, BLK_STS_OK, io->res))
		blk_mq_requeue_request(req, true);
	else
		__blk_mq_end_request(req, BLK_STS_OK);

	return;
exit:
	blk_mq_end_request(req, res);
}

/*
 * Since __ublk_rq_task_work always fails requests immediately during
 * exiting, __ublk_fail_req() is only called from abort context during
 * exiting. So the lock is unnecessary.
 *
 * Also aborting may not be started yet; keep in mind that one failed
 * request may be issued by the block layer again.
 */
static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
		struct request *req)
{
	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);

	if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
		io->flags |= UBLK_IO_FLAG_ABORTED;
		if (ublk_queue_can_use_recovery_reissue(ubq))
			blk_mq_requeue_request(req, false);
		else
			blk_mq_end_request(req, BLK_STS_IOERR);
	}
}

static void ubq_complete_io_cmd(struct ublk_io *io, int res)
{
	/* mark this cmd owned by ublksrv */
	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;

	/*
	 * clear ACTIVE since we are done with this sqe/cmd slot.
	 * We can only accept an io cmd in case of being not active.
	 */
	io->flags &= ~UBLK_IO_FLAG_ACTIVE;

	/* tell ublksrv one io request is coming */
	io_uring_cmd_done(io->cmd, res, 0);
}

#define UBLK_REQUEUE_DELAY_MS	3

static inline void __ublk_abort_rq(struct ublk_queue *ubq,
		struct request *rq)
{
	/* We cannot process this rq so just requeue it. */
	if (ublk_queue_can_use_recovery(ubq))
		blk_mq_requeue_request(rq, false);
	else
		blk_mq_end_request(rq, BLK_STS_IOERR);

	mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
}

static inline void __ublk_rq_task_work(struct request *req)
{
	struct ublk_queue *ubq = req->mq_hctx->driver_data;
	int tag = req->tag;
	struct ublk_io *io = &ubq->ios[tag];
	unsigned int mapped_bytes;

	pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
			__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
			ublk_get_iod(ubq, req->tag)->addr);

	/*
	 * Task is exiting if either:
	 *
	 * (1) current != ubq_daemon.
	 * io_uring_cmd_complete_in_task() tries to run task_work
	 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
	 *
	 * (2) current->flags & PF_EXITING.
	 */
	if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
		__ublk_abort_rq(ubq, req);
		return;
	}

	if (ublk_need_get_data(ubq) &&
			(req_op(req) == REQ_OP_WRITE ||
			req_op(req) == REQ_OP_FLUSH)) {
		/*
		 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
		 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
		 * and notify it.
		 */
		if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
			io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
			pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
					__func__, io->cmd->cmd_op, ubq->q_id,
					req->tag, io->flags);
			ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
			return;
		}
		/*
		 * We have handled UBLK_IO_NEED_GET_DATA command,
		 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
		 * do the data copy.
		 */
		io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
		/* update iod->addr because ublksrv may have passed a new io buffer */
		ublk_get_iod(ubq, req->tag)->addr = io->addr;
		pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
				__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
				ublk_get_iod(ubq, req->tag)->addr);
	}

	mapped_bytes = ublk_map_io(ubq, req, io);

	/* partially mapped, update io descriptor */
	if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
		/*
		 * Nothing mapped, retry until we succeed.
		 *
		 * We may never succeed in mapping any bytes here because
		 * of OOM. TODO: reserve one buffer with a single page pinned
		 * for providing a forward progress guarantee.
		 */
		if (unlikely(!mapped_bytes)) {
			blk_mq_requeue_request(req, false);
			blk_mq_delay_kick_requeue_list(req->q,
					UBLK_REQUEUE_DELAY_MS);
			return;
		}

		ublk_get_iod(ubq, req->tag)->nr_sectors =
			mapped_bytes >> 9;
	}

	ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
}
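
/*
 * Two-phase WRITE flow when UBLK_F_NEED_GET_DATA is negotiated, as
 * implemented above (summary of the driver/daemon handshake):
 *
 *   1. a blk-mq WRITE arrives; the driver completes the queued command
 *      with UBLK_IO_RES_NEED_GET_DATA and sets UBLK_IO_FLAG_NEED_GET_DATA
 *   2. ublksrv picks a buffer and re-issues the slot with
 *      UBLK_IO_NEED_GET_DATA, passing the buffer address in ->addr
 *   3. the driver clears the flag, refreshes iod->addr, copies the WRITE
 *      payload into the buffer via ublk_map_io(), and only then completes
 *      the command with UBLK_IO_RES_OK so the daemon can serve the IO
 */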

static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
{
	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
	struct ublk_rq_data *data, *tmp;

	io_cmds = llist_reverse_order(io_cmds);
	llist_for_each_entry_safe(data, tmp, io_cmds, node)
		__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
}

static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
{
	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
	struct ublk_rq_data *data, *tmp;

	llist_for_each_entry_safe(data, tmp, io_cmds, node)
		__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
}

static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
{
	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
	struct ublk_queue *ubq = pdu->ubq;

	ublk_forward_io_cmds(ubq);
}

static void ublk_rq_task_work_fn(struct callback_head *work)
{
	struct ublk_rq_data *data = container_of(work,
			struct ublk_rq_data, work);
	struct request *req = blk_mq_rq_from_pdu(data);
	struct ublk_queue *ubq = req->mq_hctx->driver_data;

	ublk_forward_io_cmds(ubq);
}

static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
	struct ublk_io *io;

	if (!llist_add(&data->node, &ubq->io_cmds))
		return;

	io = &ubq->ios[rq->tag];
	/*
	 * If the check passes, we know that this is a re-issued request aborted
	 * previously in monitor_work because the ubq_daemon(cmd's task) is
	 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
	 * because this ioucmd's io_uring context may be freed now if no inflight
	 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
	 *
	 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
	 * (releasing the tag). Then the request is re-started (allocating the
	 * tag) and we are here. Since releasing/allocating a tag implies
	 * smp_mb(), finding UBLK_IO_FLAG_ABORTED guarantees that this is a
	 * re-issued request aborted previously.
	 */
	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
		ublk_abort_io_cmds(ubq);
	} else if (ublk_can_use_task_work(ubq)) {
		if (task_work_add(ubq->ubq_daemon, &data->work,
					TWA_SIGNAL_NO_IPI))
			ublk_abort_io_cmds(ubq);
	} else {
		struct io_uring_cmd *cmd = io->cmd;
		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);

		pdu->ubq = ubq;
		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
	}
}

static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct ublk_queue *ubq = hctx->driver_data;
	struct request *rq = bd->rq;
	blk_status_t res;

	/* fill iod to slot in io cmd buffer */
	res = ublk_setup_iod(ubq, rq);
	if (unlikely(res != BLK_STS_OK))
		return BLK_STS_IOERR;

	/* With recovery feature enabled, force_abort is set in
	 * ublk_stop_dev() before calling del_gendisk(). We have to
	 * abort all requeued and new rqs here to let del_gendisk()
	 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
	 * to avoid UAF on io_uring ctx.
	 *
	 * Note: force_abort is guaranteed to be seen because it is set
	 * before the request queue is unquiesced.
	 */
	if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
		return BLK_STS_IOERR;

	blk_mq_start_request(bd->rq);

	if (unlikely(ubq_daemon_is_dying(ubq))) {
		__ublk_abort_rq(ubq, rq);
		return BLK_STS_OK;
	}

	ublk_queue_cmd(ubq, rq);

	return BLK_STS_OK;
}

static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
		unsigned int hctx_idx)
{
	struct ublk_device *ub = driver_data;
	struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);

	hctx->driver_data = ubq;
	return 0;
}

static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

	init_task_work(&data->work, ublk_rq_task_work_fn);
	return 0;
}

static const struct blk_mq_ops ublk_mq_ops = {
	.queue_rq       = ublk_queue_rq,
	.init_hctx	= ublk_init_hctx,
	.init_request   = ublk_init_rq,
};

static int ublk_ch_open(struct inode *inode, struct file *filp)
{
	struct ublk_device *ub = container_of(inode->i_cdev,
			struct ublk_device, cdev);

	if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
		return -EBUSY;
	filp->private_data = ub;
	return 0;
}

static int ublk_ch_release(struct inode *inode, struct file *filp)
{
	struct ublk_device *ub = filp->private_data;

	clear_bit(UB_STATE_OPEN, &ub->state);
	return 0;
}

/* map the pre-allocated per-queue cmd buffer to the ublksrv daemon */
static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ublk_device *ub = filp->private_data;
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
	unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
	int q_id, ret = 0;

	spin_lock(&ub->mm_lock);
	if (!ub->mm)
		ub->mm = current->mm;
	if (current->mm != ub->mm)
		ret = -EINVAL;
	spin_unlock(&ub->mm_lock);

	if (ret)
		return ret;

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
	if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
		return -EINVAL;

	q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
	pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
			__func__, q_id, current->pid, vma->vm_start,
			phys_off, (unsigned long)sz);

	if (sz != ublk_queue_cmd_buf_size(ub, q_id))
		return -EINVAL;

	pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
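
/*
 * Sketch of the matching ublksrv side (illustrative userspace code, not
 * part of this driver; "cdev_fd" is an open fd of /dev/ublkcN and "q_id"
 * is the queue being set up by the daemon):
 *
 *	unsigned max_sz = UBLK_MAX_QUEUE_DEPTH *
 *			sizeof(struct ublksrv_io_desc);
 *	off_t off = UBLKSRV_CMD_BUF_OFFSET + q_id * max_sz;
 *	struct ublksrv_io_desc *iods =
 *		mmap(NULL, cmd_buf_sz, PROT_READ, MAP_SHARED, cdev_fd, off);
 *
 * The mapping must be read-only because VM_WRITE is rejected above; after
 * each FETCH completion, iods[tag] describes the I/O to serve.
 */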

static void ublk_commit_completion(struct ublk_device *ub,
		struct ublksrv_io_cmd *ub_cmd)
{
	u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
	struct ublk_queue *ubq = ublk_get_queue(ub, qid);
	struct ublk_io *io = &ubq->ios[tag];
	struct request *req;

	/* now this cmd slot is owned by the ublk driver */
	io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
	io->res = ub_cmd->result;

	/* find the io request and complete */
	req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);

	if (req && likely(!blk_should_fake_timeout(req->q)))
		ublk_complete_rq(req);
}

/*
 * When ->ubq_daemon is exiting, either a new request is ended immediately,
 * or any queued io command is drained, so it is safe to abort the queue
 * locklessly.
 */
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
	int i;

	if (!ublk_get_device(ub))
		return;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
			struct request *rq;

			/*
			 * Either we fail the request or ublk_rq_task_work_fn
			 * will do it
			 */
			rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
			if (rq)
				__ublk_fail_req(ubq, io, rq);
		}
	}
	ublk_put_device(ub);
}

static void ublk_daemon_monitor_work(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, monitor_work.work);
	int i;

	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
		struct ublk_queue *ubq = ublk_get_queue(ub, i);

		if (ubq_daemon_is_dying(ubq)) {
			if (ublk_queue_can_use_recovery(ubq))
				schedule_work(&ub->quiesce_work);
			else
				schedule_work(&ub->stop_work);

			/* abort queue is for making forward progress */
			ublk_abort_queue(ub, ubq);
		}
	}

	/*
	 * We can't schedule monitor work after ub's state is no longer
	 * UBLK_S_DEV_LIVE, i.e. after ublk_remove() or __ublk_quiesce_dev()
	 * is started.
	 *
	 * No need for ub->mutex: monitor work is canceled after the state is
	 * marked as not LIVE, so the new state is observed reliably.
	 */
	if (ub->dev_info.state == UBLK_S_DEV_LIVE)
		schedule_delayed_work(&ub->monitor_work,
				UBLK_DAEMON_MONITOR_PERIOD);
}

static inline bool ublk_queue_ready(struct ublk_queue *ubq)
{
	return ubq->nr_io_ready == ubq->q_depth;
}

static void ublk_cancel_queue(struct ublk_queue *ubq)
{
	int i;

	if (!ublk_queue_ready(ubq))
		return;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		if (io->flags & UBLK_IO_FLAG_ACTIVE)
			io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
	}

	/* all io commands are canceled */
	ubq->nr_io_ready = 0;
}

/* Cancel all pending commands, must be called after del_gendisk() returns */
static void ublk_cancel_dev(struct ublk_device *ub)
{
	int i;

	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_cancel_queue(ublk_get_queue(ub, i));
}

static bool ublk_check_inflight_rq(struct request *rq, void *data)
{
	bool *idle = data;

	if (blk_mq_request_started(rq)) {
		*idle = false;
		return false;
	}
	return true;
}

static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
{
	bool idle;

	WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
	while (true) {
		idle = true;
		blk_mq_tagset_busy_iter(&ub->tag_set,
				ublk_check_inflight_rq, &idle);
		if (idle)
			break;
		msleep(UBLK_REQUEUE_DELAY_MS);
	}
}

static void __ublk_quiesce_dev(struct ublk_device *ub)
{
	pr_devel("%s: quiesce ub: dev_id %d state %s\n",
			__func__, ub->dev_info.dev_id,
			ub->dev_info.state == UBLK_S_DEV_LIVE ?
			"LIVE" : "QUIESCED");
	blk_mq_quiesce_queue(ub->ub_disk->queue);
	ublk_wait_tagset_rqs_idle(ub);
	ub->dev_info.state = UBLK_S_DEV_QUIESCED;
	ublk_cancel_dev(ub);
	/* we are going to release the task_struct of ubq_daemon and reset
	 * ->ubq_daemon to NULL. So in monitor_work, a check on ubq_daemon
	 * would cause UAF. Besides, monitor_work is not necessary in the
	 * QUIESCED state since we have already scheduled quiesce_work and
	 * quiesced all ubqs.
	 *
	 * Do not let monitor_work schedule itself if state is QUIESCED. We
	 * cancel it here and re-schedule it in END_USER_RECOVERY to avoid UAF.
	 */
	cancel_delayed_work_sync(&ub->monitor_work);
}

static void ublk_quiesce_work_fn(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, quiesce_work);

	mutex_lock(&ub->mutex);
	if (ub->dev_info.state != UBLK_S_DEV_LIVE)
		goto unlock;
	__ublk_quiesce_dev(ub);
 unlock:
	mutex_unlock(&ub->mutex);
}

static void ublk_unquiesce_dev(struct ublk_device *ub)
{
	int i;

	pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
			__func__, ub->dev_info.dev_id,
			ub->dev_info.state == UBLK_S_DEV_LIVE ?
			"LIVE" : "QUIESCED");
	/* quiesce_work has run. We let requeued rqs be aborted
	 * before running fallback_wq. "force_abort" must be seen
	 * after the request queue is unquiesced. Then del_gendisk()
	 * can move on.
	 */
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_get_queue(ub, i)->force_abort = true;

	blk_mq_unquiesce_queue(ub->ub_disk->queue);
	/* We may have requeued some rqs in ublk_quiesce_queue() */
	blk_mq_kick_requeue_list(ub->ub_disk->queue);
}

static void ublk_stop_dev(struct ublk_device *ub)
{
	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
		goto unlock;
	if (ublk_can_use_recovery(ub)) {
		if (ub->dev_info.state == UBLK_S_DEV_LIVE)
			__ublk_quiesce_dev(ub);
		ublk_unquiesce_dev(ub);
	}
	del_gendisk(ub->ub_disk);
	ub->dev_info.state = UBLK_S_DEV_DEAD;
	ub->dev_info.ublksrv_pid = -1;
	put_disk(ub->ub_disk);
	ub->ub_disk = NULL;
 unlock:
	ublk_cancel_dev(ub);
	mutex_unlock(&ub->mutex);
	cancel_delayed_work_sync(&ub->monitor_work);
}

/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
	mutex_lock(&ub->mutex);
	ubq->nr_io_ready++;
	if (ublk_queue_ready(ubq)) {
		ubq->ubq_daemon = current;
		get_task_struct(ubq->ubq_daemon);
		ub->nr_queues_ready++;

		if (capable(CAP_SYS_ADMIN))
			ub->nr_privileged_daemon++;
	}
	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
		complete_all(&ub->completion);
	mutex_unlock(&ub->mutex);
}

static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
		int tag)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);

	ublk_queue_cmd(ubq, req);
}

static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
	struct ublk_device *ub = cmd->file->private_data;
	struct ublk_queue *ubq;
	struct ublk_io *io = NULL;
	u32 cmd_op = cmd->cmd_op;
	unsigned tag = ub_cmd->tag;
	int ret = -EINVAL;
	struct request *req;

	pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
			__func__, cmd->cmd_op, ub_cmd->q_id, tag,
			ub_cmd->result);

	if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
		goto out;

	ubq = ublk_get_queue(ub, ub_cmd->q_id);
	if (!ubq || ub_cmd->q_id != ubq->q_id)
		goto out;

	if (ubq->ubq_daemon && ubq->ubq_daemon != current)
		goto out;

	if (tag >= ubq->q_depth)
		goto out;

	io = &ubq->ios[tag];

	/* there is a pending io cmd, something must be wrong */
	if (io->flags & UBLK_IO_FLAG_ACTIVE) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * ensure that the user issues UBLK_IO_NEED_GET_DATA
	 * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
	 */
	if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
			^ (cmd_op == UBLK_IO_NEED_GET_DATA))
		goto out;

	switch (cmd_op) {
	case UBLK_IO_FETCH_REQ:
		/* UBLK_IO_FETCH_REQ is only allowed before the queue is setup */
		if (ublk_queue_ready(ubq)) {
			ret = -EBUSY;
			goto out;
		}
		/*
		 * The io is being handled by server, so COMMIT_RQ is expected
		 * instead of FETCH_REQ
		 */
		if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
			goto out;
		/* FETCH_RQ has to provide IO buffer if NEED GET DATA is not enabled */
		if (!ub_cmd->addr && !ublk_need_get_data(ubq))
			goto out;
		io->cmd = cmd;
		io->flags |= UBLK_IO_FLAG_ACTIVE;
		io->addr = ub_cmd->addr;

		ublk_mark_io_ready(ub, ubq);
		break;
	case UBLK_IO_COMMIT_AND_FETCH_REQ:
		req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
		/*
		 * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET DATA is
		 * not enabled or it is Read IO.
		 */
		if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
			goto out;
		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
			goto out;
		io->addr = ub_cmd->addr;
		io->flags |= UBLK_IO_FLAG_ACTIVE;
		io->cmd = cmd;
		ublk_commit_completion(ub, ub_cmd);
		break;
	case UBLK_IO_NEED_GET_DATA:
		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
			goto out;
		io->addr = ub_cmd->addr;
		io->cmd = cmd;
		io->flags |= UBLK_IO_FLAG_ACTIVE;
		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
		break;
	default:
		goto out;
	}
	return -EIOCBQUEUED;

 out:
	io_uring_cmd_done(cmd, ret, 0);
	pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
			__func__, cmd_op, tag, ret, io ? io->flags : 0);
	return -EIOCBQUEUED;
}
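
/*
 * Sketch of how ublksrv arms one tag with UBLK_IO_FETCH_REQ through the
 * io_uring passthrough command (illustrative userspace code; "ring" is an
 * io_uring created with IORING_SETUP_SQE128, "cdev_fd" an open fd of
 * /dev/ublkcN, and "buf" a per-tag io buffer owned by the daemon; the sqe
 * is assumed to be zeroed first):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct ublksrv_io_cmd *c = (struct ublksrv_io_cmd *)sqe->cmd;
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = cdev_fd;
 *	sqe->cmd_op = UBLK_IO_FETCH_REQ;
 *	c->q_id = q_id;
 *	c->tag = tag;
 *	c->addr = (__u64)(uintptr_t)buf;
 *	io_uring_submit(&ring);
 *
 * The command is left queued (-EIOCBQUEUED); a cqe is only posted once a
 * blk-mq request lands on this tag (see ubq_complete_io_cmd()), after
 * which the daemon serves the io and re-arms the slot with
 * UBLK_IO_COMMIT_AND_FETCH_REQ.
 */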

static const struct file_operations ublk_ch_fops = {
	.owner = THIS_MODULE,
	.open = ublk_ch_open,
	.release = ublk_ch_release,
	.llseek = no_llseek,
	.uring_cmd = ublk_ch_uring_cmd,
	.mmap = ublk_ch_mmap,
};

static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
	int size = ublk_queue_cmd_buf_size(ub, q_id);
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);

	if (ubq->ubq_daemon)
		put_task_struct(ubq->ubq_daemon);
	if (ubq->io_cmd_buf)
		free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
}

static int ublk_init_queue(struct ublk_device *ub, int q_id)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
	void *ptr;
	int size;

	ubq->flags = ub->dev_info.flags;
	ubq->q_id = q_id;
	ubq->q_depth = ub->dev_info.queue_depth;
	size = ublk_queue_cmd_buf_size(ub, q_id);

	ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
	if (!ptr)
		return -ENOMEM;

	ubq->io_cmd_buf = ptr;
	ubq->dev = ub;
	return 0;
}

static void ublk_deinit_queues(struct ublk_device *ub)
{
	int nr_queues = ub->dev_info.nr_hw_queues;
	int i;

	if (!ub->__queues)
		return;

	for (i = 0; i < nr_queues; i++)
		ublk_deinit_queue(ub, i);
	kfree(ub->__queues);
}

static int ublk_init_queues(struct ublk_device *ub)
{
	int nr_queues = ub->dev_info.nr_hw_queues;
	int depth = ub->dev_info.queue_depth;
	int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
	int i, ret = -ENOMEM;

	ub->queue_size = ubq_size;
	ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
	if (!ub->__queues)
		return ret;

	for (i = 0; i < nr_queues; i++) {
		if (ublk_init_queue(ub, i))
			goto fail;
	}

	init_completion(&ub->completion);
	return 0;

 fail:
	ublk_deinit_queues(ub);
	return ret;
}

static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
{
	int i = idx;
	int err;

	spin_lock(&ublk_idr_lock);
	/* allocate id, if @id >= 0, we're requesting that specific id */
	if (i >= 0) {
		err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
	}
	spin_unlock(&ublk_idr_lock);

	if (err >= 0)
		ub->ub_number = err;

	return err;
}

static void ublk_free_dev_number(struct ublk_device *ub)
{
	spin_lock(&ublk_idr_lock);
	idr_remove(&ublk_index_idr, ub->ub_number);
	wake_up_all(&ublk_idr_wq);
	spin_unlock(&ublk_idr_lock);
}

static void ublk_cdev_rel(struct device *dev)
{
	struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);

	blk_mq_free_tag_set(&ub->tag_set);
	ublk_deinit_queues(ub);
	ublk_free_dev_number(ub);
	mutex_destroy(&ub->mutex);
	kfree(ub);
}

static int ublk_add_chdev(struct ublk_device *ub)
{
	struct device *dev = &ub->cdev_dev;
	int minor = ub->ub_number;
	int ret;

	dev->parent = ublk_misc.this_device;
	dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
	dev->class = ublk_chr_class;
	dev->release = ublk_cdev_rel;
	device_initialize(dev);

	ret = dev_set_name(dev, "ublkc%d", minor);
	if (ret)
		goto fail;

	cdev_init(&ub->cdev, &ublk_ch_fops);
	ret = cdev_device_add(&ub->cdev, dev);
	if (ret)
		goto fail;

	ublks_added++;
	return 0;
 fail:
	put_device(dev);
	return ret;
}

static void ublk_stop_work_fn(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, stop_work);

	ublk_stop_dev(ub);
}

/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
	unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;

	ub->dev_info.max_io_buf_bytes =
		round_down(max_io_bytes, PAGE_SIZE);
}

static int ublk_add_tag_set(struct ublk_device *ub)
{
	ub->tag_set.ops = &ublk_mq_ops;
	ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
	ub->tag_set.queue_depth = ub->dev_info.queue_depth;
	ub->tag_set.numa_node = NUMA_NO_NODE;
	ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
	ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ub->tag_set.driver_data = ub;
	return blk_mq_alloc_tag_set(&ub->tag_set);
}

static void ublk_remove(struct ublk_device *ub)
{
	ublk_stop_dev(ub);
	cancel_work_sync(&ub->stop_work);
	cancel_work_sync(&ub->quiesce_work);
	cdev_device_del(&ub->cdev, &ub->cdev_dev);
	put_device(&ub->cdev_dev);
	ublks_added--;
}

static struct ublk_device *ublk_get_device_from_id(int idx)
{
	struct ublk_device *ub = NULL;

	if (idx < 0)
		return NULL;

	spin_lock(&ublk_idr_lock);
	ub = idr_find(&ublk_index_idr, idx);
	if (ub)
		ub = ublk_get_device(ub);
	spin_unlock(&ublk_idr_lock);

	return ub;
}

static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	int ublksrv_pid = (int)header->data[0];
	struct gendisk *disk;
	int ret = -EINVAL;

	if (ublksrv_pid <= 0)
		return -EINVAL;

	wait_for_completion_interruptible(&ub->completion);

	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);

	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
	    test_bit(UB_STATE_USED, &ub->state)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
	if (IS_ERR(disk)) {
		ret = PTR_ERR(disk);
		goto out_unlock;
	}
	sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
	disk->fops = &ub_fops;
	disk->private_data = ub;

	ub->dev_info.ublksrv_pid = ublksrv_pid;
	ub->ub_disk = disk;

	ret = ublk_apply_params(ub);
	if (ret)
		goto out_put_disk;

	/* don't probe partitions if any one ubq daemon is un-trusted */
	if (ub->nr_privileged_daemon != ub->nr_queues_ready)
		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);

	get_device(&ub->cdev_dev);
	ret = add_disk(disk);
	if (ret) {
		/*
		 * Has to drop the reference since ->free_disk won't be
		 * called in case of add_disk failure.
		 */
		ublk_put_device(ub);
		goto out_put_disk;
	}
	set_bit(UB_STATE_USED, &ub->state);
	ub->dev_info.state = UBLK_S_DEV_LIVE;
	ret = 0;
 out_put_disk:
	if (ret)
		put_disk(disk);
 out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}

static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	cpumask_var_t cpumask;
	unsigned long queue;
	unsigned int retlen;
	unsigned int i;
	int ret;

	if (header->len * BITS_PER_BYTE < nr_cpu_ids)
		return -EINVAL;
	if (header->len & (sizeof(unsigned long)-1))
		return -EINVAL;
	if (!header->addr)
		return -EINVAL;

	queue = header->data[0];
	if (queue >= ub->dev_info.nr_hw_queues)
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	for_each_possible_cpu(i) {
		if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
			cpumask_set_cpu(i, cpumask);
	}

	ret = -EFAULT;
	retlen = min_t(unsigned short, header->len, cpumask_size());
	if (copy_to_user(argp, cpumask, retlen))
		goto out_free_cpumask;
	if (retlen != header->len &&
	    clear_user(argp + retlen, header->len - retlen))
		goto out_free_cpumask;

	ret = 0;
 out_free_cpumask:
	free_cpumask_var(cpumask);
	return ret;
}

static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
{
	pr_devel("%s: dev id %d flags %llx\n", __func__,
			info->dev_id, info->flags);
	pr_devel("\t nr_hw_queues %d queue_depth %d\n",
			info->nr_hw_queues, info->queue_depth);
}

static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublksrv_ctrl_dev_info info;
	struct ublk_device *ub;
	int ret;

	if (header->len < sizeof(info) || !header->addr)
		return -EINVAL;
	if (header->queue_id != (u16)-1) {
		pr_warn("%s: queue_id is wrong %x\n",
			__func__, header->queue_id);
		return -EINVAL;
	}

	if (copy_from_user(&info, argp, sizeof(info)))
		return -EFAULT;

	if (capable(CAP_SYS_ADMIN))
		info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
	else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
		return -EPERM;

	/* the created device is always owned by current user */
	ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);

	if (header->dev_id != info.dev_id) {
		pr_warn("%s: dev id not match %u %u\n",
			__func__, header->dev_id, info.dev_id);
		return -EINVAL;
	}

	ublk_dump_dev_info(&info);

	ret = mutex_lock_killable(&ublk_ctl_mutex);
	if (ret)
		return ret;

	ret = -EACCES;
	if (ublks_added >= ublks_max)
		goto out_unlock;

	ret = -ENOMEM;
	ub = kzalloc(sizeof(*ub), GFP_KERNEL);
	if (!ub)
		goto out_unlock;
	mutex_init(&ub->mutex);
	spin_lock_init(&ub->mm_lock);
	INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
	INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
	INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);

	ret = ublk_alloc_dev_number(ub, header->dev_id);
	if (ret < 0)
		goto out_free_ub;

	memcpy(&ub->dev_info, &info, sizeof(info));

	/* update device id */
	ub->dev_info.dev_id = ub->ub_number;

	/*
	 * The 64bit flags will be copied back to userspace as the feature
	 * negotiation result, so we have to clear flags which the driver
	 * doesn't support yet; then userspace can get the correct flags
	 * (features) to handle.
	 */
	ub->dev_info.flags &= UBLK_F_ALL;

	if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
		ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;

	/* We are not ready to support zero copy */
	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;

	ub->dev_info.nr_hw_queues = min_t(unsigned int,
			ub->dev_info.nr_hw_queues, nr_cpu_ids);
	ublk_align_max_io_size(ub);

	ret = ublk_init_queues(ub);
	if (ret)
		goto out_free_dev_number;

	ret = ublk_add_tag_set(ub);
	if (ret)
		goto out_deinit_queues;

	ret = -EFAULT;
	if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
		goto out_free_tag_set;

	/*
	 * Add the char dev so that ublksrv daemon can be setup.
	 * ublk_add_chdev() will cleanup everything if it fails.
	 */
	ret = ublk_add_chdev(ub);
	goto out_unlock;

 out_free_tag_set:
	blk_mq_free_tag_set(&ub->tag_set);
 out_deinit_queues:
	ublk_deinit_queues(ub);
 out_free_dev_number:
	ublk_free_dev_number(ub);
 out_free_ub:
	mutex_destroy(&ub->mutex);
	kfree(ub);
 out_unlock:
	mutex_unlock(&ublk_ctl_mutex);
	return ret;
}
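
/*
 * Sketch of the matching userspace ADD_DEV call (illustrative; "ring" is
 * an io_uring created with IORING_SETUP_SQE128 and "ctrl_fd" an open fd
 * of /dev/ublk-control; the sqe is assumed to be zeroed first):
 *
 *	struct ublksrv_ctrl_dev_info info = {
 *		.dev_id = -1,		// let the driver pick an index
 *		.nr_hw_queues = 1,
 *		.queue_depth = 128,
 *		.max_io_buf_bytes = 512 << 10,
 *	};
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct ublksrv_ctrl_cmd *c = (struct ublksrv_ctrl_cmd *)sqe->cmd;
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = ctrl_fd;
 *	sqe->cmd_op = UBLK_CMD_ADD_DEV;
 *	c->dev_id = -1;			// must match info.dev_id
 *	c->queue_id = -1;		// required by the check above
 *	c->addr = (__u64)(uintptr_t)&info;
 *	c->len = sizeof(info);
 *	io_uring_submit(&ring);
 *
 * On success the driver writes the negotiated dev_info (including the
 * allocated dev_id and feature flags) back through "addr".
 */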

static inline bool ublk_idr_freed(int id)
{
	void *ptr;

	spin_lock(&ublk_idr_lock);
	ptr = idr_find(&ublk_index_idr, id);
	spin_unlock(&ublk_idr_lock);

	return ptr == NULL;
}

static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
{
	struct ublk_device *ub = *p_ub;
	int idx = ub->ub_number;
	int ret;

	ret = mutex_lock_killable(&ublk_ctl_mutex);
	if (ret)
		return ret;

	if (!test_bit(UB_STATE_DELETED, &ub->state)) {
		ublk_remove(ub);
		set_bit(UB_STATE_DELETED, &ub->state);
	}

	/* Mark the reference as consumed */
	*p_ub = NULL;
	ublk_put_device(ub);
	mutex_unlock(&ublk_ctl_mutex);

	/*
	 * Wait until the idr is removed, then it can be reused after
	 * the DEL_DEV command is returned.
	 *
	 * If we return because of a user interrupt, a future delete command
	 * may not wait here, which is still fine, since either:
	 *
	 * - the device number isn't freed; this device won't or needn't
	 *   be deleted again, since UB_STATE_DELETED is set, and the device
	 *   will be released after the last reference is dropped
	 *
	 * - the device number is freed already; we will not find this
	 *   device via ublk_get_device_from_id()
	 */
	wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));

	return 0;
}

static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;

	pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
			__func__, cmd->cmd_op, header->dev_id, header->queue_id,
			header->data[0], header->addr, header->len);
}

static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
	ublk_stop_dev(ub);
	cancel_work_sync(&ub->stop_work);
	cancel_work_sync(&ub->quiesce_work);

	return 0;
}

static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;

	if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
		return -EINVAL;

	if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
		return -EFAULT;

	return 0;
}

/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
{
	ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
	ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);

	if (ub->ub_disk) {
		ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
		ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
	} else {
		ub->params.devt.disk_major = 0;
		ub->params.devt.disk_minor = 0;
	}
	ub->params.types |= UBLK_PARAM_TYPE_DEVT;
}

static int ublk_ctrl_get_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	mutex_lock(&ub->mutex);
	ublk_ctrl_fill_params_devt(ub);
	if (copy_to_user(argp, &ub->params, ph.len))
		ret = -EFAULT;
	else
		ret = 0;
	mutex_unlock(&ub->mutex);

	return ret;
}

static int ublk_ctrl_set_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len || !ph.types)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	/* parameters can only be changed when device isn't live */
	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
		ret = -EACCES;
	} else if (copy_from_user(&ub->params, argp, ph.len)) {
		ret = -EFAULT;
	} else {
		/* clear all we don't support yet */
		ub->params.types &= UBLK_PARAM_TYPE_ALL;
		ret = ublk_validate_params(ub);
	}
	mutex_unlock(&ub->mutex);

	return ret;
}

static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
	int i;

	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
	/* All old ioucmds have to be completed */
	WARN_ON_ONCE(ubq->nr_io_ready);
	/* old daemon is PF_EXITING, put it now */
	put_task_struct(ubq->ubq_daemon);
	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
	ubq->ubq_daemon = NULL;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		/* forget everything now and be ready for new FETCH_REQ */
		io->flags = 0;
		io->cmd = NULL;
		io->addr = 0;
	}
}

static int ublk_ctrl_start_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	int ret = -EINVAL;
	int i;

	mutex_lock(&ub->mutex);
	if (!ublk_can_use_recovery(ub))
		goto out_unlock;
	/*
	 * START_RECOVERY is only allowed after:
	 *
	 * (1) UB_STATE_OPEN is not set, which means the dying process has
	 *     exited and the related io_uring ctx is freed, so the file
	 *     struct of /dev/ublkcX is released.
	 *
	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
	 *     (a) has quiesced the request queue
	 *     (b) has requeued every inflight rq whose io_flags is ACTIVE
	 *     (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
	 *     (d) has completed/canceled all ioucmds owned by the dying process
	 */
	if (test_bit(UB_STATE_OPEN, &ub->state) ||
			ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
		ret = -EBUSY;
		goto out_unlock;
	}
	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
	ub->mm = NULL;
	ub->nr_queues_ready = 0;
	ub->nr_privileged_daemon = 0;
	init_completion(&ub->completion);
	ret = 0;
 out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}

static int ublk_ctrl_end_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	int ublksrv_pid = (int)header->data[0];
	int ret = -EINVAL;

	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
	/* wait until new ubq_daemon sending all FETCH_REQ */
	wait_for_completion_interruptible(&ub->completion);
	pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);

	mutex_lock(&ub->mutex);
	if (!ublk_can_use_recovery(ub))
		goto out_unlock;

	if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
		ret = -EBUSY;
		goto out_unlock;
	}
	ub->dev_info.ublksrv_pid = ublksrv_pid;
	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
			__func__, ublksrv_pid, header->dev_id);
	blk_mq_unquiesce_queue(ub->ub_disk->queue);
	pr_devel("%s: queue unquiesced, dev id %d.\n",
			__func__, header->dev_id);
	blk_mq_kick_requeue_list(ub->ub_disk->queue);
	ub->dev_info.state = UBLK_S_DEV_LIVE;
	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
	ret = 0;
 out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}
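
/*
 * Recovery sequence as implemented above (summary; the commands are
 * issued by the new daemon through /dev/ublk-control):
 *
 *   1. old daemon dies -> monitor_work schedules quiesce_work, the queue
 *      is quiesced and the device enters UBLK_S_DEV_QUIESCED
 *   2. UBLK_CMD_START_USER_RECOVERY reinits every queue and clears ub->mm
 *   3. the new daemon opens /dev/ublkcX, mmaps the cmd buffers and arms
 *      every tag with UBLK_IO_FETCH_REQ
 *   4. UBLK_CMD_END_USER_RECOVERY waits for step 3, unquiesces the queue
 *      and moves the device back to UBLK_S_DEV_LIVE
 */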

/*
 * All control commands are sent via /dev/ublk-control, so we have to check
 * the destination device's permission
 */
static int ublk_char_dev_permission(struct ublk_device *ub,
		const char *dev_path, int mask)
{
	struct path path;
	struct kstat stat;
	int err;

	err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
	if (err)
		goto exit;

	err = -EPERM;
	if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
		goto exit;

	err = inode_permission(&nop_mnt_idmap,
			d_backing_inode(path.dentry), mask);
 exit:
	path_put(&path);
	return err;
}

static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	char *dev_path = NULL;
	int ret;
	int mask;

	if (!unprivileged) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/*
		 * The newly added command of UBLK_CMD_GET_DEV_INFO2 includes
		 * char_dev_path in the payload too, since userspace may not
		 * know if the specified device is created in unprivileged
		 * mode.
		 */
		if (cmd->cmd_op != UBLK_CMD_GET_DEV_INFO2)
			return 0;
	}

	/*
	 * User has to provide the char device path for unprivileged ublk
	 *
	 * header->addr always points to the dev path buffer, and
	 * header->dev_path_len records the length of the dev path buffer.
	 */
	if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
		return -EINVAL;

	if (header->len < header->dev_path_len)
		return -EINVAL;

	dev_path = kmalloc(header->dev_path_len + 1, GFP_KERNEL);
	if (!dev_path)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(dev_path, argp, header->dev_path_len))
		goto exit;
	dev_path[header->dev_path_len] = 0;

	ret = -EINVAL;
	switch (cmd->cmd_op) {
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
	case UBLK_CMD_GET_QUEUE_AFFINITY:
	case UBLK_CMD_GET_PARAMS:
		mask = MAY_READ;
		break;
	case UBLK_CMD_START_DEV:
	case UBLK_CMD_STOP_DEV:
	case UBLK_CMD_ADD_DEV:
	case UBLK_CMD_DEL_DEV:
	case UBLK_CMD_SET_PARAMS:
	case UBLK_CMD_START_USER_RECOVERY:
	case UBLK_CMD_END_USER_RECOVERY:
		mask = MAY_READ | MAY_WRITE;
		break;
	default:
		goto exit;
	}

	ret = ublk_char_dev_permission(ub, dev_path, mask);
	if (!ret) {
		header->len -= header->dev_path_len;
		header->addr += header->dev_path_len;
	}
	pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
			__func__, ub->ub_number, cmd->cmd_op,
			ub->dev_info.owner_uid, ub->dev_info.owner_gid,
			dev_path, ret);
 exit:
	kfree(dev_path);
	return ret;
}

static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	struct ublk_device *ub = NULL;
	int ret = -EINVAL;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ublk_ctrl_cmd_dump(cmd);

	if (!(issue_flags & IO_URING_F_SQE128))
		goto out;

	if (cmd->cmd_op != UBLK_CMD_ADD_DEV) {
		ret = -ENODEV;
		ub = ublk_get_device_from_id(header->dev_id);
		if (!ub)
			goto out;

		ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
	} else {
		/* ADD_DEV permission check is done in command handler */
		ret = 0;
	}

	if (ret)
		goto put_dev;

	switch (cmd->cmd_op) {
	case UBLK_CMD_START_DEV:
		ret = ublk_ctrl_start_dev(ub, cmd);
		break;
	case UBLK_CMD_STOP_DEV:
		ret = ublk_ctrl_stop_dev(ub);
		break;
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
		ret = ublk_ctrl_get_dev_info(ub, cmd);
		break;
	case UBLK_CMD_ADD_DEV:
		ret = ublk_ctrl_add_dev(cmd);
		break;
	case UBLK_CMD_DEL_DEV:
		ret = ublk_ctrl_del_dev(&ub);
		break;
	case UBLK_CMD_GET_QUEUE_AFFINITY:
		ret = ublk_ctrl_get_queue_affinity(ub, cmd);
		break;
	case UBLK_CMD_GET_PARAMS:
		ret = ublk_ctrl_get_params(ub, cmd);
		break;
	case UBLK_CMD_SET_PARAMS:
		ret = ublk_ctrl_set_params(ub, cmd);
		break;
	case UBLK_CMD_START_USER_RECOVERY:
		ret = ublk_ctrl_start_recovery(ub, cmd);
		break;
	case UBLK_CMD_END_USER_RECOVERY:
		ret = ublk_ctrl_end_recovery(ub, cmd);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}

 put_dev:
	if (ub)
		ublk_put_device(ub);
 out:
	io_uring_cmd_done(cmd, ret, 0);
	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
	return -EIOCBQUEUED;
}

static const struct file_operations ublk_ctl_fops = {
	.open		= nonseekable_open,
	.uring_cmd      = ublk_ctrl_uring_cmd,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice ublk_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "ublk-control",
	.fops		= &ublk_ctl_fops,
};

static int __init ublk_init(void)
{
	int ret;

	init_waitqueue_head(&ublk_idr_wq);

	ret = misc_register(&ublk_misc);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
	if (ret)
		goto unregister_mis;

	ublk_chr_class = class_create(THIS_MODULE, "ublk-char");
	if (IS_ERR(ublk_chr_class)) {
		ret = PTR_ERR(ublk_chr_class);
		goto free_chrdev_region;
	}
	return 0;

free_chrdev_region:
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
unregister_mis:
	misc_deregister(&ublk_misc);
	return ret;
}

static void __exit ublk_exit(void)
{
	struct ublk_device *ub;
	int id;

	idr_for_each_entry(&ublk_index_idr, ub, id)
		ublk_remove(ub);

	class_destroy(ublk_chr_class);
	misc_deregister(&ublk_misc);

	idr_destroy(&ublk_index_idr);
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
}

module_init(ublk_init);
module_exit(ublk_exit);

module_param(ublks_max, uint, 0444);
MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");

MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_LICENSE("GPL");