// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Userspace block device - block device whose IO is handled from userspace
 *
 * Takes full advantage of io_uring passthrough commands for communicating
 * with the ublk userspace daemon (ublksrvd) to handle basic IO requests.
 *
 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
 *
 * (part of code stolen from loop.c)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/io_uring.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/task_work.h>
#include <linux/namei.h>
#include <linux/kref.h>
#include <uapi/linux/ublk_cmd.h>
#define UBLK_MINORS		(1U << MINORBITS)

/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
		| UBLK_F_URING_CMD_COMP_IN_TASK \
		| UBLK_F_NEED_GET_DATA \
		| UBLK_F_USER_RECOVERY \
		| UBLK_F_USER_RECOVERY_REISSUE \
		| UBLK_F_UNPRIVILEGED_DEV \
		| UBLK_F_CMD_IOCTL_ENCODE \
		| UBLK_F_USER_COPY)

/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
		UBLK_PARAM_TYPE_DISCARD | UBLK_PARAM_TYPE_DEVT)
struct ublk_rq_data {
	struct llist_node node;

	struct kref ref;
};

struct ublk_uring_cmd_pdu {
	struct ublk_queue *ubq;
};
/*
 * io command is active: sqe cmd is received, and its cqe isn't done
 *
 * If the flag is set, the io command is owned by the ublk driver, and it
 * waits for an incoming blk-mq request from the ublk block device.
 *
 * If the flag is cleared, the io command will be completed, and owned by
 * the ublk server.
 */
#define UBLK_IO_FLAG_ACTIVE	0x01

/*
 * IO command is completed via cqe, and it is being handled by ublksrv, and
 * not committed yet.
 *
 * Basically exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used for
 * cross verification.
 */
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02

/*
 * IO command is aborted, so this flag is set in case of
 * !UBLK_IO_FLAG_ACTIVE.
 *
 * After this flag is observed, any pending or new incoming request
 * associated with this io command will be failed immediately.
 */
#define UBLK_IO_FLAG_ABORTED 0x04

/*
 * UBLK_IO_FLAG_NEED_GET_DATA is set because the IO command requires
 * getting the data buffer address from ublksrv.
 *
 * Then, bio data could be copied into this data buffer for a WRITE request
 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
 */
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
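
/*
 * Editor's note (illustrative summary derived from the flags above, not
 * normative): a typical slot cycles ACTIVE (FETCH_REQ queued, owned by
 * the driver) -> OWNED_BY_SRV (cqe delivered, ublksrv handling the IO)
 * -> ACTIVE again once COMMIT_AND_FETCH_REQ re-arms the slot; ABORTED is
 * a terminal state entered only while !ACTIVE.
 */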
struct ublk_io {
	/* userspace buffer address from io cmd */
	__u64	addr;
	unsigned int flags;
	int res;

	struct io_uring_cmd *cmd;
};

struct ublk_queue {
	int q_id;
	int q_depth;

	unsigned long flags;
	struct task_struct	*ubq_daemon;
	char *io_cmd_buf;

	struct llist_head	io_cmds;

	unsigned long io_addr;	/* mapped vm address */
	unsigned int max_io_sz;
	bool force_abort;
	bool timeout;
	unsigned short nr_io_ready;	/* how many ios setup */
	struct ublk_device *dev;
	struct ublk_io ios[];
};
#define UBLK_DAEMON_MONITOR_PERIOD	(5 * HZ)

struct ublk_device {
	struct gendisk		*ub_disk;

	char	*__queues;

	unsigned int	queue_size;
	struct ublksrv_ctrl_dev_info	dev_info;

	struct blk_mq_tag_set	tag_set;

	struct cdev		cdev;
	struct device		cdev_dev;

#define UB_STATE_OPEN		0
#define UB_STATE_USED		1
#define UB_STATE_DELETED	2
	unsigned long		state;
	int			ub_number;

	struct mutex		mutex;

	spinlock_t		mm_lock;
	struct mm_struct	*mm;

	struct ublk_params	params;

	struct completion	completion;
	unsigned int		nr_queues_ready;
	unsigned int		nr_privileged_daemon;

	/*
	 * Our ubq->daemon may be killed without any notification, so
	 * monitor each queue's daemon periodically
	 */
	struct delayed_work	monitor_work;
	struct work_struct	quiesce_work;
	struct work_struct	stop_work;
};

/* header of ublk_params */
struct ublk_params_header {
	__u32	len;
	__u32	types;
};
static inline void __ublk_complete_rq(struct request *req);
static void ublk_complete_rq(struct kref *ref);

static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
	.name = "ublk-char",
};

static DEFINE_IDR(ublk_index_idr);
static DEFINE_SPINLOCK(ublk_idr_lock);
static wait_queue_head_t ublk_idr_wq;	/* wait until one idr is freed */

static DEFINE_MUTEX(ublk_ctl_mutex);

/*
 * Max number of ublk devices allowed to add
 *
 * It can be extended to one per-user limit in future or even controlled
 * by cgroup.
 */
static unsigned int ublks_max = 64;
static unsigned int ublks_added;	/* protected by ublk_ctl_mutex */

static struct miscdevice ublk_misc;
static inline unsigned ublk_pos_to_hwq(loff_t pos)
{
	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
		UBLK_QID_BITS_MASK;
}

static inline unsigned ublk_pos_to_buf_off(loff_t pos)
{
	return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
}

static inline unsigned ublk_pos_to_tag(loff_t pos)
{
	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
		UBLK_TAG_BITS_MASK;
}
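
/*
 * Illustrative example (not driver code): inverting the helpers above,
 * a ublk server computes the /dev/ublkcN file position for user-copy as
 *
 *	pos = UBLKSRV_IO_BUF_OFFSET +
 *		((__u64)q_id << UBLK_QID_OFF) +
 *		((__u64)tag << UBLK_TAG_OFF) + byte_offset_in_io;
 *
 * assuming the UBLK_* offset macros from <uapi/linux/ublk_cmd.h>.
 */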
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
	struct request_queue *q = ub->ub_disk->queue;
	const struct ublk_param_basic *p = &ub->params.basic;

	blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
	blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
	blk_queue_io_min(q, 1 << p->io_min_shift);
	blk_queue_io_opt(q, 1 << p->io_opt_shift);

	blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
			p->attrs & UBLK_ATTR_FUA);
	if (p->attrs & UBLK_ATTR_ROTATIONAL)
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	blk_queue_max_hw_sectors(q, p->max_sectors);
	blk_queue_chunk_sectors(q, p->chunk_sectors);
	blk_queue_virt_boundary(q, p->virt_boundary_mask);

	if (p->attrs & UBLK_ATTR_READ_ONLY)
		set_disk_ro(ub->ub_disk, true);

	set_capacity(ub->ub_disk, p->dev_sectors);
}
static void ublk_dev_param_discard_apply(struct ublk_device *ub)
{
	struct request_queue *q = ub->ub_disk->queue;
	const struct ublk_param_discard *p = &ub->params.discard;

	q->limits.discard_alignment = p->discard_alignment;
	q->limits.discard_granularity = p->discard_granularity;
	blk_queue_max_discard_sectors(q, p->max_discard_sectors);
	blk_queue_max_write_zeroes_sectors(q,
			p->max_write_zeroes_sectors);
	blk_queue_max_discard_segments(q, p->max_discard_segments);
}
static int ublk_validate_params(const struct ublk_device *ub)
{
	/* basic param is the only one which must be set */
	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
		const struct ublk_param_basic *p = &ub->params.basic;

		if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
			return -EINVAL;

		if (p->logical_bs_shift > p->physical_bs_shift)
			return -EINVAL;

		if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
			return -EINVAL;
	} else
		return -EINVAL;

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
		const struct ublk_param_discard *p = &ub->params.discard;

		/* So far, only support single segment discard */
		if (p->max_discard_sectors && p->max_discard_segments != 1)
			return -EINVAL;

		if (!p->discard_granularity)
			return -EINVAL;
	}

	/* dev_t is read-only */
	if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
		return -EINVAL;

	return 0;
}

static int ublk_apply_params(struct ublk_device *ub)
{
	if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
		return -EINVAL;

	ublk_dev_param_basic_apply(ub);

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
		ublk_dev_param_discard_apply(ub);

	return 0;
}
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
	return ubq->flags & UBLK_F_USER_COPY;
}

static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
	/*
	 * read()/write() is involved in user copy, so a request reference
	 * has to be grabbed
	 */
	return ublk_support_user_copy(ubq);
}

static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		kref_init(&data->ref);
	}
}

static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		return kref_get_unless_zero(&data->ref);
	}

	return true;
}

static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		kref_put(&data->ref, ublk_complete_rq);
	} else {
		__ublk_complete_rq(req);
	}
}

static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
{
	return ubq->flags & UBLK_F_NEED_GET_DATA;
}
static struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
	if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
		return ub;
	return NULL;
}

static void ublk_put_device(struct ublk_device *ub)
{
	put_device(&ub->cdev_dev);
}

static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
		int qid)
{
	return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
}

static inline bool ublk_rq_has_data(const struct request *rq)
{
	return bio_has_data(rq->bio);
}

static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
		int tag)
{
	return (struct ublksrv_io_desc *)
		&(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
}
static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
	return ublk_get_queue(ub, q_id)->io_cmd_buf;
}

static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);

	return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
			PAGE_SIZE);
}
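
/*
 * Worked example (illustrative): sizeof(struct ublksrv_io_desc) is 24
 * bytes, so a queue depth of 128 needs 128 * 24 = 3072 bytes of
 * descriptors, which the helper above rounds up to one 4K page.
 */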
static inline bool ublk_queue_can_use_recovery_reissue(
		struct ublk_queue *ubq)
{
	return (ubq->flags & UBLK_F_USER_RECOVERY) &&
			(ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
}

static inline bool ublk_queue_can_use_recovery(
		struct ublk_queue *ubq)
{
	return ubq->flags & UBLK_F_USER_RECOVERY;
}

static inline bool ublk_can_use_recovery(struct ublk_device *ub)
{
	return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
}
static void ublk_free_disk(struct gendisk *disk)
{
	struct ublk_device *ub = disk->private_data;

	clear_bit(UB_STATE_USED, &ub->state);
	put_device(&ub->cdev_dev);
}

static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
		unsigned int *owner_gid)
{
	kuid_t uid;
	kgid_t gid;

	current_uid_gid(&uid, &gid);

	*owner_uid = from_kuid(&init_user_ns, uid);
	*owner_gid = from_kgid(&init_user_ns, gid);
}
static int ublk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct ublk_device *ub = disk->private_data;

	if (capable(CAP_SYS_ADMIN))
		return 0;

	/*
	 * If the device is unprivileged, only its owner may open the
	 * disk. Otherwise it could be a trap set by an evil user who
	 * deliberately grants this disk's privileges to other users.
	 *
	 * This is also reasonable given that anyone can create an
	 * unprivileged device without needing anyone else's grant.
	 */
	if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
		unsigned int curr_uid, curr_gid;

		ublk_store_owner_uid_gid(&curr_uid, &curr_gid);

		if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
				ub->dev_info.owner_gid)
			return -EPERM;
	}

	return 0;
}

static const struct block_device_operations ub_fops = {
	.owner =	THIS_MODULE,
	.open =		ublk_open,
	.free_disk =	ublk_free_disk,
};
#define UBLK_MAX_PIN_PAGES	32

struct ublk_io_iter {
	struct page *pages[UBLK_MAX_PIN_PAGES];
	struct bio *bio;
	struct bvec_iter iter;
};
/* copy 'total' bytes between the pinned pages and the bio vecs */
static void ublk_copy_io_pages(struct ublk_io_iter *data,
		size_t total, size_t pg_off, int dir)
{
	unsigned done = 0;
	unsigned pg_idx = 0;

	while (done < total) {
		struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
		unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
				(unsigned)(PAGE_SIZE - pg_off));
		void *bv_buf = bvec_kmap_local(&bv);
		void *pg_buf = kmap_local_page(data->pages[pg_idx]);

		if (dir == ITER_DEST)
			memcpy(pg_buf + pg_off, bv_buf, bytes);
		else
			memcpy(bv_buf, pg_buf + pg_off, bytes);

		kunmap_local(pg_buf);
		kunmap_local(bv_buf);

		/* advance page array */
		pg_off += bytes;
		if (pg_off == PAGE_SIZE) {
			pg_idx += 1;
			pg_off = 0;
		}

		done += bytes;

		/* advance bio */
		bio_advance_iter_single(data->bio, &data->iter, bytes);
		if (!data->iter.bi_size) {
			data->bio = data->bio->bi_next;
			if (data->bio == NULL)
				break;

			data->iter = data->bio->bi_iter;
		}
	}
}
static bool ublk_advance_io_iter(const struct request *req,
		struct ublk_io_iter *iter, unsigned int offset)
{
	struct bio *bio = req->bio;

	for_each_bio(bio) {
		if (bio->bi_iter.bi_size > offset) {
			iter->bio = bio;
			iter->iter = bio->bi_iter;
			bio_advance_iter(iter->bio, &iter->iter, offset);
			return true;
		}
		offset -= bio->bi_iter.bi_size;
	}
	return false;
}
/*
 * Copy data between request pages and the iov_iter; 'offset' is the
 * linear offset into the request where copying starts.
 */
static size_t ublk_copy_user_pages(const struct request *req,
		unsigned offset, struct iov_iter *uiter, int dir)
{
	struct ublk_io_iter iter;
	size_t done = 0;

	if (!ublk_advance_io_iter(req, &iter, offset))
		return 0;

	while (iov_iter_count(uiter) && iter.bio) {
		unsigned nr_pages;
		ssize_t len;
		size_t off;
		int i;

		len = iov_iter_get_pages2(uiter, iter.pages,
				iov_iter_count(uiter),
				UBLK_MAX_PIN_PAGES, &off);
		if (len <= 0)
			return done;

		ublk_copy_io_pages(&iter, len, off, dir);
		nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
		for (i = 0; i < nr_pages; i++) {
			if (dir == ITER_DEST)
				set_page_dirty(iter.pages[i]);
			put_page(iter.pages[i]);
		}
		done += len;
	}

	return done;
}
static inline bool ublk_need_map_req(const struct request *req)
{
	return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
}

static inline bool ublk_need_unmap_req(const struct request *req)
{
	return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
}
static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
		struct ublk_io *io)
{
	const unsigned int rq_bytes = blk_rq_bytes(req);

	if (ublk_support_user_copy(ubq))
		return rq_bytes;

	/*
	 * No zero copy: we delay copying WRITE request data into the
	 * ublksrv context, and the big benefit is that pinning pages in
	 * the current context is pretty fast, see ublk_pin_user_pages
	 */
	if (ublk_need_map_req(req)) {
		struct iov_iter iter;
		const int dir = ITER_DEST;

		import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
				&iter);
		return ublk_copy_user_pages(req, 0, &iter, dir);
	}

	return rq_bytes;
}

static int ublk_unmap_io(const struct ublk_queue *ubq,
		const struct request *req,
		struct ublk_io *io)
{
	const unsigned int rq_bytes = blk_rq_bytes(req);

	if (ublk_support_user_copy(ubq))
		return rq_bytes;

	if (ublk_need_unmap_req(req)) {
		struct iov_iter iter;
		const int dir = ITER_SOURCE;

		WARN_ON_ONCE(io->res > rq_bytes);

		import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
				&iter);
		return ublk_copy_user_pages(req, 0, &iter, dir);
	}

	return rq_bytes;
}
static inline unsigned int ublk_req_build_flags(struct request *req)
{
	unsigned flags = 0;

	if (req->cmd_flags & REQ_FAILFAST_DEV)
		flags |= UBLK_IO_F_FAILFAST_DEV;

	if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
		flags |= UBLK_IO_F_FAILFAST_TRANSPORT;

	if (req->cmd_flags & REQ_FAILFAST_DRIVER)
		flags |= UBLK_IO_F_FAILFAST_DRIVER;

	if (req->cmd_flags & REQ_META)
		flags |= UBLK_IO_F_META;

	if (req->cmd_flags & REQ_FUA)
		flags |= UBLK_IO_F_FUA;

	if (req->cmd_flags & REQ_NOUNMAP)
		flags |= UBLK_IO_F_NOUNMAP;

	if (req->cmd_flags & REQ_SWAP)
		flags |= UBLK_IO_F_SWAP;

	return flags;
}
static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
	struct ublk_io *io = &ubq->ios[req->tag];
	u32 ublk_op;

	switch (req_op(req)) {
	case REQ_OP_READ:
		ublk_op = UBLK_IO_OP_READ;
		break;
	case REQ_OP_WRITE:
		ublk_op = UBLK_IO_OP_WRITE;
		break;
	case REQ_OP_FLUSH:
		ublk_op = UBLK_IO_OP_FLUSH;
		break;
	case REQ_OP_DISCARD:
		ublk_op = UBLK_IO_OP_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		ublk_op = UBLK_IO_OP_WRITE_ZEROES;
		break;
	default:
		return BLK_STS_IOERR;
	}

	/* need to translate since kernel may change */
	iod->op_flags = ublk_op | ublk_req_build_flags(req);
	iod->nr_sectors = blk_rq_sectors(req);
	iod->start_sector = blk_rq_pos(req);
	iod->addr = io->addr;

	return BLK_STS_OK;
}
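
/*
 * Illustrative example (userspace side, not driver code): the daemon
 * consumes the descriptor filled here from its mmap()ed command buffer,
 * e.g. using the UAPI accessors from <uapi/linux/ublk_cmd.h>:
 *
 *	const struct ublksrv_io_desc *iod = &iods[tag];
 *	__u8 op = ublksrv_get_op(iod);		// UBLK_IO_OP_*
 *	__u64 off = iod->start_sector << 9;
 *	__u32 len = iod->nr_sectors << 9;
 */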
static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}

static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
	return ubq->ubq_daemon->flags & PF_EXITING;
}
/* todo: handle partial completion */
static inline void __ublk_complete_rq(struct request *req)
{
	struct ublk_queue *ubq = req->mq_hctx->driver_data;
	struct ublk_io *io = &ubq->ios[req->tag];
	unsigned int unmapped_bytes;
	blk_status_t res = BLK_STS_OK;

	/* called from ublk_abort_queue() code path */
	if (io->flags & UBLK_IO_FLAG_ABORTED) {
		res = BLK_STS_IOERR;
		goto exit;
	}

	/* fail the read IO if nothing was read */
	if (!io->res && req_op(req) == REQ_OP_READ)
		io->res = -EIO;

	if (io->res < 0) {
		res = errno_to_blk_status(io->res);
		goto exit;
	}

	/*
	 * FLUSH, DISCARD or WRITE_ZEROES usually won't return any payload
	 * bytes, so end them directly.
	 *
	 * None of them needs unmapping.
	 */
	if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE)
		goto exit;

	/* for READ request, writing data in iod->addr to rq buffers */
	unmapped_bytes = ublk_unmap_io(ubq, req, io);

	/*
	 * Extremely unlikely, since the data was filled in just before.
	 *
	 * Re-read simply for this unlikely case.
	 */
	if (unlikely(unmapped_bytes < io->res))
		io->res = unmapped_bytes;

	if (blk_update_request(req, BLK_STS_OK, io->res))
		blk_mq_requeue_request(req, true);
	else
		__blk_mq_end_request(req, BLK_STS_OK);

	return;
exit:
	blk_mq_end_request(req, res);
}
static void ublk_complete_rq(struct kref *ref)
{
	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
			ref);
	struct request *req = blk_mq_rq_from_pdu(data);

	__ublk_complete_rq(req);
}
/*
 * Since __ublk_rq_task_work always fails requests immediately during
 * exiting, __ublk_fail_req() is only called from abort context during
 * exiting. So the lock is unnecessary.
 *
 * Also aborting may not be started yet, so keep in mind that one failed
 * request may be issued by the block layer again.
 */
static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
		struct request *req)
{
	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);

	if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
		io->flags |= UBLK_IO_FLAG_ABORTED;
		if (ublk_queue_can_use_recovery_reissue(ubq))
			blk_mq_requeue_request(req, false);
		else
			ublk_put_req_ref(ubq, req);
	}
}
static void ubq_complete_io_cmd(struct ublk_io *io, int res,
		unsigned issue_flags)
{
	/* mark this cmd owned by ublksrv */
	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;

	/*
	 * Clear ACTIVE since we are done with this sqe/cmd slot.
	 * We can only accept an io cmd if the slot is not active.
	 */
	io->flags &= ~UBLK_IO_FLAG_ACTIVE;

	/* tell ublksrv one io request is coming */
	io_uring_cmd_done(io->cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS	3

static inline void __ublk_abort_rq(struct ublk_queue *ubq,
		struct request *rq)
{
	/* We cannot process this rq so just requeue it. */
	if (ublk_queue_can_use_recovery(ubq))
		blk_mq_requeue_request(rq, false);
	else
		blk_mq_end_request(rq, BLK_STS_IOERR);

	mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
}
static inline void __ublk_rq_task_work(struct request *req,
		unsigned issue_flags)
{
	struct ublk_queue *ubq = req->mq_hctx->driver_data;
	int tag = req->tag;
	struct ublk_io *io = &ubq->ios[tag];
	unsigned int mapped_bytes;

	pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
			__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
			ublk_get_iod(ubq, req->tag)->addr);

	/*
	 * Task is exiting if either:
	 *
	 * (1) current != ubq_daemon.
	 * io_uring_cmd_complete_in_task() tries to run task_work
	 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
	 *
	 * (2) current->flags & PF_EXITING.
	 */
	if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
		__ublk_abort_rq(ubq, req);
		return;
	}

	if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
		/*
		 * We have not handled the UBLK_IO_NEED_GET_DATA command yet,
		 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
		 */
		if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
			io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
			pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
					__func__, io->cmd->cmd_op, ubq->q_id,
					req->tag, io->flags);
			ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
			return;
		}
		/*
		 * We have handled the UBLK_IO_NEED_GET_DATA command,
		 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
		 * do the copy work.
		 */
		io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
		/* update iod->addr because ublksrv may have passed a new io buffer */
		ublk_get_iod(ubq, req->tag)->addr = io->addr;
		pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
				__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
				ublk_get_iod(ubq, req->tag)->addr);
	}

	mapped_bytes = ublk_map_io(ubq, req, io);

	/* partially mapped, update io descriptor */
	if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
		/*
		 * Nothing mapped, retry until we succeed.
		 *
		 * We may never succeed in mapping any bytes here because
		 * of OOM. TODO: reserve one buffer with a single page pinned
		 * for providing a forward progress guarantee.
		 */
		if (unlikely(!mapped_bytes)) {
			blk_mq_requeue_request(req, false);
			blk_mq_delay_kick_requeue_list(req->q,
					UBLK_REQUEUE_DELAY_MS);
			return;
		}

		ublk_get_iod(ubq, req->tag)->nr_sectors =
			mapped_bytes >> 9;
	}

	ublk_init_req_ref(ubq, req);
	ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
		unsigned issue_flags)
{
	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
	struct ublk_rq_data *data, *tmp;

	io_cmds = llist_reverse_order(io_cmds);
	llist_for_each_entry_safe(data, tmp, io_cmds, node)
		__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
}

static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
{
	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
	struct ublk_rq_data *data, *tmp;

	llist_for_each_entry_safe(data, tmp, io_cmds, node)
		__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
}
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
	struct ublk_queue *ubq = pdu->ubq;

	ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
	struct ublk_io *io;

	if (!llist_add(&data->node, &ubq->io_cmds))
		return;

	io = &ubq->ios[rq->tag];
	/*
	 * If the check passes, we know that this is a re-issued request aborted
	 * previously in monitor_work because the ubq_daemon(cmd's task) is
	 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
	 * because this ioucmd's io_uring context may be freed now if no inflight
	 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
	 *
	 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
	 * (releasing the tag). Then the request is re-started (allocating the
	 * tag) and we are here. Since releasing/allocating a tag implies
	 * smp_mb(), finding UBLK_IO_FLAG_ABORTED guarantees that this is a
	 * re-issued request aborted previously.
	 */
	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
		ublk_abort_io_cmds(ubq);
	} else {
		struct io_uring_cmd *cmd = io->cmd;
		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);

		pdu->ubq = ubq;
		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
	}
}
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
	struct ublk_queue *ubq = rq->mq_hctx->driver_data;

	if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
		if (!ubq->timeout) {
			send_sig(SIGKILL, ubq->ubq_daemon, 0);
			ubq->timeout = true;
		}

		return BLK_EH_DONE;
	}

	return BLK_EH_RESET_TIMER;
}
static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct ublk_queue *ubq = hctx->driver_data;
	struct request *rq = bd->rq;
	blk_status_t res;

	/* fill iod to slot in io cmd buffer */
	res = ublk_setup_iod(ubq, rq);
	if (unlikely(res != BLK_STS_OK))
		return BLK_STS_IOERR;

	/*
	 * With the recovery feature enabled, force_abort is set in
	 * ublk_stop_dev() before calling del_gendisk(). We have to
	 * abort all requeued and new rqs here to let del_gendisk()
	 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
	 * to avoid UAF on the io_uring ctx.
	 *
	 * Note: force_abort is guaranteed to be seen because it is set
	 * before the request queue is unquiesced.
	 */
	if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
		return BLK_STS_IOERR;

	blk_mq_start_request(bd->rq);

	if (unlikely(ubq_daemon_is_dying(ubq))) {
		__ublk_abort_rq(ubq, rq);
		return BLK_STS_OK;
	}

	ublk_queue_cmd(ubq, rq);

	return BLK_STS_OK;
}
static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
		unsigned int hctx_idx)
{
	struct ublk_device *ub = driver_data;
	struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);

	hctx->driver_data = ubq;
	return 0;
}

static const struct blk_mq_ops ublk_mq_ops = {
	.queue_rq       = ublk_queue_rq,
	.init_hctx	= ublk_init_hctx,
	.timeout	= ublk_timeout,
};
static int ublk_ch_open(struct inode *inode, struct file *filp)
{
	struct ublk_device *ub = container_of(inode->i_cdev,
			struct ublk_device, cdev);

	if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
		return -EBUSY;
	filp->private_data = ub;
	return 0;
}

static int ublk_ch_release(struct inode *inode, struct file *filp)
{
	struct ublk_device *ub = filp->private_data;

	clear_bit(UB_STATE_OPEN, &ub->state);
	return 0;
}
/* map pre-allocated per-queue cmd buffer to ublksrv daemon */
static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ublk_device *ub = filp->private_data;
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
	unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
	int q_id, ret = 0;

	spin_lock(&ub->mm_lock);
	if (!ub->mm)
		ub->mm = current->mm;
	if (current->mm != ub->mm)
		ret = -EINVAL;
	spin_unlock(&ub->mm_lock);

	if (ret)
		return ret;

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
	if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
		return -EINVAL;

	q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
	pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
			__func__, q_id, current->pid, vma->vm_start,
			phys_off, (unsigned long)sz);

	if (sz != ublk_queue_cmd_buf_size(ub, q_id))
		return -EINVAL;

	pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
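
/*
 * Illustrative example (userspace side, not driver code): mapping queue
 * q_id's descriptor table read-only, assuming the UAPI constants from
 * <uapi/linux/ublk_cmd.h>:
 *
 *	off_t off = UBLKSRV_CMD_BUF_OFFSET + (off_t)q_id *
 *		UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
 *	struct ublksrv_io_desc *iods = mmap(NULL, cmd_buf_size,
 *		PROT_READ, MAP_SHARED, ublkc_fd, off);
 */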
static void ublk_commit_completion(struct ublk_device *ub,
		const struct ublksrv_io_cmd *ub_cmd)
{
	u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
	struct ublk_queue *ubq = ublk_get_queue(ub, qid);
	struct ublk_io *io = &ubq->ios[tag];
	struct request *req;

	/* now this cmd slot is owned by the ublk driver */
	io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
	io->res = ub_cmd->result;

	/* find the io request and complete */
	req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);

	if (req && likely(!blk_should_fake_timeout(req->q)))
		ublk_put_req_ref(ubq, req);
}
/*
 * When ->ubq_daemon is exiting, either new requests are ended immediately,
 * or any queued io command is drained, so it is safe to abort the queue
 * locklessly.
 */
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
	int i;

	if (!ublk_get_device(ub))
		return;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
			struct request *rq;

			/*
			 * Either we fail the request or ublk_rq_task_work_fn
			 * will do it
			 */
			rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
			if (rq)
				__ublk_fail_req(ubq, io, rq);
		}
	}
	ublk_put_device(ub);
}
static void ublk_daemon_monitor_work(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, monitor_work.work);
	int i;

	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
		struct ublk_queue *ubq = ublk_get_queue(ub, i);

		if (ubq_daemon_is_dying(ubq)) {
			if (ublk_queue_can_use_recovery(ubq))
				schedule_work(&ub->quiesce_work);
			else
				schedule_work(&ub->stop_work);

			/* abort queue is for making forward progress */
			ublk_abort_queue(ub, ubq);
		}
	}

	/*
	 * We can't schedule monitor work after ub's state is no longer
	 * UBLK_S_DEV_LIVE, i.e. after ublk_remove() or __ublk_quiesce_dev()
	 * is started.
	 *
	 * No need for ub->mutex: monitor work is canceled after the state is
	 * marked as not LIVE, so the new state is observed reliably.
	 */
	if (ub->dev_info.state == UBLK_S_DEV_LIVE)
		schedule_delayed_work(&ub->monitor_work,
				UBLK_DAEMON_MONITOR_PERIOD);
}
static inline bool ublk_queue_ready(struct ublk_queue *ubq)
{
	return ubq->nr_io_ready == ubq->q_depth;
}

static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
	io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
}

static void ublk_cancel_queue(struct ublk_queue *ubq)
{
	int i;

	if (!ublk_queue_ready(ubq))
		return;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		if (io->flags & UBLK_IO_FLAG_ACTIVE)
			io_uring_cmd_complete_in_task(io->cmd,
						      ublk_cmd_cancel_cb);
	}

	/* all io commands are canceled */
	ubq->nr_io_ready = 0;
}

/* Cancel all pending commands, must be called after del_gendisk() returns */
static void ublk_cancel_dev(struct ublk_device *ub)
{
	int i;

	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_cancel_queue(ublk_get_queue(ub, i));
}
static bool ublk_check_inflight_rq(struct request *rq, void *data)
{
	bool *idle = data;

	if (blk_mq_request_started(rq)) {
		*idle = false;
		return false;
	}
	return true;
}

static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
{
	bool idle;

	WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
	while (true) {
		idle = true;
		blk_mq_tagset_busy_iter(&ub->tag_set,
				ublk_check_inflight_rq, &idle);
		if (idle)
			break;
		msleep(UBLK_REQUEUE_DELAY_MS);
	}
}
static void __ublk_quiesce_dev(struct ublk_device *ub)
{
	pr_devel("%s: quiesce ub: dev_id %d state %s\n",
			__func__, ub->dev_info.dev_id,
			ub->dev_info.state == UBLK_S_DEV_LIVE ?
			"LIVE" : "QUIESCED");
	blk_mq_quiesce_queue(ub->ub_disk->queue);
	ublk_wait_tagset_rqs_idle(ub);
	ub->dev_info.state = UBLK_S_DEV_QUIESCED;
	ublk_cancel_dev(ub);
	/*
	 * We are going to release the task_struct of ubq_daemon and reset
	 * ->ubq_daemon to NULL, so checking ubq_daemon in monitor_work would
	 * cause UAF. Besides, monitor_work is not necessary in QUIESCED
	 * state since we have already scheduled quiesce_work and quiesced
	 * all ubqs.
	 *
	 * Do not let monitor_work schedule itself if state is QUIESCED.
	 * Cancel it here and re-schedule it in END_USER_RECOVERY to avoid
	 * UAF.
	 */
	cancel_delayed_work_sync(&ub->monitor_work);
}
static void ublk_quiesce_work_fn(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, quiesce_work);

	mutex_lock(&ub->mutex);
	if (ub->dev_info.state != UBLK_S_DEV_LIVE)
		goto unlock;
	__ublk_quiesce_dev(ub);
 unlock:
	mutex_unlock(&ub->mutex);
}
static void ublk_unquiesce_dev(struct ublk_device *ub)
{
	int i;

	pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
			__func__, ub->dev_info.dev_id,
			ub->dev_info.state == UBLK_S_DEV_LIVE ?
			"LIVE" : "QUIESCED");
	/*
	 * quiesce_work has run. We let requeued rqs be aborted
	 * before running fallback_wq. "force_abort" must be seen
	 * after the request queue is unquiesced. Then del_gendisk()
	 * can move on.
	 */
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_get_queue(ub, i)->force_abort = true;

	blk_mq_unquiesce_queue(ub->ub_disk->queue);
	/* We may have requeued some rqs in ublk_quiesce_queue() */
	blk_mq_kick_requeue_list(ub->ub_disk->queue);
}
static void ublk_stop_dev(struct ublk_device *ub)
{
	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
		goto unlock;
	if (ublk_can_use_recovery(ub)) {
		if (ub->dev_info.state == UBLK_S_DEV_LIVE)
			__ublk_quiesce_dev(ub);
		ublk_unquiesce_dev(ub);
	}
	del_gendisk(ub->ub_disk);
	ub->dev_info.state = UBLK_S_DEV_DEAD;
	ub->dev_info.ublksrv_pid = -1;
	put_disk(ub->ub_disk);
	ub->ub_disk = NULL;
 unlock:
	ublk_cancel_dev(ub);
	mutex_unlock(&ub->mutex);
	cancel_delayed_work_sync(&ub->monitor_work);
}
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
	mutex_lock(&ub->mutex);
	ubq->nr_io_ready++;
	if (ublk_queue_ready(ubq)) {
		ubq->ubq_daemon = current;
		get_task_struct(ubq->ubq_daemon);
		ub->nr_queues_ready++;

		if (capable(CAP_SYS_ADMIN))
			ub->nr_privileged_daemon++;
	}
	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
		complete_all(&ub->completion);
	mutex_unlock(&ub->mutex);
}
static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
		int tag)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);

	ublk_queue_cmd(ubq, req);
}
static inline int ublk_check_cmd_op(u32 cmd_op)
{
	u32 ioc_type = _IOC_TYPE(cmd_op);

	if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
		return -EOPNOTSUPP;

	if (ioc_type != 'u' && ioc_type != 0)
		return -EOPNOTSUPP;

	return 0;
}
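
/*
 * Illustrative note: with UBLK_F_CMD_IOCTL_ENCODE, io command opcodes
 * are ioctl-encoded in the 'u' class; e.g. the UAPI defines
 * UBLK_U_IO_FETCH_REQ as _IOWR('u', UBLK_IO_FETCH_REQ, struct
 * ublksrv_io_cmd), so _IOC_TYPE() above is 'u' and _IOC_NR() recovers
 * the legacy opcode. Legacy (type 0) opcodes pass only when
 * CONFIG_BLKDEV_UBLK_LEGACY_OPCODES is enabled.
 */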
static inline void ublk_fill_io_cmd(struct ublk_io *io,
		struct io_uring_cmd *cmd, unsigned long buf_addr)
{
	io->cmd = cmd;
	io->flags |= UBLK_IO_FLAG_ACTIVE;
	io->addr = buf_addr;
}
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
		unsigned int issue_flags,
		const struct ublksrv_io_cmd *ub_cmd)
{
	struct ublk_device *ub = cmd->file->private_data;
	struct ublk_queue *ubq;
	struct ublk_io *io;
	u32 cmd_op = cmd->cmd_op;
	unsigned tag = ub_cmd->tag;
	int ret = -EINVAL;
	struct request *req;

	pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
			__func__, cmd->cmd_op, ub_cmd->q_id, tag,
			ub_cmd->result);

	if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
		goto out;

	ubq = ublk_get_queue(ub, ub_cmd->q_id);
	if (!ubq || ub_cmd->q_id != ubq->q_id)
		goto out;

	if (ubq->ubq_daemon && ubq->ubq_daemon != current)
		goto out;

	if (tag >= ubq->q_depth)
		goto out;

	io = &ubq->ios[tag];

	/* there is a pending io cmd, something must be wrong */
	if (io->flags & UBLK_IO_FLAG_ACTIVE) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * ensure that the user issues UBLK_IO_NEED_GET_DATA
	 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
	 */
	if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
			^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
		goto out;

	if (ublk_support_user_copy(ubq) && ub_cmd->addr) {
		ret = -EINVAL;
		goto out;
	}

	ret = ublk_check_cmd_op(cmd_op);
	if (ret)
		goto out;

	ret = -EINVAL;
	switch (_IOC_NR(cmd_op)) {
	case UBLK_IO_FETCH_REQ:
		/* UBLK_IO_FETCH_REQ is only allowed before the queue is setup */
		if (ublk_queue_ready(ubq)) {
			ret = -EBUSY;
			goto out;
		}
		/*
		 * The io is being handled by the server, so COMMIT_RQ is
		 * expected instead of FETCH_REQ
		 */
		if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
			goto out;

		if (!ublk_support_user_copy(ubq)) {
			/*
			 * FETCH_RQ has to provide an IO buffer if NEED GET
			 * DATA is not enabled
			 */
			if (!ub_cmd->addr && !ublk_need_get_data(ubq))
				goto out;
		}

		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
		ublk_mark_io_ready(ub, ubq);
		break;
	case UBLK_IO_COMMIT_AND_FETCH_REQ:
		req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);

		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
			goto out;

		if (!ublk_support_user_copy(ubq)) {
			/*
			 * COMMIT_AND_FETCH_REQ has to provide an IO buffer if
			 * NEED GET DATA is not enabled or it is a Read IO.
			 */
			if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
						req_op(req) == REQ_OP_READ))
				goto out;
		}
		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
		ublk_commit_completion(ub, ub_cmd);
		break;
	case UBLK_IO_NEED_GET_DATA:
		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
			goto out;
		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
		break;
	default:
		goto out;
	}

	return -EIOCBQUEUED;

 out:
	io_uring_cmd_done(cmd, ret, 0, issue_flags);
	pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
			__func__, cmd_op, tag, ret, io->flags);
	return -EIOCBQUEUED;
}
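
/*
 * Illustrative example (userspace side, sketch assuming liburing and an
 * SQE128 ring; the passthrough payload layout is an assumption here):
 * arming one queue slot with FETCH_REQ:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct ublksrv_io_cmd *c = (struct ublksrv_io_cmd *)sqe->cmd;
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = ublkc_fd;			// /dev/ublkcN
 *	sqe->cmd_op = UBLK_U_IO_FETCH_REQ;
 *	c->q_id = q_id;
 *	c->tag = tag;
 *	c->addr = (__u64)(uintptr_t)io_buf;	// omit with UBLK_F_USER_COPY
 */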
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
		struct ublk_queue *ubq, int tag, size_t offset)
{
	struct request *req;

	if (!ublk_need_req_ref(ubq))
		return NULL;

	req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
	if (!req)
		return NULL;

	if (!ublk_get_req_ref(ubq, req))
		return NULL;

	if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
		goto fail_put;

	if (!ublk_rq_has_data(req))
		goto fail_put;

	if (offset > blk_rq_bytes(req))
		goto fail_put;

	return req;
fail_put:
	ublk_put_req_ref(ubq, req);
	return NULL;
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	/*
	 * Not necessary for async retry, but let's keep it simple and always
	 * copy the values to avoid any potential reuse.
	 */
	const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
	const struct ublksrv_io_cmd ub_cmd = {
		.q_id = READ_ONCE(ub_src->q_id),
		.tag = READ_ONCE(ub_src->tag),
		.result = READ_ONCE(ub_src->result),
		.addr = READ_ONCE(ub_src->addr)
	};

	return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
}
static inline bool ublk_check_ubuf_dir(const struct request *req,
		int ubuf_dir)
{
	/* copy ubuf to request pages */
	if (req_op(req) == REQ_OP_READ && ubuf_dir == ITER_SOURCE)
		return true;

	/* copy request pages to ubuf */
	if (req_op(req) == REQ_OP_WRITE && ubuf_dir == ITER_DEST)
		return true;

	return false;
}
static struct request *ublk_check_and_get_req(struct kiocb *iocb,
		struct iov_iter *iter, size_t *off, int dir)
{
	struct ublk_device *ub = iocb->ki_filp->private_data;
	struct ublk_queue *ubq;
	struct request *req;
	size_t buf_off;
	u16 tag, q_id;

	if (!ub)
		return ERR_PTR(-EACCES);

	if (!user_backed_iter(iter))
		return ERR_PTR(-EACCES);

	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
		return ERR_PTR(-EACCES);

	tag = ublk_pos_to_tag(iocb->ki_pos);
	q_id = ublk_pos_to_hwq(iocb->ki_pos);
	buf_off = ublk_pos_to_buf_off(iocb->ki_pos);

	if (q_id >= ub->dev_info.nr_hw_queues)
		return ERR_PTR(-EINVAL);

	ubq = ublk_get_queue(ub, q_id);
	if (!ubq)
		return ERR_PTR(-EINVAL);

	if (tag >= ubq->q_depth)
		return ERR_PTR(-EINVAL);

	req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
	if (!req)
		return ERR_PTR(-EINVAL);

	if (!req->mq_hctx || !req->mq_hctx->driver_data)
		goto fail;

	if (!ublk_check_ubuf_dir(req, dir))
		goto fail;

	*off = buf_off;
	return req;
fail:
	ublk_put_req_ref(ubq, req);
	return ERR_PTR(-EACCES);
}
static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct ublk_queue *ubq;
	struct request *req;
	size_t buf_off;
	size_t ret;

	req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
	ubq = req->mq_hctx->driver_data;
	ublk_put_req_ref(ubq, req);

	return ret;
}

static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct ublk_queue *ubq;
	struct request *req;
	size_t buf_off;
	size_t ret;

	req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
	ubq = req->mq_hctx->driver_data;
	ublk_put_req_ref(ubq, req);

	return ret;
}
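
/*
 * Illustrative example (userspace side, not driver code): with
 * UBLK_F_USER_COPY the daemon moves data through the two handlers above
 * with plain pread()/pwrite() on /dev/ublkcN at the per-(queue, tag)
 * position shown earlier:
 *
 *	// WRITE request: fetch the data to be written out of the kernel
 *	pread(ublkc_fd, buf, iod->nr_sectors << 9, pos);
 *	// READ request: supply the data the request completes with
 *	pwrite(ublkc_fd, buf, iod->nr_sectors << 9, pos);
 */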
static const struct file_operations ublk_ch_fops = {
	.owner = THIS_MODULE,
	.open = ublk_ch_open,
	.release = ublk_ch_release,
	.llseek = no_llseek,
	.read_iter = ublk_ch_read_iter,
	.write_iter = ublk_ch_write_iter,
	.uring_cmd = ublk_ch_uring_cmd,
	.mmap = ublk_ch_mmap,
};
static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
	int size = ublk_queue_cmd_buf_size(ub, q_id);
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);

	if (ubq->ubq_daemon)
		put_task_struct(ubq->ubq_daemon);
	if (ubq->io_cmd_buf)
		free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
}
static int ublk_init_queue(struct ublk_device *ub, int q_id)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
	void *ptr;
	int size;

	ubq->flags = ub->dev_info.flags;
	ubq->q_id = q_id;
	ubq->q_depth = ub->dev_info.queue_depth;
	size = ublk_queue_cmd_buf_size(ub, q_id);

	ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
	if (!ptr)
		return -ENOMEM;

	ubq->io_cmd_buf = ptr;
	ubq->dev = ub;
	return 0;
}

static void ublk_deinit_queues(struct ublk_device *ub)
{
	int nr_queues = ub->dev_info.nr_hw_queues;
	int i;

	if (!ub->__queues)
		return;

	for (i = 0; i < nr_queues; i++)
		ublk_deinit_queue(ub, i);
	kfree(ub->__queues);
}

static int ublk_init_queues(struct ublk_device *ub)
{
	int nr_queues = ub->dev_info.nr_hw_queues;
	int depth = ub->dev_info.queue_depth;
	int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
	int i, ret = -ENOMEM;

	ub->queue_size = ubq_size;
	ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
	if (!ub->__queues)
		return ret;

	for (i = 0; i < nr_queues; i++) {
		if (ublk_init_queue(ub, i))
			goto fail;
	}

	init_completion(&ub->completion);
	return 0;

 fail:
	ublk_deinit_queues(ub);
	return ret;
}
static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
{
	int i = idx;
	int err;

	spin_lock(&ublk_idr_lock);
	/* allocate id, if @id >= 0, we're requesting that specific id */
	if (i >= 0) {
		err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
	}
	spin_unlock(&ublk_idr_lock);

	if (err >= 0)
		ub->ub_number = err;

	return err;
}

static void ublk_free_dev_number(struct ublk_device *ub)
{
	spin_lock(&ublk_idr_lock);
	idr_remove(&ublk_index_idr, ub->ub_number);
	wake_up_all(&ublk_idr_wq);
	spin_unlock(&ublk_idr_lock);
}

static void ublk_cdev_rel(struct device *dev)
{
	struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);

	blk_mq_free_tag_set(&ub->tag_set);
	ublk_deinit_queues(ub);
	ublk_free_dev_number(ub);
	mutex_destroy(&ub->mutex);
	kfree(ub);
}
static int ublk_add_chdev(struct ublk_device *ub)
{
	struct device *dev = &ub->cdev_dev;
	int minor = ub->ub_number;
	int ret;

	dev->parent = ublk_misc.this_device;
	dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
	dev->class = &ublk_chr_class;
	dev->release = ublk_cdev_rel;
	device_initialize(dev);

	ret = dev_set_name(dev, "ublkc%d", minor);
	if (ret)
		goto fail;

	cdev_init(&ub->cdev, &ublk_ch_fops);
	ret = cdev_device_add(&ub->cdev, dev);
	if (ret)
		goto fail;

	ublks_added++;
	return 0;
 fail:
	put_device(dev);
	return ret;
}

static void ublk_stop_work_fn(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, stop_work);

	ublk_stop_dev(ub);
}
/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
	unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;

	ub->dev_info.max_io_buf_bytes =
		round_down(max_io_bytes, PAGE_SIZE);
}
static int ublk_add_tag_set(struct ublk_device *ub)
{
	ub->tag_set.ops = &ublk_mq_ops;
	ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
	ub->tag_set.queue_depth = ub->dev_info.queue_depth;
	ub->tag_set.numa_node = NUMA_NO_NODE;
	ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
	ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ub->tag_set.driver_data = ub;
	return blk_mq_alloc_tag_set(&ub->tag_set);
}
static void ublk_remove(struct ublk_device *ub)
{
	ublk_stop_dev(ub);
	cancel_work_sync(&ub->stop_work);
	cancel_work_sync(&ub->quiesce_work);
	cdev_device_del(&ub->cdev, &ub->cdev_dev);
	put_device(&ub->cdev_dev);
	ublks_added--;
}

static struct ublk_device *ublk_get_device_from_id(int idx)
{
	struct ublk_device *ub = NULL;

	if (idx < 0)
		return NULL;

	spin_lock(&ublk_idr_lock);
	ub = idr_find(&ublk_index_idr, idx);
	if (ub)
		ub = ublk_get_device(ub);
	spin_unlock(&ublk_idr_lock);

	return ub;
}
static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	int ublksrv_pid = (int)header->data[0];
	struct gendisk *disk;
	int ret = -EINVAL;

	if (ublksrv_pid <= 0)
		return -EINVAL;

	if (wait_for_completion_interruptible(&ub->completion) != 0)
		return -EINTR;

	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);

	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
	    test_bit(UB_STATE_USED, &ub->state)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
	if (IS_ERR(disk)) {
		ret = PTR_ERR(disk);
		goto out_unlock;
	}
	sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
	disk->fops = &ub_fops;
	disk->private_data = ub;

	ub->dev_info.ublksrv_pid = ublksrv_pid;
	ub->ub_disk = disk;

	ret = ublk_apply_params(ub);
	if (ret)
		goto out_put_disk;

	/* don't probe partitions if any ubq daemon is un-trusted */
	if (ub->nr_privileged_daemon != ub->nr_queues_ready)
		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);

	get_device(&ub->cdev_dev);
	ub->dev_info.state = UBLK_S_DEV_LIVE;
	ret = add_disk(disk);
	if (ret) {
		/*
		 * Has to drop the reference since ->free_disk won't be
		 * called in case of add_disk failure.
		 */
		ub->dev_info.state = UBLK_S_DEV_DEAD;
		ublk_put_device(ub);
		goto out_put_disk;
	}
	set_bit(UB_STATE_USED, &ub->state);
out_put_disk:
	if (ret)
		put_disk(disk);
out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}
static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	cpumask_var_t cpumask;
	unsigned long queue;
	unsigned int retlen;
	unsigned int i;
	int ret;

	if (header->len * BITS_PER_BYTE < nr_cpu_ids)
		return -EINVAL;
	if (header->len & (sizeof(unsigned long)-1))
		return -EINVAL;
	if (!header->addr)
		return -EINVAL;

	queue = header->data[0];
	if (queue >= ub->dev_info.nr_hw_queues)
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	for_each_possible_cpu(i) {
		if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
			cpumask_set_cpu(i, cpumask);
	}

	ret = -EFAULT;
	retlen = min_t(unsigned short, header->len, cpumask_size());
	if (copy_to_user(argp, cpumask, retlen))
		goto out_free_cpumask;
	if (retlen != header->len &&
	    clear_user(argp + retlen, header->len - retlen))
		goto out_free_cpumask;

	ret = 0;
out_free_cpumask:
	free_cpumask_var(cpumask);
	return ret;
}
static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
{
	pr_devel("%s: dev id %d flags %llx\n", __func__,
			info->dev_id, info->flags);
	pr_devel("\t nr_hw_queues %d queue_depth %d\n",
			info->nr_hw_queues, info->queue_depth);
}
static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublksrv_ctrl_dev_info info;
	struct ublk_device *ub;
	int ret = -EINVAL;

	if (header->len < sizeof(info) || !header->addr)
		return -EINVAL;
	if (header->queue_id != (u16)-1) {
		pr_warn("%s: queue_id is wrong %x\n",
			__func__, header->queue_id);
		return -EINVAL;
	}

	if (copy_from_user(&info, argp, sizeof(info)))
		return -EFAULT;

	if (capable(CAP_SYS_ADMIN))
		info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
	else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
		return -EPERM;

	/*
	 * An unprivileged device can't be trusted, but RECOVERY and
	 * RECOVERY_REISSUE may still hang error handling, so the recovery
	 * features can't be supported for unprivileged ublk now.
	 *
	 * TODO: provide forward progress for the RECOVERY handler, so that
	 * an unprivileged device can benefit from it.
	 */
	if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
		info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
				UBLK_F_USER_RECOVERY);

	/* the created device is always owned by current user */
	ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);

	if (header->dev_id != info.dev_id) {
		pr_warn("%s: dev id not match %u %u\n",
			__func__, header->dev_id, info.dev_id);
		return -EINVAL;
	}

	ublk_dump_dev_info(&info);

	ret = mutex_lock_killable(&ublk_ctl_mutex);
	if (ret)
		return ret;

	ret = -EACCES;
	if (ublks_added >= ublks_max)
		goto out_unlock;

	ret = -ENOMEM;
	ub = kzalloc(sizeof(*ub), GFP_KERNEL);
	if (!ub)
		goto out_unlock;
	mutex_init(&ub->mutex);
	spin_lock_init(&ub->mm_lock);
	INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
	INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
	INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);

	ret = ublk_alloc_dev_number(ub, header->dev_id);
	if (ret < 0)
		goto out_free_ub;

	memcpy(&ub->dev_info, &info, sizeof(info));

	/* update device id */
	ub->dev_info.dev_id = ub->ub_number;

	/*
	 * 64bit flags will be copied back to userspace as the feature
	 * negotiation result, so we have to clear flags which the driver
	 * doesn't support yet; then userspace can get the correct flags
	 * (features) to handle.
	 */
	ub->dev_info.flags &= UBLK_F_ALL;

	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
		UBLK_F_URING_CMD_COMP_IN_TASK;

	/* GET_DATA isn't needed any more with USER_COPY */
	if (ub->dev_info.flags & UBLK_F_USER_COPY)
		ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;

	/* We are not ready to support zero copy */
	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;

	ub->dev_info.nr_hw_queues = min_t(unsigned int,
			ub->dev_info.nr_hw_queues, nr_cpu_ids);
	ublk_align_max_io_size(ub);

	ret = ublk_init_queues(ub);
	if (ret)
		goto out_free_dev_number;

	ret = ublk_add_tag_set(ub);
	if (ret)
		goto out_deinit_queues;

	ret = -EFAULT;
	if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
		goto out_free_tag_set;

	/*
	 * Add the char dev so that the ublksrv daemon can be setup.
	 * ublk_add_chdev() will cleanup everything if it fails.
	 */
	ret = ublk_add_chdev(ub);
	goto out_unlock;

out_free_tag_set:
	blk_mq_free_tag_set(&ub->tag_set);
out_deinit_queues:
	ublk_deinit_queues(ub);
out_free_dev_number:
	ublk_free_dev_number(ub);
out_free_ub:
	mutex_destroy(&ub->mutex);
	kfree(ub);
out_unlock:
	mutex_unlock(&ublk_ctl_mutex);
	return ret;
}
static inline bool ublk_idr_freed(int id)
{
	void *ptr;

	spin_lock(&ublk_idr_lock);
	ptr = idr_find(&ublk_index_idr, id);
	spin_unlock(&ublk_idr_lock);

	return ptr == NULL;
}

static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
{
	struct ublk_device *ub = *p_ub;
	int idx = ub->ub_number;
	int ret;

	ret = mutex_lock_killable(&ublk_ctl_mutex);
	if (ret)
		return ret;

	if (!test_bit(UB_STATE_DELETED, &ub->state)) {
		ublk_remove(ub);
		set_bit(UB_STATE_DELETED, &ub->state);
	}

	/* Mark the reference as consumed */
	*p_ub = NULL;
	ublk_put_device(ub);
	mutex_unlock(&ublk_ctl_mutex);

	/*
	 * Wait until the idr is removed, then it can be reused after the
	 * DEL_DEV command is returned.
	 *
	 * If we return because of a user interrupt, a future delete command
	 * may come:
	 *
	 * - the device number isn't freed, this device won't or needn't
	 *   be deleted again, since UB_STATE_DELETED is set, and the device
	 *   will be released after the last reference is dropped
	 *
	 * - the device number is freed already, and we will not find this
	 *   device via ublk_get_device_from_id()
	 */
	wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));

	return 0;
}
static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);

	pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
			__func__, cmd->cmd_op, header->dev_id, header->queue_id,
			header->data[0], header->addr, header->len);
}

static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
	ublk_stop_dev(ub);
	cancel_work_sync(&ub->stop_work);
	cancel_work_sync(&ub->quiesce_work);

	return 0;
}
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;

	if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
		return -EINVAL;

	if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
		return -EFAULT;

	return 0;
}

/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
{
	ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
	ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);

	if (ub->ub_disk) {
		ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
		ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
	} else {
		ub->params.devt.disk_major = 0;
		ub->params.devt.disk_minor = 0;
	}

	ub->params.types |= UBLK_PARAM_TYPE_DEVT;
}
static int ublk_ctrl_get_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	mutex_lock(&ub->mutex);
	ublk_ctrl_fill_params_devt(ub);
	if (copy_to_user(argp, &ub->params, ph.len))
		ret = -EFAULT;
	else
		ret = 0;
	mutex_unlock(&ub->mutex);

	return ret;
}

static int ublk_ctrl_set_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len || !ph.types)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	/* parameters can only be changed when device isn't live */
	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
		ret = -EACCES;
	} else if (copy_from_user(&ub->params, argp, ph.len)) {
		ret = -EFAULT;
	} else {
		/* clear all we don't support yet */
		ub->params.types &= UBLK_PARAM_TYPE_ALL;
		ret = ublk_validate_params(ub);
		if (ret)
			ub->params.types = 0;
	}
	mutex_unlock(&ub->mutex);

	return ret;
}
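
/*
 * Illustrative example (userspace side, sketch only): a minimal
 * SET_PARAMS payload for a 1 GiB device with 512-byte logical sectors;
 * 'types' selects which parameter groups the driver honors:
 *
 *	struct ublk_params p = {
 *		.len = sizeof(p),
 *		.types = UBLK_PARAM_TYPE_BASIC,
 *		.basic = {
 *			.logical_bs_shift = 9,
 *			.physical_bs_shift = 12,
 *			.io_opt_shift = 12,
 *			.io_min_shift = 9,
 *			.max_sectors = 128,		// 64 KiB per IO
 *			.dev_sectors = 1ULL << 21,	// 1 GiB / 512
 *		},
 *	};
 */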
static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
	int i;

	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));

	/* All old ioucmds have to be completed */
	WARN_ON_ONCE(ubq->nr_io_ready);

	/* old daemon is PF_EXITING, put it now */
	put_task_struct(ubq->ubq_daemon);
	/* We have to reset it to NULL, otherwise ub won't accept a new FETCH_REQ */
	ubq->ubq_daemon = NULL;
	ubq->timeout = false;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		/* forget everything now and be ready for new FETCH_REQ */
		io->flags = 0;
		io->cmd = NULL;
		io->addr = 0;
	}
}
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	int ret = -EINVAL;
	int i;

	mutex_lock(&ub->mutex);
	if (!ublk_can_use_recovery(ub))
		goto out_unlock;
	/*
	 * START_RECOVERY is only allowed after:
	 *
	 * (1) UB_STATE_OPEN is not set, which means the dying process has
	 *     exited and the related io_uring ctx is freed, so the file
	 *     struct of /dev/ublkcX is released.
	 *
	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
	 *     (a) has quiesced the request queue
	 *     (b) has requeued every inflight rq whose io_flags is ACTIVE
	 *     (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
	 *     (d) has completed/canceled all ioucmds owned by the dying process
	 */
	if (test_bit(UB_STATE_OPEN, &ub->state) ||
			ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
		ret = -EBUSY;
		goto out_unlock;
	}
	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
	ub->mm = NULL;
	ub->nr_queues_ready = 0;
	ub->nr_privileged_daemon = 0;
	init_completion(&ub->completion);
	ret = 0;
out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	int ublksrv_pid = (int)header->data[0];
	int ret = -EINVAL;

	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
	/* wait until the new ubq_daemon has sent all FETCH_REQs */
	if (wait_for_completion_interruptible(&ub->completion))
		return -EINTR;

	pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);

	mutex_lock(&ub->mutex);
	if (!ublk_can_use_recovery(ub))
		goto out_unlock;

	if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
		ret = -EBUSY;
		goto out_unlock;
	}
	ub->dev_info.ublksrv_pid = ublksrv_pid;
	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
			__func__, ublksrv_pid, header->dev_id);
	blk_mq_unquiesce_queue(ub->ub_disk->queue);
	pr_devel("%s: queue unquiesced, dev id %d.\n",
			__func__, header->dev_id);
	blk_mq_kick_requeue_list(ub->ub_disk->queue);
	ub->dev_info.state = UBLK_S_DEV_LIVE;
	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
	ret = 0;
out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}
static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;

	if (header->len != UBLK_FEATURES_LEN || !header->addr)
		return -EINVAL;

	if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
		return -EFAULT;

	return 0;
}
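
/*
 * Illustrative note: userspace typically probes the driver before
 * ADD_DEV by issuing UBLK_U_CMD_GET_FEATURES on /dev/ublk-control with
 * header->addr pointing at a __u64 and header->len == UBLK_FEATURES_LEN,
 * then masks its requested flags against the returned feature set.
 */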
/*
 * All control commands are sent via /dev/ublk-control, so we have to check
 * the destination device's permission
 */
static int ublk_char_dev_permission(struct ublk_device *ub,
		const char *dev_path, int mask)
{
	struct path path;
	struct kstat stat;
	int err;

	err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
	if (err)
		goto exit;

	err = -EPERM;
	if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
		goto exit;

	err = inode_permission(&nop_mnt_idmap,
			d_backing_inode(path.dentry), mask);
exit:
	path_put(&path);
	return err;
}
static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
	bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	char *dev_path = NULL;
	int ret;
	int mask;

	if (!unprivileged) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/*
		 * The newly added command UBLK_CMD_GET_DEV_INFO2 includes
		 * char_dev_path in its payload too, since userspace may not
		 * know if the specified device was created as unprivileged
		 * mode.
		 */
		if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
			return 0;
	}

	/*
	 * User has to provide the char device path for unprivileged ublk
	 *
	 * header->addr always points to the dev path buffer, and
	 * header->dev_path_len records the length of the dev path buffer.
	 */
	if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
		return -EINVAL;

	if (header->len < header->dev_path_len)
		return -EINVAL;

	dev_path = kmalloc(header->dev_path_len + 1, GFP_KERNEL);
	if (!dev_path)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(dev_path, argp, header->dev_path_len))
		goto exit;
	dev_path[header->dev_path_len] = 0;

	ret = -EINVAL;
	switch (_IOC_NR(cmd->cmd_op)) {
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
	case UBLK_CMD_GET_QUEUE_AFFINITY:
	case UBLK_CMD_GET_PARAMS:
	case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
		mask = MAY_READ;
		break;
	case UBLK_CMD_START_DEV:
	case UBLK_CMD_STOP_DEV:
	case UBLK_CMD_ADD_DEV:
	case UBLK_CMD_DEL_DEV:
	case UBLK_CMD_SET_PARAMS:
	case UBLK_CMD_START_USER_RECOVERY:
	case UBLK_CMD_END_USER_RECOVERY:
		mask = MAY_READ | MAY_WRITE;
		break;
	default:
		goto exit;
	}

	ret = ublk_char_dev_permission(ub, dev_path, mask);
	if (!ret) {
		header->len -= header->dev_path_len;
		header->addr += header->dev_path_len;
	}
	pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
			__func__, ub->ub_number, cmd->cmd_op,
			ub->dev_info.owner_uid, ub->dev_info.owner_gid,
			dev_path, ret);
exit:
	kfree(dev_path);
	return ret;
}
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	struct ublk_device *ub = NULL;
	u32 cmd_op = cmd->cmd_op;
	int ret = -EINVAL;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ublk_ctrl_cmd_dump(cmd);

	if (!(issue_flags & IO_URING_F_SQE128))
		goto out;

	ret = ublk_check_cmd_op(cmd_op);
	if (ret)
		goto out;

	if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
		ret = ublk_ctrl_get_features(cmd);
		goto out;
	}

	if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
		ret = -ENODEV;
		ub = ublk_get_device_from_id(header->dev_id);
		if (!ub)
			goto out;

		ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
		if (ret)
			goto put_dev;
	}

	switch (_IOC_NR(cmd_op)) {
	case UBLK_CMD_START_DEV:
		ret = ublk_ctrl_start_dev(ub, cmd);
		break;
	case UBLK_CMD_STOP_DEV:
		ret = ublk_ctrl_stop_dev(ub);
		break;
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
		ret = ublk_ctrl_get_dev_info(ub, cmd);
		break;
	case UBLK_CMD_ADD_DEV:
		ret = ublk_ctrl_add_dev(cmd);
		break;
	case UBLK_CMD_DEL_DEV:
		ret = ublk_ctrl_del_dev(&ub);
		break;
	case UBLK_CMD_GET_QUEUE_AFFINITY:
		ret = ublk_ctrl_get_queue_affinity(ub, cmd);
		break;
	case UBLK_CMD_GET_PARAMS:
		ret = ublk_ctrl_get_params(ub, cmd);
		break;
	case UBLK_CMD_SET_PARAMS:
		ret = ublk_ctrl_set_params(ub, cmd);
		break;
	case UBLK_CMD_START_USER_RECOVERY:
		ret = ublk_ctrl_start_recovery(ub, cmd);
		break;
	case UBLK_CMD_END_USER_RECOVERY:
		ret = ublk_ctrl_end_recovery(ub, cmd);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}

put_dev:
	if (ub)
		ublk_put_device(ub);
out:
	io_uring_cmd_done(cmd, ret, 0, issue_flags);
	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
	return -EIOCBQUEUED;
}
static const struct file_operations ublk_ctl_fops = {
	.open		= nonseekable_open,
	.uring_cmd      = ublk_ctrl_uring_cmd,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice ublk_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "ublk-control",
	.fops		= &ublk_ctl_fops,
};
static int __init ublk_init(void)
{
	int ret;

	BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
			UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);

	init_waitqueue_head(&ublk_idr_wq);

	ret = misc_register(&ublk_misc);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
	if (ret)
		goto unregister_mis;

	ret = class_register(&ublk_chr_class);
	if (ret)
		goto free_chrdev_region;

	return 0;

free_chrdev_region:
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
unregister_mis:
	misc_deregister(&ublk_misc);
	return ret;
}

static void __exit ublk_exit(void)
{
	struct ublk_device *ub;
	int id;

	idr_for_each_entry(&ublk_index_idr, ub, id)
		ublk_remove(ub);

	class_unregister(&ublk_chr_class);
	misc_deregister(&ublk_misc);

	idr_destroy(&ublk_index_idr);
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
}
module_init(ublk_init);
module_exit(ublk_exit);

module_param(ublks_max, int, 0444);
MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");

MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_LICENSE("GPL");