1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/capability.h>
3 #include <linux/compat.h>
4 #include <linux/blkdev.h>
5 #include <linux/export.h>
7 #include <linux/blkpg.h>
8 #include <linux/hdreg.h>
9 #include <linux/backing-dev.h>
11 #include <linux/blktrace_api.h>
13 #include <linux/uaccess.h>
16 static int blkpg_do_ioctl(struct block_device
*bdev
,
17 struct blkpg_partition __user
*upart
, int op
)
19 struct gendisk
*disk
= bdev
->bd_disk
;
20 struct blkpg_partition p
;
21 long long start
, length
;
23 if (!capable(CAP_SYS_ADMIN
))
25 if (copy_from_user(&p
, upart
, sizeof(struct blkpg_partition
)))
27 if (bdev_is_partition(bdev
))
33 if (op
== BLKPG_DEL_PARTITION
)
34 return bdev_del_partition(disk
, p
.pno
);
36 start
= p
.start
>> SECTOR_SHIFT
;
37 length
= p
.length
>> SECTOR_SHIFT
;
40 case BLKPG_ADD_PARTITION
:
41 /* check if partition is aligned to blocksize */
42 if (p
.start
& (bdev_logical_block_size(bdev
) - 1))
44 return bdev_add_partition(disk
, p
.pno
, start
, length
);
45 case BLKPG_RESIZE_PARTITION
:
46 return bdev_resize_partition(disk
, p
.pno
, start
, length
);
52 static int blkpg_ioctl(struct block_device
*bdev
,
53 struct blkpg_ioctl_arg __user
*arg
)
55 struct blkpg_partition __user
*udata
;
58 if (get_user(op
, &arg
->op
) || get_user(udata
, &arg
->data
))
61 return blkpg_do_ioctl(bdev
, udata
, op
);
65 struct compat_blkpg_ioctl_arg
{
72 static int compat_blkpg_ioctl(struct block_device
*bdev
,
73 struct compat_blkpg_ioctl_arg __user
*arg
)
78 if (get_user(op
, &arg
->op
) || get_user(udata
, &arg
->data
))
81 return blkpg_do_ioctl(bdev
, compat_ptr(udata
), op
);
85 static int blk_ioctl_discard(struct block_device
*bdev
, fmode_t mode
,
90 struct inode
*inode
= bdev
->bd_inode
;
93 if (!(mode
& FMODE_WRITE
))
96 if (!bdev_max_discard_sectors(bdev
))
99 if (copy_from_user(range
, (void __user
*)arg
, sizeof(range
)))
110 if (start
+ len
> bdev_nr_bytes(bdev
))
113 filemap_invalidate_lock(inode
->i_mapping
);
114 err
= truncate_bdev_range(bdev
, mode
, start
, start
+ len
- 1);
117 err
= blkdev_issue_discard(bdev
, start
>> 9, len
>> 9, GFP_KERNEL
);
119 filemap_invalidate_unlock(inode
->i_mapping
);
123 static int blk_ioctl_secure_erase(struct block_device
*bdev
, fmode_t mode
,
130 if (!(mode
& FMODE_WRITE
))
132 if (!bdev_max_secure_erase_sectors(bdev
))
134 if (copy_from_user(range
, argp
, sizeof(range
)))
139 if ((start
& 511) || (len
& 511))
141 if (start
+ len
> bdev_nr_bytes(bdev
))
144 filemap_invalidate_lock(bdev
->bd_inode
->i_mapping
);
145 err
= truncate_bdev_range(bdev
, mode
, start
, start
+ len
- 1);
147 err
= blkdev_issue_secure_erase(bdev
, start
>> 9, len
>> 9,
149 filemap_invalidate_unlock(bdev
->bd_inode
->i_mapping
);
154 static int blk_ioctl_zeroout(struct block_device
*bdev
, fmode_t mode
,
158 uint64_t start
, end
, len
;
159 struct inode
*inode
= bdev
->bd_inode
;
162 if (!(mode
& FMODE_WRITE
))
165 if (copy_from_user(range
, (void __user
*)arg
, sizeof(range
)))
170 end
= start
+ len
- 1;
176 if (end
>= (uint64_t)bdev_nr_bytes(bdev
))
181 /* Invalidate the page cache, including dirty pages */
182 filemap_invalidate_lock(inode
->i_mapping
);
183 err
= truncate_bdev_range(bdev
, mode
, start
, end
);
187 err
= blkdev_issue_zeroout(bdev
, start
>> 9, len
>> 9, GFP_KERNEL
,
188 BLKDEV_ZERO_NOUNMAP
);
191 filemap_invalidate_unlock(inode
->i_mapping
);
195 static int put_ushort(unsigned short __user
*argp
, unsigned short val
)
197 return put_user(val
, argp
);
200 static int put_int(int __user
*argp
, int val
)
202 return put_user(val
, argp
);
205 static int put_uint(unsigned int __user
*argp
, unsigned int val
)
207 return put_user(val
, argp
);
210 static int put_long(long __user
*argp
, long val
)
212 return put_user(val
, argp
);
215 static int put_ulong(unsigned long __user
*argp
, unsigned long val
)
217 return put_user(val
, argp
);
220 static int put_u64(u64 __user
*argp
, u64 val
)
222 return put_user(val
, argp
);
226 static int compat_put_long(compat_long_t __user
*argp
, long val
)
228 return put_user(val
, argp
);
231 static int compat_put_ulong(compat_ulong_t __user
*argp
, compat_ulong_t val
)
233 return put_user(val
, argp
);
239 * This is the equivalent of compat_ptr_ioctl(), to be used by block
240 * drivers that implement only commands that are completely compatible
241 * between 32-bit and 64-bit user space
243 int blkdev_compat_ptr_ioctl(struct block_device
*bdev
, fmode_t mode
,
244 unsigned cmd
, unsigned long arg
)
246 struct gendisk
*disk
= bdev
->bd_disk
;
248 if (disk
->fops
->ioctl
)
249 return disk
->fops
->ioctl(bdev
, mode
, cmd
,
250 (unsigned long)compat_ptr(arg
));
254 EXPORT_SYMBOL(blkdev_compat_ptr_ioctl
);
257 static int blkdev_pr_register(struct block_device
*bdev
,
258 struct pr_registration __user
*arg
)
260 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
261 struct pr_registration reg
;
263 if (!capable(CAP_SYS_ADMIN
))
265 if (!ops
|| !ops
->pr_register
)
267 if (copy_from_user(®
, arg
, sizeof(reg
)))
270 if (reg
.flags
& ~PR_FL_IGNORE_KEY
)
272 return ops
->pr_register(bdev
, reg
.old_key
, reg
.new_key
, reg
.flags
);
275 static int blkdev_pr_reserve(struct block_device
*bdev
,
276 struct pr_reservation __user
*arg
)
278 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
279 struct pr_reservation rsv
;
281 if (!capable(CAP_SYS_ADMIN
))
283 if (!ops
|| !ops
->pr_reserve
)
285 if (copy_from_user(&rsv
, arg
, sizeof(rsv
)))
288 if (rsv
.flags
& ~PR_FL_IGNORE_KEY
)
290 return ops
->pr_reserve(bdev
, rsv
.key
, rsv
.type
, rsv
.flags
);
293 static int blkdev_pr_release(struct block_device
*bdev
,
294 struct pr_reservation __user
*arg
)
296 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
297 struct pr_reservation rsv
;
299 if (!capable(CAP_SYS_ADMIN
))
301 if (!ops
|| !ops
->pr_release
)
303 if (copy_from_user(&rsv
, arg
, sizeof(rsv
)))
308 return ops
->pr_release(bdev
, rsv
.key
, rsv
.type
);
311 static int blkdev_pr_preempt(struct block_device
*bdev
,
312 struct pr_preempt __user
*arg
, bool abort
)
314 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
317 if (!capable(CAP_SYS_ADMIN
))
319 if (!ops
|| !ops
->pr_preempt
)
321 if (copy_from_user(&p
, arg
, sizeof(p
)))
326 return ops
->pr_preempt(bdev
, p
.old_key
, p
.new_key
, p
.type
, abort
);
329 static int blkdev_pr_clear(struct block_device
*bdev
,
330 struct pr_clear __user
*arg
)
332 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
335 if (!capable(CAP_SYS_ADMIN
))
337 if (!ops
|| !ops
->pr_clear
)
339 if (copy_from_user(&c
, arg
, sizeof(c
)))
344 return ops
->pr_clear(bdev
, c
.key
);
347 static int blkdev_flushbuf(struct block_device
*bdev
, fmode_t mode
,
348 unsigned cmd
, unsigned long arg
)
350 if (!capable(CAP_SYS_ADMIN
))
353 invalidate_bdev(bdev
);
357 static int blkdev_roset(struct block_device
*bdev
, fmode_t mode
,
358 unsigned cmd
, unsigned long arg
)
362 if (!capable(CAP_SYS_ADMIN
))
365 if (get_user(n
, (int __user
*)arg
))
367 if (bdev
->bd_disk
->fops
->set_read_only
) {
368 ret
= bdev
->bd_disk
->fops
->set_read_only(bdev
, n
);
372 bdev
->bd_read_only
= n
;
376 static int blkdev_getgeo(struct block_device
*bdev
,
377 struct hd_geometry __user
*argp
)
379 struct gendisk
*disk
= bdev
->bd_disk
;
380 struct hd_geometry geo
;
385 if (!disk
->fops
->getgeo
)
389 * We need to set the startsect first, the driver may
390 * want to override it.
392 memset(&geo
, 0, sizeof(geo
));
393 geo
.start
= get_start_sect(bdev
);
394 ret
= disk
->fops
->getgeo(bdev
, &geo
);
397 if (copy_to_user(argp
, &geo
, sizeof(geo
)))
403 struct compat_hd_geometry
{
405 unsigned char sectors
;
406 unsigned short cylinders
;
410 static int compat_hdio_getgeo(struct block_device
*bdev
,
411 struct compat_hd_geometry __user
*ugeo
)
413 struct gendisk
*disk
= bdev
->bd_disk
;
414 struct hd_geometry geo
;
419 if (!disk
->fops
->getgeo
)
422 memset(&geo
, 0, sizeof(geo
));
424 * We need to set the startsect first, the driver may
425 * want to override it.
427 geo
.start
= get_start_sect(bdev
);
428 ret
= disk
->fops
->getgeo(bdev
, &geo
);
432 ret
= copy_to_user(ugeo
, &geo
, 4);
433 ret
|= put_user(geo
.start
, &ugeo
->start
);
441 /* set the logical block size */
442 static int blkdev_bszset(struct block_device
*bdev
, fmode_t mode
,
447 if (!capable(CAP_SYS_ADMIN
))
451 if (get_user(n
, argp
))
454 if (mode
& FMODE_EXCL
)
455 return set_blocksize(bdev
, n
);
457 if (IS_ERR(blkdev_get_by_dev(bdev
->bd_dev
, mode
| FMODE_EXCL
, &bdev
)))
459 ret
= set_blocksize(bdev
, n
);
460 blkdev_put(bdev
, mode
| FMODE_EXCL
);
466 * Common commands that are handled the same way on native and compat
467 * user space. Note the separate arg/argp parameters that are needed
468 * to deal with the compat_ptr() conversion.
470 static int blkdev_common_ioctl(struct block_device
*bdev
, fmode_t mode
,
471 unsigned cmd
, unsigned long arg
, void __user
*argp
)
473 unsigned int max_sectors
;
477 return blkdev_flushbuf(bdev
, mode
, cmd
, arg
);
479 return blkdev_roset(bdev
, mode
, cmd
, arg
);
481 return blk_ioctl_discard(bdev
, mode
, arg
);
483 return blk_ioctl_secure_erase(bdev
, mode
, argp
);
485 return blk_ioctl_zeroout(bdev
, mode
, arg
);
487 return put_u64(argp
, bdev
->bd_disk
->diskseq
);
489 return blkdev_report_zones_ioctl(bdev
, mode
, cmd
, arg
);
494 return blkdev_zone_mgmt_ioctl(bdev
, mode
, cmd
, arg
);
496 return put_uint(argp
, bdev_zone_sectors(bdev
));
498 return put_uint(argp
, blkdev_nr_zones(bdev
->bd_disk
));
500 return put_int(argp
, bdev_read_only(bdev
) != 0);
501 case BLKSSZGET
: /* get block device logical block size */
502 return put_int(argp
, bdev_logical_block_size(bdev
));
503 case BLKPBSZGET
: /* get block device physical block size */
504 return put_uint(argp
, bdev_physical_block_size(bdev
));
506 return put_uint(argp
, bdev_io_min(bdev
));
508 return put_uint(argp
, bdev_io_opt(bdev
));
510 return put_int(argp
, bdev_alignment_offset(bdev
));
511 case BLKDISCARDZEROES
:
512 return put_uint(argp
, 0);
514 max_sectors
= min_t(unsigned int, USHRT_MAX
,
515 queue_max_sectors(bdev_get_queue(bdev
)));
516 return put_ushort(argp
, max_sectors
);
518 return put_ushort(argp
, !bdev_nonrot(bdev
));
521 if(!capable(CAP_SYS_ADMIN
))
523 bdev
->bd_disk
->bdi
->ra_pages
= (arg
* 512) / PAGE_SIZE
;
526 if (!capable(CAP_SYS_ADMIN
))
528 if (bdev_is_partition(bdev
))
530 return disk_scan_partitions(bdev
->bd_disk
, mode
& ~FMODE_EXCL
);
533 case BLKTRACETEARDOWN
:
534 return blk_trace_ioctl(bdev
, cmd
, argp
);
535 case IOC_PR_REGISTER
:
536 return blkdev_pr_register(bdev
, argp
);
538 return blkdev_pr_reserve(bdev
, argp
);
540 return blkdev_pr_release(bdev
, argp
);
542 return blkdev_pr_preempt(bdev
, argp
, false);
543 case IOC_PR_PREEMPT_ABORT
:
544 return blkdev_pr_preempt(bdev
, argp
, true);
546 return blkdev_pr_clear(bdev
, argp
);
553 * Always keep this in sync with compat_blkdev_ioctl()
554 * to handle all incompatible commands in both functions.
556 * New commands must be compatible and go into blkdev_common_ioctl
558 long blkdev_ioctl(struct file
*file
, unsigned cmd
, unsigned long arg
)
560 struct block_device
*bdev
= I_BDEV(file
->f_mapping
->host
);
561 void __user
*argp
= (void __user
*)arg
;
562 fmode_t mode
= file
->f_mode
;
566 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
567 * to updated it before every ioctl.
569 if (file
->f_flags
& O_NDELAY
)
570 mode
|= FMODE_NDELAY
;
572 mode
&= ~FMODE_NDELAY
;
575 /* These need separate implementations for the data structure */
577 return blkdev_getgeo(bdev
, argp
);
579 return blkpg_ioctl(bdev
, argp
);
581 /* Compat mode returns 32-bit data instead of 'long' */
586 return put_long(argp
,
587 (bdev
->bd_disk
->bdi
->ra_pages
* PAGE_SIZE
) / 512);
589 if (bdev_nr_sectors(bdev
) > ~0UL)
591 return put_ulong(argp
, bdev_nr_sectors(bdev
));
593 /* The data is compatible, but the command number is different */
594 case BLKBSZGET
: /* get block device soft block size (cf. BLKSSZGET) */
595 return put_int(argp
, block_size(bdev
));
597 return blkdev_bszset(bdev
, mode
, argp
);
599 return put_u64(argp
, bdev_nr_bytes(bdev
));
601 /* Incompatible alignment on i386 */
603 return blk_trace_ioctl(bdev
, cmd
, argp
);
608 ret
= blkdev_common_ioctl(bdev
, mode
, cmd
, arg
, argp
);
609 if (ret
!= -ENOIOCTLCMD
)
612 if (!bdev
->bd_disk
->fops
->ioctl
)
614 return bdev
->bd_disk
->fops
->ioctl(bdev
, mode
, cmd
, arg
);
/* 32-bit command numbers: same 0x12 group, but sized for compat types. */
#define BLKBSZGET_32		_IOR(0x12, 112, int)
#define BLKBSZSET_32		_IOW(0x12, 113, int)
#define BLKGETSIZE64_32		_IOR(0x12, 114, int)

/* Most of the generic ioctls are handled in the normal fallback path.
   This assumes the blkdev's low level compat_ioctl always returns
   ENOIOCTLCMD for unknown ioctls. */
626 long compat_blkdev_ioctl(struct file
*file
, unsigned cmd
, unsigned long arg
)
629 void __user
*argp
= compat_ptr(arg
);
630 struct block_device
*bdev
= I_BDEV(file
->f_mapping
->host
);
631 struct gendisk
*disk
= bdev
->bd_disk
;
632 fmode_t mode
= file
->f_mode
;
635 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
636 * to updated it before every ioctl.
638 if (file
->f_flags
& O_NDELAY
)
639 mode
|= FMODE_NDELAY
;
641 mode
&= ~FMODE_NDELAY
;
644 /* These need separate implementations for the data structure */
646 return compat_hdio_getgeo(bdev
, argp
);
648 return compat_blkpg_ioctl(bdev
, argp
);
650 /* Compat mode returns 32-bit data instead of 'long' */
655 return compat_put_long(argp
,
656 (bdev
->bd_disk
->bdi
->ra_pages
* PAGE_SIZE
) / 512);
658 if (bdev_nr_sectors(bdev
) > ~(compat_ulong_t
)0)
660 return compat_put_ulong(argp
, bdev_nr_sectors(bdev
));
662 /* The data is compatible, but the command number is different */
663 case BLKBSZGET_32
: /* get the logical block size (cf. BLKSSZGET) */
664 return put_int(argp
, bdev_logical_block_size(bdev
));
666 return blkdev_bszset(bdev
, mode
, argp
);
667 case BLKGETSIZE64_32
:
668 return put_u64(argp
, bdev_nr_bytes(bdev
));
670 /* Incompatible alignment on i386 */
671 case BLKTRACESETUP32
:
672 return blk_trace_ioctl(bdev
, cmd
, argp
);
677 ret
= blkdev_common_ioctl(bdev
, mode
, cmd
, arg
, argp
);
678 if (ret
== -ENOIOCTLCMD
&& disk
->fops
->compat_ioctl
)
679 ret
= disk
->fops
->compat_ioctl(bdev
, mode
, cmd
, arg
);