1 // SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991-1998  Linus Torvalds
 *  Re-organised Feb 1998 Russell King
 */
7 #include <linux/slab.h>
8 #include <linux/ctype.h>
9 #include <linux/genhd.h>
10 #include <linux/vmalloc.h>
11 #include <linux/blktrace_api.h>
12 #include <linux/raid/detect.h>
15 static int (*check_part
[])(struct parsed_partitions
*) = {
17 * Probe partition formats with tables at disk address 0
18 * that also have an ADFS boot block at 0xdc0.
20 #ifdef CONFIG_ACORN_PARTITION_ICS
23 #ifdef CONFIG_ACORN_PARTITION_POWERTEC
24 adfspart_check_POWERTEC
,
26 #ifdef CONFIG_ACORN_PARTITION_EESOX
31 * Now move on to formats that only have partition info at
32 * disk address 0xdc0. Since these may also have stale
33 * PC/BIOS partition tables, they need to come before
36 #ifdef CONFIG_ACORN_PARTITION_CUMANA
37 adfspart_check_CUMANA
,
39 #ifdef CONFIG_ACORN_PARTITION_ADFS
43 #ifdef CONFIG_CMDLINE_PARTITION
46 #ifdef CONFIG_EFI_PARTITION
47 efi_partition
, /* this must come before msdos */
49 #ifdef CONFIG_SGI_PARTITION
52 #ifdef CONFIG_LDM_PARTITION
53 ldm_partition
, /* this must come before msdos */
55 #ifdef CONFIG_MSDOS_PARTITION
58 #ifdef CONFIG_OSF_PARTITION
61 #ifdef CONFIG_SUN_PARTITION
64 #ifdef CONFIG_AMIGA_PARTITION
67 #ifdef CONFIG_ATARI_PARTITION
70 #ifdef CONFIG_MAC_PARTITION
73 #ifdef CONFIG_ULTRIX_PARTITION
76 #ifdef CONFIG_IBM_PARTITION
79 #ifdef CONFIG_KARMA_PARTITION
82 #ifdef CONFIG_SYSV68_PARTITION
88 static struct parsed_partitions
*allocate_partitions(struct gendisk
*hd
)
90 struct parsed_partitions
*state
;
93 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
97 nr
= disk_max_parts(hd
);
98 state
->parts
= vzalloc(array_size(nr
, sizeof(state
->parts
[0])));
109 static void free_partitions(struct parsed_partitions
*state
)
115 static struct parsed_partitions
*check_partition(struct gendisk
*hd
,
116 struct block_device
*bdev
)
118 struct parsed_partitions
*state
;
121 state
= allocate_partitions(hd
);
124 state
->pp_buf
= (char *)__get_free_page(GFP_KERNEL
);
125 if (!state
->pp_buf
) {
126 free_partitions(state
);
129 state
->pp_buf
[0] = '\0';
132 disk_name(hd
, 0, state
->name
);
133 snprintf(state
->pp_buf
, PAGE_SIZE
, " %s:", state
->name
);
134 if (isdigit(state
->name
[strlen(state
->name
)-1]))
135 sprintf(state
->name
, "p");
138 while (!res
&& check_part
[i
]) {
139 memset(state
->parts
, 0, state
->limit
* sizeof(state
->parts
[0]));
140 res
= check_part
[i
++](state
);
143 * We have hit an I/O error which we don't report now.
144 * But record it, and let the others do their job.
152 printk(KERN_INFO
"%s", state
->pp_buf
);
154 free_page((unsigned long)state
->pp_buf
);
157 if (state
->access_beyond_eod
)
160 * The partition is unrecognized. So report I/O errors if there were any
165 strlcat(state
->pp_buf
,
166 " unable to read partition table\n", PAGE_SIZE
);
167 printk(KERN_INFO
"%s", state
->pp_buf
);
170 free_page((unsigned long)state
->pp_buf
);
171 free_partitions(state
);
175 static ssize_t
part_partition_show(struct device
*dev
,
176 struct device_attribute
*attr
, char *buf
)
178 struct hd_struct
*p
= dev_to_part(dev
);
180 return sprintf(buf
, "%d\n", p
->partno
);
183 static ssize_t
part_start_show(struct device
*dev
,
184 struct device_attribute
*attr
, char *buf
)
186 struct hd_struct
*p
= dev_to_part(dev
);
188 return sprintf(buf
, "%llu\n",(unsigned long long)p
->start_sect
);
191 static ssize_t
part_ro_show(struct device
*dev
,
192 struct device_attribute
*attr
, char *buf
)
194 struct hd_struct
*p
= dev_to_part(dev
);
195 return sprintf(buf
, "%d\n", p
->policy
? 1 : 0);
198 static ssize_t
part_alignment_offset_show(struct device
*dev
,
199 struct device_attribute
*attr
, char *buf
)
201 struct hd_struct
*p
= dev_to_part(dev
);
202 return sprintf(buf
, "%llu\n", (unsigned long long)p
->alignment_offset
);
205 static ssize_t
part_discard_alignment_show(struct device
*dev
,
206 struct device_attribute
*attr
, char *buf
)
208 struct hd_struct
*p
= dev_to_part(dev
);
209 return sprintf(buf
, "%u\n", p
->discard_alignment
);
212 static DEVICE_ATTR(partition
, 0444, part_partition_show
, NULL
);
213 static DEVICE_ATTR(start
, 0444, part_start_show
, NULL
);
214 static DEVICE_ATTR(size
, 0444, part_size_show
, NULL
);
215 static DEVICE_ATTR(ro
, 0444, part_ro_show
, NULL
);
216 static DEVICE_ATTR(alignment_offset
, 0444, part_alignment_offset_show
, NULL
);
217 static DEVICE_ATTR(discard_alignment
, 0444, part_discard_alignment_show
, NULL
);
218 static DEVICE_ATTR(stat
, 0444, part_stat_show
, NULL
);
219 static DEVICE_ATTR(inflight
, 0444, part_inflight_show
, NULL
);
220 #ifdef CONFIG_FAIL_MAKE_REQUEST
221 static struct device_attribute dev_attr_fail
=
222 __ATTR(make
-it
-fail
, 0644, part_fail_show
, part_fail_store
);
225 static struct attribute
*part_attrs
[] = {
226 &dev_attr_partition
.attr
,
227 &dev_attr_start
.attr
,
230 &dev_attr_alignment_offset
.attr
,
231 &dev_attr_discard_alignment
.attr
,
233 &dev_attr_inflight
.attr
,
234 #ifdef CONFIG_FAIL_MAKE_REQUEST
240 static struct attribute_group part_attr_group
= {
244 static const struct attribute_group
*part_attr_groups
[] = {
246 #ifdef CONFIG_BLK_DEV_IO_TRACE
247 &blk_trace_attr_group
,
252 static void part_release(struct device
*dev
)
254 struct hd_struct
*p
= dev_to_part(dev
);
255 blk_free_devt(dev
->devt
);
260 static int part_uevent(struct device
*dev
, struct kobj_uevent_env
*env
)
262 struct hd_struct
*part
= dev_to_part(dev
);
264 add_uevent_var(env
, "PARTN=%u", part
->partno
);
265 if (part
->info
&& part
->info
->volname
[0])
266 add_uevent_var(env
, "PARTNAME=%s", part
->info
->volname
);
270 struct device_type part_type
= {
272 .groups
= part_attr_groups
,
273 .release
= part_release
,
274 .uevent
= part_uevent
,
277 static void delete_partition_work_fn(struct work_struct
*work
)
279 struct hd_struct
*part
= container_of(to_rcu_work(work
), struct hd_struct
,
282 part
->start_sect
= 0;
284 part_stat_set_all(part
, 0);
285 put_device(part_to_dev(part
));
288 void __delete_partition(struct percpu_ref
*ref
)
290 struct hd_struct
*part
= container_of(ref
, struct hd_struct
, ref
);
291 INIT_RCU_WORK(&part
->rcu_work
, delete_partition_work_fn
);
292 queue_rcu_work(system_wq
, &part
->rcu_work
);
296 * Must be called either with bd_mutex held, before a disk can be opened or
297 * after all disk users are gone.
299 void delete_partition(struct gendisk
*disk
, int partno
)
301 struct disk_part_tbl
*ptbl
=
302 rcu_dereference_protected(disk
->part_tbl
, 1);
303 struct hd_struct
*part
;
305 if (partno
>= ptbl
->len
)
308 part
= rcu_dereference_protected(ptbl
->part
[partno
], 1);
312 rcu_assign_pointer(ptbl
->part
[partno
], NULL
);
313 rcu_assign_pointer(ptbl
->last_lookup
, NULL
);
314 kobject_put(part
->holder_dir
);
315 device_del(part_to_dev(part
));
318 * Remove gendisk pointer from idr so that it cannot be looked up
319 * while RCU period before freeing gendisk is running to prevent
320 * use-after-free issues. Note that the device number stays
321 * "in-use" until we really free the gendisk.
323 blk_invalidate_devt(part_devt(part
));
324 hd_struct_kill(part
);
327 static ssize_t
whole_disk_show(struct device
*dev
,
328 struct device_attribute
*attr
, char *buf
)
332 static DEVICE_ATTR(whole_disk
, 0444, whole_disk_show
, NULL
);
335 * Must be called either with bd_mutex held, before a disk can be opened or
336 * after all disk users are gone.
338 struct hd_struct
*add_partition(struct gendisk
*disk
, int partno
,
339 sector_t start
, sector_t len
, int flags
,
340 struct partition_meta_info
*info
)
343 dev_t devt
= MKDEV(0, 0);
344 struct device
*ddev
= disk_to_dev(disk
);
346 struct disk_part_tbl
*ptbl
;
351 * Partitions are not supported on zoned block devices that are used as
354 switch (disk
->queue
->limits
.zoned
) {
356 pr_warn("%s: partitions not supported on host managed zoned block device\n",
358 return ERR_PTR(-ENXIO
);
360 pr_info("%s: disabling host aware zoned block device support due to partitions\n",
362 disk
->queue
->limits
.zoned
= BLK_ZONED_NONE
;
368 err
= disk_expand_part_tbl(disk
, partno
);
371 ptbl
= rcu_dereference_protected(disk
->part_tbl
, 1);
373 if (ptbl
->part
[partno
])
374 return ERR_PTR(-EBUSY
);
376 p
= kzalloc(sizeof(*p
), GFP_KERNEL
);
378 return ERR_PTR(-EBUSY
);
380 if (!init_part_stats(p
)) {
385 seqcount_init(&p
->nr_sects_seq
);
386 pdev
= part_to_dev(p
);
388 p
->start_sect
= start
;
389 p
->alignment_offset
=
390 queue_limit_alignment_offset(&disk
->queue
->limits
, start
);
391 p
->discard_alignment
=
392 queue_limit_discard_alignment(&disk
->queue
->limits
, start
);
395 p
->policy
= get_disk_ro(disk
);
398 struct partition_meta_info
*pinfo
;
400 pinfo
= kzalloc_node(sizeof(*pinfo
), GFP_KERNEL
, disk
->node_id
);
405 memcpy(pinfo
, info
, sizeof(*info
));
409 dname
= dev_name(ddev
);
410 if (isdigit(dname
[strlen(dname
) - 1]))
411 dev_set_name(pdev
, "%sp%d", dname
, partno
);
413 dev_set_name(pdev
, "%s%d", dname
, partno
);
415 device_initialize(pdev
);
416 pdev
->class = &block_class
;
417 pdev
->type
= &part_type
;
420 err
= blk_alloc_devt(p
, &devt
);
425 /* delay uevent until 'holders' subdir is created */
426 dev_set_uevent_suppress(pdev
, 1);
427 err
= device_add(pdev
);
432 p
->holder_dir
= kobject_create_and_add("holders", &pdev
->kobj
);
436 dev_set_uevent_suppress(pdev
, 0);
437 if (flags
& ADDPART_FLAG_WHOLEDISK
) {
438 err
= device_create_file(pdev
, &dev_attr_whole_disk
);
443 err
= hd_ref_init(p
);
445 if (flags
& ADDPART_FLAG_WHOLEDISK
)
446 goto out_remove_file
;
450 /* everything is up and running, commence */
451 rcu_assign_pointer(ptbl
->part
[partno
], p
);
453 /* suppress uevent if the disk suppresses it */
454 if (!dev_get_uevent_suppress(ddev
))
455 kobject_uevent(&pdev
->kobj
, KOBJ_ADD
);
466 device_remove_file(pdev
, &dev_attr_whole_disk
);
468 kobject_put(p
->holder_dir
);
475 static bool disk_unlock_native_capacity(struct gendisk
*disk
)
477 const struct block_device_operations
*bdops
= disk
->fops
;
479 if (bdops
->unlock_native_capacity
&&
480 !(disk
->flags
& GENHD_FL_NATIVE_CAPACITY
)) {
481 printk(KERN_CONT
"enabling native capacity\n");
482 bdops
->unlock_native_capacity(disk
);
483 disk
->flags
|= GENHD_FL_NATIVE_CAPACITY
;
486 printk(KERN_CONT
"truncated\n");
491 int blk_drop_partitions(struct gendisk
*disk
, struct block_device
*bdev
)
493 struct disk_part_iter piter
;
494 struct hd_struct
*part
;
497 if (!disk_part_scan_enabled(disk
))
499 if (bdev
->bd_part_count
|| bdev
->bd_openers
> 1)
501 res
= invalidate_partition(disk
, 0);
505 disk_part_iter_init(&piter
, disk
, DISK_PITER_INCL_EMPTY
);
506 while ((part
= disk_part_iter_next(&piter
)))
507 delete_partition(disk
, part
->partno
);
508 disk_part_iter_exit(&piter
);
513 static bool blk_add_partition(struct gendisk
*disk
, struct block_device
*bdev
,
514 struct parsed_partitions
*state
, int p
)
516 sector_t size
= state
->parts
[p
].size
;
517 sector_t from
= state
->parts
[p
].from
;
518 struct hd_struct
*part
;
523 if (from
>= get_capacity(disk
)) {
525 "%s: p%d start %llu is beyond EOD, ",
526 disk
->disk_name
, p
, (unsigned long long) from
);
527 if (disk_unlock_native_capacity(disk
))
532 if (from
+ size
> get_capacity(disk
)) {
534 "%s: p%d size %llu extends beyond EOD, ",
535 disk
->disk_name
, p
, (unsigned long long) size
);
537 if (disk_unlock_native_capacity(disk
))
541 * We can not ignore partitions of broken tables created by for
542 * example camera firmware, but we limit them to the end of the
543 * disk to avoid creating invalid block devices.
545 size
= get_capacity(disk
) - from
;
548 part
= add_partition(disk
, p
, from
, size
, state
->parts
[p
].flags
,
549 &state
->parts
[p
].info
);
550 if (IS_ERR(part
) && PTR_ERR(part
) != -ENXIO
) {
551 printk(KERN_ERR
" %s: p%d could not be added: %ld\n",
552 disk
->disk_name
, p
, -PTR_ERR(part
));
556 if (IS_BUILTIN(CONFIG_BLK_DEV_MD
) &&
557 (state
->parts
[p
].flags
& ADDPART_FLAG_RAID
))
558 md_autodetect_dev(part_to_dev(part
)->devt
);
563 int blk_add_partitions(struct gendisk
*disk
, struct block_device
*bdev
)
565 struct parsed_partitions
*state
;
566 int ret
= -EAGAIN
, p
, highest
;
568 if (!disk_part_scan_enabled(disk
))
571 state
= check_partition(disk
, bdev
);
576 * I/O error reading the partition table. If we tried to read
577 * beyond EOD, retry after unlocking the native capacity.
579 if (PTR_ERR(state
) == -ENOSPC
) {
580 printk(KERN_WARNING
"%s: partition table beyond EOD, ",
582 if (disk_unlock_native_capacity(disk
))
589 * Partitions are not supported on host managed zoned block devices.
591 if (disk
->queue
->limits
.zoned
== BLK_ZONED_HM
) {
592 pr_warn("%s: ignoring partition table on host managed zoned block device\n",
599 * If we read beyond EOD, try unlocking native capacity even if the
600 * partition table was successfully read as we could be missing some
603 if (state
->access_beyond_eod
) {
605 "%s: partition table partially beyond EOD, ",
607 if (disk_unlock_native_capacity(disk
))
611 /* tell userspace that the media / partition table may have changed */
612 kobject_uevent(&disk_to_dev(disk
)->kobj
, KOBJ_CHANGE
);
615 * Detect the highest partition number and preallocate disk->part_tbl.
616 * This is an optimization and not strictly necessary.
618 for (p
= 1, highest
= 0; p
< state
->limit
; p
++)
619 if (state
->parts
[p
].size
)
621 disk_expand_part_tbl(disk
, highest
);
623 for (p
= 1; p
< state
->limit
; p
++)
624 if (!blk_add_partition(disk
, bdev
, state
, p
))
629 free_partitions(state
);
633 void *read_part_sector(struct parsed_partitions
*state
, sector_t n
, Sector
*p
)
635 struct address_space
*mapping
= state
->bdev
->bd_inode
->i_mapping
;
638 if (n
>= get_capacity(state
->bdev
->bd_disk
)) {
639 state
->access_beyond_eod
= true;
643 page
= read_mapping_page(mapping
,
644 (pgoff_t
)(n
>> (PAGE_SHIFT
- 9)), NULL
);
651 return (unsigned char *)page_address(page
) +
652 ((n
& ((1 << (PAGE_SHIFT
- 9)) - 1)) << SECTOR_SHIFT
);