// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>
MODULE_ALIAS("mmc:block");

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
/*
 * Set a 10 second timeout for polling write request busy state. Note, mmc core
 * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
 * second software timer to timeout the whole request, so 10 seconds should be
 * ample.
 */
#define MMC_BLK_TIMEOUT_MS  (10 * 1000)
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
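/*
 * Illustrative example (added commentary, not in the original source): a
 * CMD6/MMC_SWITCH argument such as 0x03B30100 carries the EXT_CSD byte
 * index in bits 23:16 and the value in bits 15:8, so:
 *
 *     MMC_EXTRACT_INDEX_FROM_ARG(0x03B30100) == 0xB3
 *     MMC_EXTRACT_VALUE_FROM_ARG(0x03B30100) == 0x01
 */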
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256
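/*
 * Illustrative arithmetic (added commentary, not in the original source):
 * with the default CONFIG_MMC_BLOCK_MINORS=8, the 20-bit minor space alone
 * would allow (1 << 20) / 8 = 131072 devices, so in practice the
 * MAX_DEVICES = 256 cap is what limits the number of mmcblk devices.
 */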
static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

struct mmc_blk_busy_data {
	struct mmc_card *card;
	u32 status;
};
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;
	struct list_head rpmbs;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	struct kref	kref;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
#define MMC_BLK_CQE_RECOVERY	BIT(4)
#define MMC_BLK_TRIM		BIT(5)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
#define MMC_BLK_PART_INVALID	UINT_MAX /* Unknown partition active */
	int	area_type;

	/* debugfs files (only in main mmc_blk_data) */
	struct dentry *status_dentry;
	struct dentry *ext_csd_dentry;
};
/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static struct bus_type mmc_rpmb_bus_type = {
	.name = "mmc_rpmb",
};
/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
	struct device dev;
	struct cdev chrdev;
	int id;
	unsigned int part_index;
	struct mmc_blk_data *md;
	struct list_head node;
};
static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && !kref_get_unless_zero(&md->kref))
		md = NULL;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_kref_release(struct kref *ref)
{
	struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
	int devidx;

	devidx = mmc_get_devidx(md->disk);
	ida_simple_remove(&mmc_blk_ida, devidx);

	mutex_lock(&open_lock);
	md->disk->private_data = NULL;
	mutex_unlock(&open_lock);

	put_disk(md->disk);
	kfree(md);
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	kref_put(&md->kref, mmc_blk_kref_release);
}
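/*
 * Note (added commentary, not in the original source): mmc_blk_get() and
 * mmc_blk_put() bracket every user of the per-slot mmc_blk_data. The kref
 * only drops to zero once the gendisk's private_data holder and all open
 * handles are gone, at which point mmc_blk_kref_release() releases the
 * device index and frees the structure.
 */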
static ssize_t power_ro_lock_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
				   struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_queue *mq;
	struct request *req;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	mq = &md->queue;

	/* Dispatch locking to the block layer */
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req)) {
		count = PTR_ERR(req);
		goto out_put;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	blk_execute_rq(req, false);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
	blk_mq_free_request(req);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}
out_put:
	mmc_blk_put(md);
	return count;
}

static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
		   power_ro_lock_show, power_ro_lock_store);
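/*
 * Usage sketch (added commentary, not in the original source): assuming a
 * boot area disk named mmcblk0boot0, a privileged user can lock it
 * read-only until the next power cycle with something like:
 *
 *     echo 1 > /sys/block/mmcblk0boot0/ro_lock_until_next_power_on
 */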
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);

	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);
static struct attribute *mmc_disk_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_ro_lock_until_next_power_on.attr,
	NULL,
};

static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	umode_t mode = a->mode;

	if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
	    (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	    md->queue.card->ext_csd.boot_ro_lockable) {
		mode = S_IRUGO;
		if (!(md->queue.card->ext_csd.boot_ro_lock &
				EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
			mode |= S_IWUSR;
	}

	mmc_blk_put(md);
	return mode;
}

static const struct attribute_group mmc_disk_attr_group = {
	.is_visible	= mmc_disk_attrs_is_visible,
	.attrs		= mmc_disk_attrs,
};

static const struct attribute_group *mmc_disk_attr_groups[] = {
	&mmc_disk_attr_group,
	NULL,
};
static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		ret = 0;
		if ((mode & BLK_OPEN_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
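/*
 * Worked example (added commentary, not in the original source): the
 * geometry is synthetic. For a 1 GiB card, get_capacity() returns 2097152
 * 512-byte sectors, so the reported CHS geometry becomes
 * 2097152 / (4 * 16) = 32768 cylinders, 4 heads, 16 sectors per track.
 */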
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
	struct mmc_rpmb_data *rpmb;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = memdup_user((void __user *)(unsigned long)
				 idata->ic.data_ptr, idata->buf_bytes);
	if (IS_ERR(idata->buf)) {
		err = PTR_ERR(idata->buf);
		goto idata_err;
	}

	return idata;

idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {}, sbc = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	bool r1b_resp, use_r1b_resp = false;
	unsigned int busy_timeout_ms;
	int err;
	unsigned int target_part;

	if (!card || !md || !idata)
		return -EINVAL;

	/*
	 * RPMB accesses come in from the character device, so we
	 * need to target these explicitly. Else we just target the
	 * partition type for the block device the ioctl() was issued
	 * on.
	 */
	if (idata->rpmb) {
		/* Support multiple RPMB partitions */
		target_part = idata->rpmb->part_index;
		target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
	} else {
		target_part = md->part_type;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, target_part);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (idata->rpmb) {
		sbc.opcode = MMC_SET_BLOCK_COUNT;
		/*
		 * We don't do any blockcount validation because the max size
		 * may be increased by a future standard. We just copy the
		 * 'Reliable Write' bit here.
		 */
		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		mrq.sbc = &sbc;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH))
		return mmc_sanitize(card, idata->ic.cmd_timeout_ms);

	/* If it's an R1B response we need some more preparations. */
	busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
	r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
	if (r1b_resp)
		use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd,
						    busy_timeout_ms);

	mmc_wait_for_req(card->host, &mrq);
	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * Make sure the cache of the PARTITION_CONFIG register and
	 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
	 * changed it successfully.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
	    (cmd.opcode == MMC_SWITCH)) {
		struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);

		/*
		 * Update cache so the next mmc_blk_part_switch call operates
		 * on up-to-date data.
		 */
		card->ext_csd.part_config = value;
		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
	}

	/*
	 * Make sure to update CACHE_CTRL in case it was changed. The cache
	 * will get turned back on if the card is re-initialized, e.g.
	 * suspend/resume or hw reset in recovery.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
	    (cmd.opcode == MMC_SWITCH)) {
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;

		card->ext_csd.cache_ctrl = value;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	/* No need to poll when using HW busy detection. */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		return 0;

	if (mmc_host_is_spi(card->host)) {
		if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
			return mmc_spi_err_check(card);
		return err;
	}

	/* Ensure RPMB/R1B command has completed by polling with CMD13. */
	if (idata->rpmb || r1b_resp)
		err = mmc_poll_for_busy(card, busy_timeout_ms, false,
					MMC_BUSY_IO);

	return err;
}
static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
			     struct mmc_ioc_cmd __user *ic_ptr,
			     struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_ioc_data *idatas[1];
	struct mmc_queue *mq;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	struct request *req;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);
	/* This will be NULL on non-RPMB ioctl():s */
	idata->rpmb = rpmb;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl() into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_done;
	}
	idatas[0] = idata;
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idatas;
	req_to_mmc_queue_req(req)->ioc_count = 1;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
	blk_mq_free_request(req);

cmd_done:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
				   struct mmc_ioc_multi_cmd __user *user,
				   struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_queue *mq;
	int err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	unsigned int i, n;
	struct request *req;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (!num_of_cmds)
		return 0;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	n = num_of_cmds;
	idata = kcalloc(n, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			n = i;
			goto cmd_err;
		}
		/* This will be NULL on non-RPMB ioctl():s */
		idata[i]->rpmb = rpmb;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_err;
	}

	/*
	 * Dispatch the ioctl()s into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_err;
	}
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idata;
	req_to_mmc_queue_req(req)->ioc_count = n;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

	/* copy to user if data and response */
	for (i = 0; i < n && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

	blk_mq_free_request(req);

cmd_err:
	for (i = 0; i < n; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
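/*
 * Userspace usage sketch (added commentary, not in the original source;
 * field names follow the UAPI in <linux/mmc/ioctl.h>). A caller batches
 * several commands so they execute back-to-back through the queue:
 *
 *     struct mmc_ioc_multi_cmd *mioc =
 *             malloc(sizeof(*mioc) + 2 * sizeof(struct mmc_ioc_cmd));
 *     mioc->num_of_cmds = 2;
 *     // ... fill mioc->cmds[0] and mioc->cmds[1] (opcode, arg, flags) ...
 *     ioctl(fd, MMC_IOC_MULTI_CMD, mioc);
 */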
static int mmc_blk_check_blkdev(struct block_device *bdev)
{
	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
		return -EPERM;
	return 0;
}

static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	struct mmc_blk_data *md;
	int ret;

	switch (cmd) {
	case MMC_IOC_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_cmd(md,
					(struct mmc_ioc_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_multi_cmd(md,
					(struct mmc_ioc_multi_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
					  sector_t *sector)
{
	struct mmc_blk_data *md;
	int ret;

	md = mmc_blk_get(disk);
	if (!md)
		return -EINVAL;

	if (md->queue.card)
		ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
	else
		ret = -ENODEV;

	mmc_blk_put(md);

	return ret;
}

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
	.alternative_gpt_sector	= mmc_blk_alternative_gpt_sector,
};
static int mmc_blk_part_switch_pre(struct mmc_card *card,
				   unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				return ret;
		}
		mmc_retune_pause(card->host);
	}

	return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
				    unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		mmc_retune_unpause(card->host);
		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			ret = mmc_cmdq_enable(card);
	}

	return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type)
{
	int ret = 0;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		ret = mmc_blk_part_switch_pre(card, part_type);
		if (ret)
			return ret;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			mmc_blk_part_switch_post(card, part_type);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

	main_md->part_curr = part_type;
	return ret;
}
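/*
 * Worked example (added commentary, not in the original source): switching
 * from the main user area to boot partition 1 clears the low
 * PARTITION_ACCESS bits of the cached PARTITION_CONFIG and ORs in the new
 * partition type, e.g. part_config 0x48 -> (0x48 & ~0x07) | 0x01 = 0x49,
 * which is then written to EXT_CSD byte 179 via CMD6 (mmc_switch).
 */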
static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	err = mmc_app_cmd(card->host, card);
	if (err)
		return err;

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}
*host
)
960 if (host
->actual_clock
)
961 return host
->actual_clock
/ 1000;
963 /* Clock may be subject to a divisor, fudge it by a factor of 2. */
965 return host
->ios
.clock
/ 2000;
967 /* How can there be no clock */
969 return 100; /* 100 kHz is minimum possible value */
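/*
 * Worked example (added commentary, not in the original source): with
 * actual_clock reported as 52000000 Hz the helper returns 52000 kHz; if
 * only ios.clock = 50000000 Hz is known, the divisor fudge yields
 * 50000000 / 2000 = 25000 kHz, i.e. half the nominal rate.
 */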
static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
					    struct mmc_data *data)
{
	unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
	unsigned int khz;

	if (data->timeout_clks) {
		khz = mmc_blk_clock_khz(host);
		ms += DIV_ROUND_UP(data->timeout_clks, khz);
	}

	return ms;
}
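/*
 * Worked example (added commentary, not in the original source): for
 * timeout_ns = 250000000 (250 ms) and timeout_clks = 100000 at 25000 kHz,
 * the result is DIV_ROUND_UP(250000000, 1000000) +
 * DIV_ROUND_UP(100000, 25000) = 250 + 4 = 254 ms.
 */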
/*
 * Attempts to reset the card and get back to the requested partition.
 * Therefore any error here must result in cancelling the block layer
 * request, it must not be reattempted without going through the mmc_blk
 * partition sanity checks.
 */
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;
	struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host->card);
	/*
	 * A successful reset will leave the card in the main partition, but
	 * upon failure it might not be, so set it to MMC_BLK_PART_INVALID
	 * in that case.
	 */
	main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
	if (err)
		return err;
	/* Ensure we switch back to the correct partition */
	if (mmc_blk_part_switch(host->card, md->part_type))
		/*
		 * We have failed to get back into the correct
		 * partition, so we need to abort the whole request.
		 */
		return -ENODEV;
	return 0;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
/*
 * The non-block commands come back from the block layer after it queued it and
 * processed it with all other requests and then they get issued in this
 * function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mq_rq;
	struct mmc_card *card = mq->card;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_blk_ioc_data **idata;
	bool rpmb_ioctl;
	u8 **ext_csd;
	u32 status;
	int ret;
	int i;

	mq_rq = req_to_mmc_queue_req(req);
	rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);

	switch (mq_rq->drv_op) {
	case MMC_DRV_OP_IOCTL:
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				break;
		}
		fallthrough;
	case MMC_DRV_OP_IOCTL_RPMB:
		idata = mq_rq->drv_op_data;
		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
			ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
			if (ret)
				break;
		}
		/* Always switch back to main area after RPMB access */
		if (rpmb_ioctl)
			mmc_blk_part_switch(card, 0);
		else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			mmc_cmdq_enable(card);
		break;
	case MMC_DRV_OP_BOOT_WP:
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				 card->ext_csd.boot_ro_lock |
				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				 card->ext_csd.part_time);
		if (ret)
			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
			       md->disk->disk_name, ret);
		else
			card->ext_csd.boot_ro_lock |=
				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
		break;
	case MMC_DRV_OP_GET_CARD_STATUS:
		ret = mmc_send_status(card, &status);
		if (!ret)
			ret = status;
		break;
	case MMC_DRV_OP_GET_EXT_CSD:
		ext_csd = mq_rq->drv_op_data;
		ret = mmc_get_ext_csd(card, ext_csd);
		break;
	default:
		pr_err("%s: unknown driver specific operation\n",
		       md->disk->disk_name);
		ret = -EINVAL;
		break;
	}
	mq_rq->drv_op_result = ret;
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
				   int type, unsigned int erase_arg)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr;
	int err = 0;
	blk_status_t status = BLK_STS_OK;

	if (!mmc_can_erase(card)) {
		status = BLK_STS_NOTSUPP;
		goto fail;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	do {
		err = 0;
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 erase_arg == MMC_TRIM_ARG ?
					 INAND_CMD38_ARG_TRIM :
					 INAND_CMD38_ARG_ERASE,
					 card->ext_csd.generic_cmd6_time);
		}
		if (!err)
			err = mmc_erase(card, from, nr, erase_arg);
	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
	if (err)
		status = BLK_STS_IOERR;
	else
		mmc_blk_reset_success(md, type);
fail:
	blk_mq_end_request(req, status);
}

static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
{
	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG);
}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int arg = card->erase_arg;

	if (mmc_card_broken_sd_discard(card))
		arg = SD_ERASE_ARG;

	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
					struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!(mmc_can_secure_erase_trim(card))) {
		status = BLK_STS_NOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 card->ext_csd.generic_cmd6_time);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err) {
		status = BLK_STS_IOERR;
		goto out;
	}

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 card->ext_csd.generic_cmd6_time);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err) {
			status = BLK_STS_IOERR;
			goto out;
		}
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_mq_end_request(req, status);
}
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card->host);
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
#define CMD_ERRORS_EXCL_OOR						\
	(R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

#define CMD_ERRORS							\
	(CMD_ERRORS_EXCL_OOR |						\
	 R1_OUT_OF_RANGE)	/* Command argument out of range */	\
static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
	u32 val;

	/*
	 * Per the SD specification (physical layer version 4.10)[1],
	 * section 4.3.3, it explicitly states that "When the last
	 * block of user area is read using CMD18, the host should
	 * ignore OUT_OF_RANGE error that may occur even the sequence
	 * is correct". And JESD84-B51 for eMMC also has a similar
	 * statement on section 6.8.3.
	 *
	 * Multiple block read/write could be done by either predefined
	 * method, namely CMD23, or open-ending mode. For open-ending mode,
	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
	 *
	 * However the spec[1] doesn't tell us whether we should also
	 * ignore that for predefined method. But per the spec[1], section
	 * 4.15 Set Block Count Command, it says "If illegal block count
	 * is set, out of range error will be indicated during read/write
	 * operation (For example, data transfer is stopped at user area
	 * boundary)." In other words, we could expect an out-of-range error
	 * in the response for the following CMD18/25. And if argument of
	 * CMD23 + the argument of CMD18/25 exceed the max number of blocks,
	 * we could also expect to get a -ETIMEDOUT or any error number from
	 * the host drivers due to missing data response(for write)/data(for
	 * read), as the cards will stop the data transfer by itself per the
	 * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode.
	 */
	if (!brq->stop.error) {
		bool oor_with_open_end;
		/* If there is no error yet, check R1 response */

		val = brq->stop.resp[0] & CMD_ERRORS;
		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

		if (val && !oor_with_open_end)
			brq->stop.error = -EIO;
	}
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int recovery_mode, bool *do_rel_wr_p,
			      bool *do_data_tag_p)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	bool do_rel_wr, do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		    rq_data_dir(req) == WRITE &&
		    (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));

	mmc_crypto_prepare_req(mqrq);

	brq->mrq.data = &brq->data;
	brq->mrq.tag = req->tag;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;

	if (rq_data_dir(req) == READ) {
		brq->data.flags = MMC_DATA_READ;
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		brq->data.flags = MMC_DATA_WRITE;
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	}

	brq->data.blksz = 512;
	brq->data.blocks = blk_rq_sectors(req);
	brq->data.blk_addr = blk_rq_pos(req);

	/*
	 * The command queue supports 2 priorities: "high" (1) and "simple" (0).
	 * The eMMC will give "high" priority tasks priority over "simple"
	 * priority tasks. Here we always set "simple" priority by not setting
	 * MMC_DATA_PRIO.
	 */

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * Some SD cards in SPI mode return a CRC error or even lock up
		 * completely when trying to read the last block using a
		 * multiblock read command.
		 */
		if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
		    (blk_rq_pos(req) + blk_rq_sectors(req) ==
		     get_capacity(md->disk)))
			brq->data.blocks--;

		/*
		 * After a read error, we redo the request one (native) sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		else if (recovery_mode)
			brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (do_rel_wr) {
		mmc_apply_rel_rw(brq, card, req);
		brq->data.flags |= MMC_DATA_REL_WR;
	}

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = card->ext_csd.data_tag_unit_size &&
		      (req->cmd_flags & REQ_META) &&
		      (rq_data_dir(req) == WRITE) &&
		      ((brq->data.blocks * brq->data.blksz) >=
		       card->ext_csd.data_tag_unit_size);

	if (do_data_tag)
		brq->data.flags |= MMC_DATA_DAT_TAG;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	if (do_rel_wr_p)
		*do_rel_wr_p = do_rel_wr;

	if (do_data_tag_p)
		*do_data_tag_p = do_data_tag;
}
#define MMC_CQE_RETRIES 2

static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct request_queue *q = req->q;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	unsigned long flags;
	bool put_card;
	int err;

	mmc_cqe_post_req(host, mrq);

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
	else if (mrq->data && mrq->data->error)
		err = mrq->data->error;
	else
		err = 0;

	if (err) {
		if (mqrq->retries++ < MMC_CQE_RETRIES)
			blk_mq_requeue_request(req, true);
		else
			blk_mq_end_request(req, BLK_STS_IOERR);
	} else if (mrq->data) {
		if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
			blk_mq_requeue_request(req, true);
		else
			__blk_mq_end_request(req, BLK_STS_OK);
	} else {
		blk_mq_end_request(req, BLK_STS_OK);
	}

	spin_lock_irqsave(&mq->lock, flags);

	mq->in_flight[issue_type] -= 1;

	put_card = (mmc_tot_in_flight(mq) == 0);

	mmc_cqe_check_busy(mq);

	spin_unlock_irqrestore(&mq->lock, flags);

	if (!mq->cqe_busy)
		blk_mq_run_hw_queues(q, true);

	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);
}
void mmc_blk_cqe_recovery(struct mmc_queue *mq)
{
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	int err;

	pr_debug("%s: CQE recovery start\n", mmc_hostname(host));

	err = mmc_cqe_recovery(host);
	if (err)
		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
	mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);

	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;

	/*
	 * Block layer timeouts race with completions which means the normal
	 * completion path cannot be used during recovery.
	 */
	if (mq->in_recovery)
		mmc_blk_cqe_complete_rq(mq, req);
	else if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}
static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done		= mmc_blk_cqe_req_done;
	mrq->recovery_notifier	= mmc_cqe_recovery_notifier;

	return mmc_cqe_start_req(host, mrq);
}

static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
						 struct request *req)
{
	struct mmc_blk_request *brq = &mqrq->brq;

	memset(brq, 0, sizeof(*brq));

	brq->mrq.cmd = &brq->cmd;
	brq->mrq.tag = req->tag;

	return &brq->mrq;
}

static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);

	mrq->cmd->opcode = MMC_SWITCH;
	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
			(EXT_CSD_FLUSH_CACHE << 16) |
			(1 << 8) |
			EXT_CSD_CMD_SET_NORMAL;
	mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;

	return mmc_blk_cqe_start_req(mq->card->host, mrq);
}
static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;
	int err;

	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
	mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
	mmc_pre_req(host, &mqrq->brq.mrq);

	err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
	if (err)
		mmc_post_req(host, &mqrq->brq.mrq, err);

	return err;
}

static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;

	if (host->hsq_enabled)
		return mmc_blk_hsq_issue_rw_rq(mq, req);

	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);

	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct mmc_blk_data *md = mq->blkdata;
	bool do_rel_wr, do_data_tag;

	mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);

	brq->mrq.cmd = &brq->cmd;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open ended-ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}
}
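/*
 * Bit-layout example (added commentary, not in the original source): for a
 * reliable, data-tagged 8-block write, the CMD23 argument built above is
 * 8 | (1 << 31) | (1 << 29) = 0xA0000008, where bit 31 requests a reliable
 * write, bit 29 marks the transfer as tagged data, and the low bits carry
 * the block count.
 */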
#define MMC_MAX_RETRIES		5
#define MMC_DATA_RETRIES	2
#define MMC_NO_RETRIES		(MMC_MAX_RETRIES + 1)

static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
{
	struct mmc_command cmd = {
		.opcode = MMC_STOP_TRANSMISSION,
		.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
		/* Some hosts wait for busy anyway, so provide a busy timeout */
		.busy_timeout = timeout,
	};

	return mmc_wait_for_cmd(card->host, &cmd, 5);
}

static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
	int err;

	mmc_retune_hold_now(card->host);

	mmc_blk_send_stop(card, timeout);

	err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);

	mmc_retune_release(card->host);

	return err;
}
#define MMC_READ_SINGLE_RETRIES	2

/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	blk_status_t error = BLK_STS_OK;
	size_t bytes_per_read = queue_physical_block_size(mq->queue);

	do {
		u32 status;
		int err;
		int retries = 0;

		while (retries++ <= MMC_READ_SINGLE_RETRIES) {
			mmc_blk_rw_rq_prep(mqrq, card, 1, mq);

			mmc_wait_for_req(host, mrq);

			err = mmc_send_status(card, &status);
			if (err)
				goto error_exit;

			if (!mmc_host_is_spi(host) &&
			    !mmc_ready_for_data(status)) {
				err = mmc_blk_fix_state(card, req);
				if (err)
					goto error_exit;
			}

			if (!mrq->cmd->error)
				break;
		}

		if (mrq->cmd->error ||
		    mrq->data->error ||
		    (!mmc_host_is_spi(host) &&
		     (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
			error = BLK_STS_IOERR;
		else
			error = BLK_STS_OK;

	} while (blk_update_request(req, error, bytes_per_read));

	return;

error_exit:
	mrq->data->bytes_xfered = 0;
	blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
	/* Let it try the remaining request again */
	if (mqrq->retries > MMC_MAX_RETRIES - 1)
		mqrq->retries = MMC_MAX_RETRIES - 1;
}
static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
	return !!brq->mrq.sbc;
}

static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
{
	return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
}
/*
 * Check for errors the host controller driver might not have seen such as
 * response mode errors or invalid card state.
 */
static bool mmc_blk_status_error(struct request *req, u32 status)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	struct mmc_queue *mq = req->q->queuedata;
	u32 stop_err_bits;

	if (mmc_host_is_spi(mq->card->host))
		return false;

	stop_err_bits = mmc_blk_stop_err_bits(brq);

	return brq->cmd.resp[0]  & CMD_ERRORS    ||
	       brq->stop.resp[0] & stop_err_bits ||
	       status            & stop_err_bits ||
	       (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
}

static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
{
	return !brq->sbc.error && !brq->cmd.error &&
	       !(brq->cmd.resp[0] & CMD_ERRORS);
}
/*
 * Requests are completed by mmc_blk_mq_complete_rq() which sets simple
 * policy:
 * 1. A request that has transferred at least some data is considered
 *    successful and will be requeued if there is remaining data to
 *    transfer.
 * 2. Otherwise the number of retries is incremented and the request
 *    will be requeued if there are remaining retries.
 * 3. Otherwise the request will be errored out.
 * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
 * mqrq->retries. So there are only 4 possible actions here:
 *	1. do not accept the bytes_xfered value i.e. set it to zero
 *	2. change mqrq->retries to determine the number of retries
 *	3. try to reset the card
 *	4. read one sector at a time
 */
static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{
	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = mq->card;
	u32 status;
	u32 blocks;
	int err;

	/*
	 * Some errors the host driver might not have seen. Set the number of
	 * bytes transferred to zero in that case.
	 */
	err = __mmc_send_status(card, &status, 0);
	if (err || mmc_blk_status_error(req, status))
		brq->data.bytes_xfered = 0;

	mmc_retune_release(card->host);

	/*
	 * Try again to get the status. This also provides an opportunity for
	 * re-tuning.
	 */
	if (err)
		err = __mmc_send_status(card, &status, 0);

	/*
	 * Nothing more to do after the number of bytes transferred has been
	 * updated and there is no card.
	 */
	if (err && mmc_detect_card_removed(card->host))
		return;

	/* Try to get back to "tran" state */
	if (!mmc_host_is_spi(mq->card->host) &&
	    (err || !mmc_ready_for_data(status)))
		err = mmc_blk_fix_state(mq->card, req);

	/*
	 * Special case for SD cards where the card might record the number of
	 * blocks written.
	 */
	if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
	    rq_data_dir(req) == WRITE) {
		if (mmc_sd_num_wr_blocks(card, &blocks))
			brq->data.bytes_xfered = 0;
		else
			brq->data.bytes_xfered = blocks << 9;
	}

	/* Reset if the card is in a bad state */
	if (!mmc_host_is_spi(mq->card->host) &&
	    err && mmc_blk_reset(md, card->host, type)) {
		pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
		mqrq->retries = MMC_NO_RETRIES;
		return;
	}

	/*
	 * If anything was done, just return and if there is anything remaining
	 * on the request it will get requeued.
	 */
	if (brq->data.bytes_xfered)
		return;

	/* Reset before last retry */
	if (mqrq->retries + 1 == MMC_MAX_RETRIES &&
	    mmc_blk_reset(md, card->host, type))
		return;

	/* Command errors fail fast, so use all MMC_MAX_RETRIES */
	if (brq->sbc.error || brq->cmd.error)
		return;

	/* Reduce the remaining retries for data errors */
	if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
		mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
		return;
	}

	if (rq_data_dir(req) == READ && brq->data.blocks >
	    queue_physical_block_size(mq->queue) >> 9) {
		/* Read one (native) sector at a time */
		mmc_blk_read_single(mq, req);
		return;
	}
}
static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{
	mmc_blk_eval_resp_error(brq);

	return brq->sbc.error || brq->cmd.error || brq->stop.error ||
	       brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
}
static int mmc_spi_err_check(struct mmc_card *card)
{
	u32 status = 0;
	int err;

	/*
	 * SPI does not have a TRAN state we have to wait on, instead the
	 * card is ready again when it no longer holds the line LOW.
	 * We still have to ensure two things here before we know the write
	 * was successful:
	 * 1. The card has not disconnected during busy and we actually read our
	 * own pull-up, thinking it was still connected, so ensure it
	 * still responds.
	 * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
	 * just reconnected card after being disconnected during busy.
	 */
	err = __mmc_send_status(card, &status, 0);
	if (err)
		return err;
	/* All R1 and R2 bits of SPI are errors in our case */
	if (status)
		return -EIO;
	return 0;
}

static int mmc_blk_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_blk_busy_data *data = cb_data;
	u32 status = 0;
	int err;

	err = mmc_send_status(data->card, &status);
	if (err)
		return err;

	/* Accumulate response error bits. */
	data->status |= status;

	*busy = !mmc_ready_for_data(status);
	return 0;
}
static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_busy_data cb_data;
	int err;

	if (rq_data_dir(req) == READ)
		return 0;

	if (mmc_host_is_spi(card->host)) {
		err = mmc_spi_err_check(card);
		if (err)
			mqrq->brq.data.bytes_xfered = 0;
		return err;
	}

	cb_data.card = card;
	cb_data.status = 0;
	err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
				  &mmc_blk_busy_cb, &cb_data);

	/*
	 * Do not assume data transferred correctly if there are any error bits
	 * set.
	 */
	if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
		mqrq->brq.data.bytes_xfered = 0;
		err = err ? err : -EIO;
	}

	/* Copy the exception bit so it will be seen later on */
	if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
		mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;

	return err;
}
static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
					    struct request *req)
{
	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;

	mmc_blk_reset_success(mq->blkdata, type);
}

static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;

	if (nr_bytes) {
		if (blk_update_request(req, BLK_STS_OK, nr_bytes))
			blk_mq_requeue_request(req, true);
		else
			__blk_mq_end_request(req, BLK_STS_OK);
	} else if (!blk_rq_bytes(req)) {
		__blk_mq_end_request(req, BLK_STS_IOERR);
	} else if (mqrq->retries++ < MMC_MAX_RETRIES) {
		blk_mq_requeue_request(req, true);
	} else {
		if (mmc_card_removed(mq->card))
			req->rq_flags |= RQF_QUIET;
		blk_mq_end_request(req, BLK_STS_IOERR);
	}
}
static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
					struct mmc_queue_req *mqrq)
{
	return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
	       (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
		mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
}

static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
				 struct mmc_queue_req *mqrq)
{
	if (mmc_blk_urgent_bkops_needed(mq, mqrq))
		mmc_run_bkops(mq->card);
}
static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq =
		container_of(mrq, struct mmc_queue_req, brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_host *host = mq->card->host;
	unsigned long flags;

	if (mmc_blk_rq_error(&mqrq->brq) ||
	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
		spin_lock_irqsave(&mq->lock, flags);
		mq->recovery_needed = true;
		mq->recovery_req = req;
		spin_unlock_irqrestore(&mq->lock, flags);

		host->cqe_ops->cqe_recovery_start(host);

		schedule_work(&mq->recovery_work);
		return;
	}

	mmc_blk_rw_reset_success(mq, req);

	/*
	 * Block layer timeouts race with completions which means the normal
	 * completion path cannot be used during recovery.
	 */
	if (mq->in_recovery)
		mmc_blk_cqe_complete_rq(mq, req);
	else if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}
void mmc_blk_mq_complete(struct request *req)
{
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;

	if (host->cqe_enabled)
		mmc_blk_cqe_complete_rq(mq, req);
	else if (likely(!blk_should_fake_timeout(req->q)))
		mmc_blk_mq_complete_rq(mq, req);
}
static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;

	if (mmc_blk_rq_error(&mqrq->brq) ||
	    mmc_blk_card_busy(mq->card, req)) {
		mmc_blk_mq_rw_recovery(mq, req);
	} else {
		mmc_blk_rw_reset_success(mq, req);
		mmc_retune_release(host);
	}

	mmc_blk_urgent_bkops(mq, mqrq);
}
static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
{
	unsigned long flags;
	bool put_card;

	spin_lock_irqsave(&mq->lock, flags);

	mq->in_flight[issue_type] -= 1;

	put_card = (mmc_tot_in_flight(mq) == 0);

	spin_unlock_irqrestore(&mq->lock, flags);

	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);
}
static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
				bool can_sleep)
{
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_host *host = mq->card->host;

	mmc_post_req(host, mrq, 0);

	/*
	 * Block layer timeouts race with completions which means the normal
	 * completion path cannot be used during recovery.
	 */
	if (mq->in_recovery) {
		mmc_blk_mq_complete_rq(mq, req);
	} else if (likely(!blk_should_fake_timeout(req->q))) {
		if (can_sleep)
			blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
		else
			blk_mq_complete_request(req);
	}

	mmc_blk_mq_dec_in_flight(mq, issue_type);
}
void mmc_blk_mq_recovery(struct mmc_queue *mq)
{
	struct request *req = mq->recovery_req;
	struct mmc_host *host = mq->card->host;
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);

	mq->recovery_req = NULL;
	mq->rw_wait = false;

	if (mmc_blk_rq_error(&mqrq->brq)) {
		mmc_retune_hold_now(host);
		mmc_blk_mq_rw_recovery(mq, req);
	}

	mmc_blk_urgent_bkops(mq, mqrq);

	mmc_blk_mq_post_req(mq, req, true);
}
static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
					 struct request **prev_req)
{
	if (mmc_host_done_complete(mq->card->host))
		return;

	mutex_lock(&mq->complete_lock);

	if (!mq->complete_req)
		goto out_unlock;

	mmc_blk_mq_poll_completion(mq, mq->complete_req);

	if (prev_req)
		*prev_req = mq->complete_req;
	else
		mmc_blk_mq_post_req(mq, mq->complete_req, true);

	mq->complete_req = NULL;

out_unlock:
	mutex_unlock(&mq->complete_lock);
}

void mmc_blk_mq_complete_work(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    complete_work);

	mmc_blk_mq_complete_prev_req(mq, NULL);
}
static void mmc_blk_mq_req_done(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_host *host = mq->card->host;
	unsigned long flags;

	if (!mmc_host_done_complete(host)) {
		bool waiting;

		/*
		 * We cannot complete the request in this context, so record
		 * that there is a request to complete, and that a following
		 * request does not need to wait (although it does need to
		 * complete complete_req first).
		 */
		spin_lock_irqsave(&mq->lock, flags);
		mq->complete_req = req;
		mq->rw_wait = false;
		waiting = mq->waiting;
		spin_unlock_irqrestore(&mq->lock, flags);

		/*
		 * If 'waiting' then the waiting task will complete this
		 * request, otherwise queue a work to do it. Note that
		 * complete_work may still race with the dispatch of a following
		 * request.
		 */
		if (waiting)
			wake_up(&mq->wait);
		else
			queue_work(mq->card->complete_wq, &mq->complete_work);

		return;
	}

	/* Take the recovery path for errors or urgent background operations */
	if (mmc_blk_rq_error(&mqrq->brq) ||
	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
		spin_lock_irqsave(&mq->lock, flags);
		mq->recovery_needed = true;
		mq->recovery_req = req;
		spin_unlock_irqrestore(&mq->lock, flags);

		wake_up(&mq->wait);

		schedule_work(&mq->recovery_work);
		return;
	}

	mmc_blk_rw_reset_success(mq, req);

	mq->rw_wait = false;
	wake_up(&mq->wait);

	/* context unknown */
	mmc_blk_mq_post_req(mq, req, false);
}
static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
{
	unsigned long flags;
	bool done;

	/*
	 * Wait while there is another request in progress, but not if recovery
	 * is needed. Also indicate whether there is a request waiting to start.
	 */
	spin_lock_irqsave(&mq->lock, flags);
	if (mq->recovery_needed) {
		*err = -EBUSY;
		done = true;
	} else {
		done = !mq->rw_wait;
	}
	mq->waiting = !done;
	spin_unlock_irqrestore(&mq->lock, flags);

	return done;
}

static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
{
	int err = 0;

	wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));

	/* Always complete the previous request if there is one */
	mmc_blk_mq_complete_prev_req(mq, prev_req);

	return err;
}
static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
				  struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;
	struct request *prev_req = NULL;
	int err = 0;

	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);

	mqrq->brq.mrq.done = mmc_blk_mq_req_done;

	mmc_pre_req(host, &mqrq->brq.mrq);

	err = mmc_blk_rw_wait(mq, &prev_req);
	if (err)
		goto out_post_req;

	mq->rw_wait = true;

	err = mmc_start_request(host, &mqrq->brq.mrq);

	if (prev_req)
		mmc_blk_mq_post_req(mq, prev_req, true);

	if (err)
		mq->rw_wait = false;

	/* Release re-tuning here where there is no synchronization required */
	if (err || mmc_host_done_complete(host))
		mmc_retune_release(host);

out_post_req:
	if (err)
		mmc_post_req(host, &mqrq->brq.mrq, err);

	return err;
}

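/*
 * Note that the mmc_pre_req() / mmc_start_request() / mmc_blk_mq_post_req()
 * sequence above is what pipelines the queue: the next request is prepared
 * (e.g. DMA-mapped) by mmc_pre_req() and started while the previous one is
 * still being finished, and the previous request is only unprepared and
 * completed once the new one has been handed to the host controller.
 */
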
static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
{
	if (host->cqe_enabled)
		return host->cqe_ops->cqe_wait_for_idle(host);

	return mmc_blk_rw_wait(mq, NULL);
}

enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	int ret;

	ret = mmc_blk_part_switch(card, md->part_type);
	if (ret)
		return MMC_REQ_FAILED_TO_START;

	switch (mmc_issue_type(mq, req)) {
	case MMC_ISSUE_SYNC:
		ret = mmc_blk_wait_for_idle(mq, host);
		if (ret)
			return MMC_REQ_BUSY;
		switch (req_op(req)) {
		case REQ_OP_DRV_IN:
		case REQ_OP_DRV_OUT:
			mmc_blk_issue_drv_op(mq, req);
			break;
		case REQ_OP_DISCARD:
			mmc_blk_issue_discard_rq(mq, req);
			break;
		case REQ_OP_SECURE_ERASE:
			mmc_blk_issue_secdiscard_rq(mq, req);
			break;
		case REQ_OP_WRITE_ZEROES:
			mmc_blk_issue_trim_rq(mq, req);
			break;
		case REQ_OP_FLUSH:
			mmc_blk_issue_flush(mq, req);
			break;
		default:
			WARN_ON_ONCE(1);
			return MMC_REQ_FAILED_TO_START;
		}
		return MMC_REQ_FINISHED;
	case MMC_ISSUE_DCMD:
	case MMC_ISSUE_ASYNC:
		switch (req_op(req)) {
		case REQ_OP_FLUSH:
			if (!mmc_cache_enabled(host)) {
				blk_mq_end_request(req, BLK_STS_OK);
				return MMC_REQ_FINISHED;
			}
			ret = mmc_blk_cqe_issue_flush(mq, req);
			break;
		case REQ_OP_READ:
		case REQ_OP_WRITE:
			if (host->cqe_enabled)
				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
			else
				ret = mmc_blk_mq_issue_rw_rq(mq, req);
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
		}
		if (!ret)
			return MMC_REQ_STARTED;
		return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
	default:
		WARN_ON_ONCE(1);
		return MMC_REQ_FAILED_TO_START;
	}
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type,
					      unsigned int part_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;
	char cap_str[10];
	bool cache_enabled = false;
	bool fua_enabled = false;

	devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
	if (devidx < 0) {
		/*
		 * We get -ENOSPC because there are no more available devidx.
		 * The reason may be that either userspace has not yet
		 * unmounted the partitions, which postpones mmc_blk_release()
		 * from being called, or the device has more partitions than
		 * what we support.
		 */
		if (devidx == -ENOSPC)
			dev_err(mmc_dev(card->host),
				"no more device IDs available\n");

		return ERR_PTR(devidx);
	}

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = mmc_init_queue(&md->queue, card);
	if (IS_ERR(md->disk)) {
		ret = PTR_ERR(md->disk);
		goto err_kfree;
	}

	INIT_LIST_HEAD(&md->part);
	INIT_LIST_HEAD(&md->rpmbs);
	kref_init(&md->kref);

	md->queue.blkdata = md;
	md->part_type = part_type;

	md->disk->major = MMC_BLOCK_MAJOR;
	md->disk->minors = perdev_minors;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->parent = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
		md->disk->flags |= GENHD_FL_NO_PART;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE. Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if ((mmc_card_mmc(card) &&
		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		fua_enabled = true;
		cache_enabled = true;
	}
	if (mmc_cache_enabled(card->host))
		cache_enabled = true;

	blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);

	string_get_size((u64)size, 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s%s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? " (ro)" : "");

	/* used in ->open, must be set before add_disk: */
	if (area_type == MMC_BLK_DATA_AREA_MAIN)
		dev_set_drvdata(&card->dev, md);
	ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
	if (ret)
		goto err_put_disk;
	return md;

 err_put_disk:
	put_disk(md->disk);
	blk_mq_free_tag_set(&md->queue.tag_set);
 err_kfree:
	kfree(md);
 out:
	ida_simple_remove(&mmc_blk_ida, devidx);
	return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = (typeof(sector_t))card->csd.capacity
			<< (card->csd.read_blkbits - 9);
	}

	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
				 MMC_BLK_DATA_AREA_MAIN, 0);
}

static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type, part_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	list_add(&part_md->part, &md->part);

	return 0;
}

/**
 * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
 * @filp: the character device file
 * @cmd: the ioctl() command
 * @arg: the argument from userspace
 *
 * This essentially just redirects the ioctl()s coming in over the
 * character device to the main block device that spawned the RPMB
 * character device.
 */
static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct mmc_rpmb_data *rpmb = filp->private_data;
	int ret;

	switch (cmd) {
	case MMC_IOC_CMD:
		ret = mmc_blk_ioctl_cmd(rpmb->md,
					(struct mmc_ioc_cmd __user *)arg,
					rpmb);
		break;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
					      (struct mmc_ioc_multi_cmd __user *)arg,
					      rpmb);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
{
	struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
						  struct mmc_rpmb_data, chrdev);

	get_device(&rpmb->dev);
	filp->private_data = rpmb;
	mmc_blk_get(rpmb->md->disk);

	return nonseekable_open(inode, filp);
}

static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
{
	struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
						  struct mmc_rpmb_data, chrdev);

	mmc_blk_put(rpmb->md);
	put_device(&rpmb->dev);

	return 0;
}

static const struct file_operations mmc_rpmb_fileops = {
	.release = mmc_rpmb_chrdev_release,
	.open = mmc_rpmb_chrdev_open,
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = mmc_rpmb_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mmc_rpmb_ioctl_compat,
#endif
};

static void mmc_blk_rpmb_device_release(struct device *dev)
{
	struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);

	ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
	kfree(rpmb);
}

static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
				   struct mmc_blk_data *md,
				   unsigned int part_index,
				   sector_t size,
				   const char *subname)
{
	int devidx, ret;
	char rpmb_name[DISK_NAME_LEN];
	char cap_str[10];
	struct mmc_rpmb_data *rpmb;

	/* This creates the minor number for the RPMB char device */
	devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
	if (devidx < 0)
		return devidx;

	rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
	if (!rpmb) {
		ida_simple_remove(&mmc_rpmb_ida, devidx);
		return -ENOMEM;
	}

	snprintf(rpmb_name, sizeof(rpmb_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");

	rpmb->id = devidx;
	rpmb->part_index = part_index;
	rpmb->dev.init_name = rpmb_name;
	rpmb->dev.bus = &mmc_rpmb_bus_type;
	rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
	rpmb->dev.parent = &card->dev;
	rpmb->dev.release = mmc_blk_rpmb_device_release;
	device_initialize(&rpmb->dev);
	dev_set_drvdata(&rpmb->dev, rpmb);
	rpmb->md = md;

	cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
	rpmb->chrdev.owner = THIS_MODULE;
	ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
	if (ret) {
		pr_err("%s: could not add character device\n", rpmb_name);
		goto out_put_device;
	}

	list_add(&rpmb->node, &md->rpmbs);

	string_get_size((u64)size, 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));

	pr_info("%s: %s %s %s, chardev (%d:%d)\n",
		rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str,
		MAJOR(mmc_rpmb_devt), rpmb->id);

	return 0;

out_put_device:
	put_device(&rpmb->dev);
	return ret;
}

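/*
 * Note on the lifetime handling above: once device_initialize() has run,
 * rpmb and its embedded struct device must be released through
 * put_device(), which ends up in mmc_blk_rpmb_device_release() to return
 * the id and free the structure; hence the error path uses put_device()
 * rather than kfree().
 */
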
static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
{
	cdev_device_del(&rpmb->chrdev, &rpmb->dev);
	put_device(&rpmb->dev);
}

/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
			/*
			 * RPMB partitions do not provide block access; they
			 * are only accessed using ioctl()s. Thus create
			 * special RPMB block devices that do not have a
			 * backing block queue for these.
			 */
			ret = mmc_blk_alloc_rpmb_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].name);
			if (ret)
				return ret;
		} else if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	/*
	 * Flush remaining requests and free queues. It is freeing the queue
	 * that stops new requests from being accepted.
	 */
	del_gendisk(md->disk);
	mmc_cleanup_queue(&md->queue);
	mmc_blk_put(md);
}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;
	struct mmc_rpmb_data *rpmb;

	/* Remove RPMB partitions */
	list_for_each_safe(pos, q, &md->rpmbs) {
		rpmb = list_entry(pos, struct mmc_rpmb_data, node);
		list_del(pos);
		mmc_blk_remove_rpmb_part(rpmb);
	}
	/* Remove block partitions */
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}

#ifdef CONFIG_DEBUG_FS

static int mmc_dbg_card_status_get(void *data, u64 *val)
{
	struct mmc_card *card = data;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
	struct mmc_queue *mq = &md->queue;
	struct request *req;
	int ret;

	/* Ask the block layer about the card status */
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	blk_execute_rq(req, false);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
	if (ret >= 0) {
		*val = ret;
		ret = 0;
	}
	blk_mq_free_request(req);

	return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
			 NULL, "%08llx\n");

/* Two hex digits for each of the 512 EXT_CSD bytes, plus one newline */
#define EXT_CSD_STR_LEN 1025

static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
{
	struct mmc_card *card = inode->i_private;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
	struct mmc_queue *mq = &md->queue;
	struct request *req;
	char *buf;
	ssize_t n = 0;
	u8 *ext_csd;
	int err, i;

	buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Ask the block layer for the EXT CSD */
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_free;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
	blk_execute_rq(req, false);
	err = req_to_mmc_queue_req(req)->drv_op_result;
	blk_mq_free_request(req);
	if (err) {
		pr_err("FAILED %d\n", err);
		goto out_free;
	}

	for (i = 0; i < 512; i++)
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");

	if (n != EXT_CSD_STR_LEN) {
		err = -EINVAL;
		kfree(ext_csd);
		goto out_free;
	}

	filp->private_data = buf;
	kfree(ext_csd);
	return 0;

out_free:
	kfree(buf);
	return err;
}

static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	char *buf = filp->private_data;

	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, EXT_CSD_STR_LEN);
}

static int mmc_ext_csd_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations mmc_dbg_ext_csd_fops = {
	.open		= mmc_ext_csd_open,
	.read		= mmc_ext_csd_read,
	.release	= mmc_ext_csd_release,
	.llseek		= default_llseek,
};

static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
	struct dentry *root;

	if (!card->debugfs_root)
		return;

	root = card->debugfs_root;

	if (mmc_card_mmc(card) || mmc_card_sd(card)) {
		md->status_dentry =
			debugfs_create_file_unsafe("status", 0400, root,
						   card,
						   &mmc_dbg_card_status_fops);
	}

	if (mmc_card_mmc(card)) {
		md->ext_csd_dentry =
			debugfs_create_file("ext_csd", S_IRUSR, root, card,
					    &mmc_dbg_ext_csd_fops);
	}
}

static void mmc_blk_remove_debugfs(struct mmc_card *card,
				   struct mmc_blk_data *md)
{
	if (!card->debugfs_root)
		return;

	debugfs_remove(md->status_dentry);
	md->status_dentry = NULL;

	debugfs_remove(md->ext_csd_dentry);
	md->ext_csd_dentry = NULL;
}

#else

static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
}

static void mmc_blk_remove_debugfs(struct mmc_card *card,
				   struct mmc_blk_data *md)
{
}

#endif /* CONFIG_DEBUG_FS */

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int ret = 0;

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	mmc_fixup_device(card, mmc_blk_fixups);

	card->complete_wq = alloc_workqueue("mmc_complete",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!card->complete_wq) {
		pr_err("Failed to create mmc completion workqueue");
		return -ENOMEM;
	}

	md = mmc_blk_alloc(card);
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
		goto out_free;
	}

	ret = mmc_blk_alloc_parts(card, md);
	if (ret)
		goto out;

	/* Add two debugfs entries */
	mmc_blk_add_debugfs(card, md);

	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);

	/*
	 * Don't enable runtime PM for SD-combo cards here. Leave that
	 * decision to be taken during the SDIO init sequence instead.
	 */
	if (!mmc_card_sd_combo(card)) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}

	return 0;

out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
out_free:
	destroy_workqueue(card->complete_wq);
	return ret;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	mmc_blk_remove_debugfs(card, md);
	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	if (md->part_curr != md->part_type) {
		mmc_claim_host(card->host);
		mmc_blk_part_switch(card, md->part_type);
		mmc_release_host(card->host);
	}
	if (!mmc_card_sd_combo(card))
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	destroy_workqueue(card->complete_wq);
}

static int _mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static void mmc_blk_shutdown(struct mmc_card *card)
{
	_mmc_blk_suspend(card);
}

#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	return _mmc_blk_suspend(card);
}

static int mmc_blk_resume(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
		.pm	= &mmc_blk_pm_ops,
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.shutdown	= mmc_blk_shutdown,
};

static int __init mmc_blk_init(void)
{
	int res;

	res = bus_register(&mmc_rpmb_bus_type);
	if (res < 0) {
		pr_err("mmcblk: could not register RPMB bus type\n");
		return res;
	}
	res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
	if (res < 0) {
		pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
		goto out_bus_unreg;
	}

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
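	/*
	 * For example, with MINORBITS = 20 and the default of 8 minors per
	 * device this is min(256, 131072), so MAX_DEVICES is the effective
	 * limit unless perdev_minors is set very high.
	 */
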
	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out_chrdev_unreg;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out_blkdev_unreg;

	return 0;

out_blkdev_unreg:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
out_chrdev_unreg:
	unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
out_bus_unreg:
	bus_unregister(&mmc_rpmb_bus_type);
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
	unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
	bus_unregister(&mmc_rpmb_bus_type);
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");