--- /dev/null
+From c9c9762d4d44dcb1b2ba90cfb4122dc11ceebf31 Mon Sep 17 00:00:00 2001
+From: Long Li <longli@microsoft.com>
+Date: Mon, 7 Jun 2021 12:34:05 -0700
+Subject: block: return the correct bvec when checking for gaps
+
+From: Long Li <longli@microsoft.com>
+
+commit c9c9762d4d44dcb1b2ba90cfb4122dc11ceebf31 upstream.
+
+After commit 07173c3ec276 ("block: enable multipage bvecs"), a bvec can
+have multiple pages. But bio_will_gap() still assumes a single-page bvec
+when checking for merging. If the pages in the bvec cross the
+seg_boundary_mask, the merge check can succeed when only the first page
+is tested, yet fail when all the pages are tested.
+
+Later, when SCSI builds the SG list, the same merge check is done in
+__blk_segment_map_sg_merge() with all the pages in the bvec tested. This
+time the check may fail if the pages in the bvec cross the
+seg_boundary_mask (even though bio_will_gap() passed earlier, so those
+BIOs were merged). If this check fails, we end up with a broken SG list
+for drivers that assume the SG list has no offsets in intermediate
+pages. This results in incorrect pages being written to the disk.
+
+Fix this by returning the multi-page bvec when testing gaps for merging.
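+
+To illustrate the boundary arithmetic, here is a minimal userspace model
+of the gap check (not kernel code; the 4K virt boundary and the byte
+values below are illustrative assumptions):
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#define PAGE_SIZE           4096u
+#define VIRT_BOUNDARY_MASK  (PAGE_SIZE - 1)  /* assumed 4K boundary */
+
+struct bvec {
+	unsigned int bv_offset;
+	unsigned int bv_len;
+};
+
+/* Mirrors the (offset + len) & boundary test: non-zero means a gap. */
+static bool gap_after(const struct bvec *bv)
+{
+	return (bv->bv_offset + bv->bv_len) & VIRT_BOUNDARY_MASK;
+}
+
+int main(void)
+{
+	/* A multi-page bvec: starts 512 bytes into a page, spans two pages. */
+	struct bvec whole = { .bv_offset = 512, .bv_len = PAGE_SIZE };
+	/* What the old bio_iovec()-based helpers saw: clamped to one page. */
+	struct bvec first_page = { .bv_offset = 512, .bv_len = PAGE_SIZE - 512 };
+
+	printf("first page only: gap=%d\n", gap_after(&first_page)); /* 0: merge allowed */
+	printf("whole bvec:      gap=%d\n", gap_after(&whole));      /* 1: merge refused */
+	return 0;
+}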
+
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Cc: Pavel Begunkov <asml.silence@gmail.com>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Jeffle Xu <jefflexu@linux.alibaba.com>
+Cc: linux-kernel@vger.kernel.org
+Cc: stable@vger.kernel.org
+Fixes: 07173c3ec276 ("block: enable multipage bvecs")
+Signed-off-by: Long Li <longli@microsoft.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/1623094445-22332-1-git-send-email-longli@linuxonhyperv.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/bio.h | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -40,9 +40,6 @@
+ #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
+ #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
+
+-#define bio_multiple_segments(bio) \
+- ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
+-
+ #define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
+ #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
+
+@@ -246,7 +243,7 @@ static inline void bio_clear_flag(struct
+
+ static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+ {
+- *bv = bio_iovec(bio);
++ *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ }
+
+ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+@@ -254,10 +251,9 @@ static inline void bio_get_last_bvec(str
+ struct bvec_iter iter = bio->bi_iter;
+ int idx;
+
+- if (unlikely(!bio_multiple_segments(bio))) {
+- *bv = bio_iovec(bio);
+- return;
+- }
++ bio_get_first_bvec(bio, bv);
++ if (bv->bv_len == bio->bi_iter.bi_size)
++ return; /* this bio only has a single bvec */
+
+ bio_advance_iter(bio, &iter, iter.bi_size);
+
--- /dev/null
+From 70b52f09080565030a530a784f1c9948a7f48ca3 Mon Sep 17 00:00:00 2001
+From: Bean Huo <beanhuo@micron.com>
+Date: Tue, 4 May 2021 22:32:09 +0200
+Subject: mmc: block: Disable CMDQ on the ioctl path
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bean Huo <beanhuo@micron.com>
+
+commit 70b52f09080565030a530a784f1c9948a7f48ca3 upstream.
+
+According to the eMMC Spec:
+"When command queuing is enabled (CMDQ Mode En bit in CMDQ_MODE_EN
+field is set to ‘1’) class 11 commands are the only method through
+which data transfer tasks can be issued. Existing data transfer
+commands, namely CMD18/CMD17 and CMD25/CMD24, are not supported when
+command queuing is enabled."
+which means that if CMDQ is enabled, the FFU commands will not be
+supported. To fix this issue, simply disable CMDQ on the ioctl path and
+re-enable it once the ioctl request has completed.
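+
+As a rough sketch of the resulting pattern (not the verbatim hunk below;
+do_legacy_ioctl_transfer() is a hypothetical stand-in for the existing
+ioctl command loop):
+
+static int mmc_ioctl_without_cmdq(struct mmc_card *card)
+{
+	int ret = 0;
+
+	/* Leave CMDQ mode so legacy CMD17/CMD18/CMD24/CMD25 are allowed. */
+	if (card->ext_csd.cmdq_en) {
+		ret = mmc_cmdq_disable(card);
+		if (ret)
+			return ret;
+	}
+
+	ret = do_legacy_ioctl_transfer(card);
+
+	/* Restore CMDQ once the legacy transfer has completed. */
+	if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+		mmc_cmdq_enable(card);
+
+	return ret;
+}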
+
+Tested-by: Michael Brunner <Michael.Brunner@kontron.com>
+Signed-off-by: Bean Huo <beanhuo@micron.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Fixes: 1e8e55b67030 ("mmc: block: Add CQE support")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210504203209.361597-1-huobean@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/core/block.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1053,6 +1053,12 @@ static void mmc_blk_issue_drv_op(struct
+
+ switch (mq_rq->drv_op) {
+ case MMC_DRV_OP_IOCTL:
++ if (card->ext_csd.cmdq_en) {
++ ret = mmc_cmdq_disable(card);
++ if (ret)
++ break;
++ }
++ fallthrough;
+ case MMC_DRV_OP_IOCTL_RPMB:
+ idata = mq_rq->drv_op_data;
+ for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
+@@ -1063,6 +1069,8 @@ static void mmc_blk_issue_drv_op(struct
+ /* Always switch back to main area after RPMB access */
+ if (rpmb_ioctl)
+ mmc_blk_part_switch(card, 0);
++ else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
++ mmc_cmdq_enable(card);
+ break;
+ case MMC_DRV_OP_BOOT_WP:
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
--- /dev/null
+From 3c0bb3107703d2c58f7a0a7a2060bb57bc120326 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Fri, 21 May 2021 15:30:26 +0200
+Subject: mmc: vub300: fix control-request direction
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 3c0bb3107703d2c58f7a0a7a2060bb57bc120326 upstream.
+
+The direction of the pipe argument must match the request-type direction
+bit or control requests may fail depending on the host-controller-driver
+implementation.
+
+Fix the SET_ROM_WAIT_STATES request which erroneously used
+usb_rcvctrlpipe().
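+
+A minimal sketch of the convention (MY_VENDOR_REQUEST and its value are
+hypothetical; the point is that USB_DIR_OUT pairs with usb_sndctrlpipe()
+and USB_DIR_IN with usb_rcvctrlpipe()):
+
+#include <linux/usb.h>
+
+#define MY_VENDOR_REQUEST	0x21	/* hypothetical vendor request */
+
+static int send_vendor_out_request(struct usb_device *udev, u16 value)
+{
+	/* Host-to-device transfer: OUT direction bit, send pipe. */
+	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+			       MY_VENDOR_REQUEST,
+			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			       value, 0x0000, NULL, 0, USB_CTRL_SET_TIMEOUT);
+}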
+
+Fixes: 88095e7b473a ("mmc: Add new VUB300 USB-to-SD/SDIO/MMC driver")
+Cc: stable@vger.kernel.org # 3.0
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://lore.kernel.org/r/20210521133026.17296-1-johan@kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/vub300.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2286,7 +2286,7 @@ static int vub300_probe(struct usb_inter
+ if (retval < 0)
+ goto error5;
+ retval =
+- usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
++ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+ SET_ROM_WAIT_STATES,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
--- /dev/null
+From 6ecdafaec79d4b3388a5b017245f23a0ff9d852d Mon Sep 17 00:00:00 2001
+From: Varun Prakash <varun@chelsio.com>
+Date: Wed, 14 Apr 2021 18:09:09 +0530
+Subject: scsi: target: cxgbit: Unmap DMA buffer before calling target_execute_cmd()
+
+From: Varun Prakash <varun@chelsio.com>
+
+commit 6ecdafaec79d4b3388a5b017245f23a0ff9d852d upstream.
+
+Instead of calling dma_unmap_sg() after completing WRITE I/O, call
+dma_unmap_sg() before calling target_execute_cmd() to sync the DMA buffer.
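+
+A condensed sketch of the resulting ordering (the surrounding function is
+illustrative, not the full data-out handler):
+
+static void cxgbit_finish_write(struct cxgbi_ppm *ppm,
+				struct cxgbi_task_tag_info *ttinfo,
+				struct se_cmd *se_cmd)
+{
+	/* Unmap the DDP scatterlist first so the CPU sees the DMAed data. */
+	if (ttinfo->sgl) {
+		dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
+			     DMA_FROM_DEVICE);
+		ttinfo->nents = 0;
+		ttinfo->sgl = NULL;
+	}
+
+	/* Only then hand the completed WRITE off to the target core. */
+	target_execute_cmd(se_cmd);
+}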
+
+Link: https://lore.kernel.org/r/1618403949-3443-1-git-send-email-varun@chelsio.com
+Cc: <stable@vger.kernel.org> # 5.4+
+Signed-off-by: Varun Prakash <varun@chelsio.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 19 ++++++++++---------
+ drivers/target/iscsi/cxgbit/cxgbit_target.c | 21 ++++++++++++++++++---
+ 2 files changed, 28 insertions(+), 12 deletions(-)
+
+--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+@@ -265,12 +265,13 @@ void cxgbit_unmap_cmd(struct iscsi_conn
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+
+ if (ccmd->release) {
+- struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+-
+- if (ttinfo->sgl) {
++ if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
++ put_page(sg_page(&ccmd->sg));
++ } else {
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
++ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+
+ /* Abort the TCP conn if DDP is not complete to
+ * avoid any possibility of DDP after freeing
+@@ -280,14 +281,14 @@ void cxgbit_unmap_cmd(struct iscsi_conn
+ cmd->se_cmd.data_length))
+ cxgbit_abort_conn(csk);
+
++ if (unlikely(ttinfo->sgl)) {
++ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
++ ttinfo->nents, DMA_FROM_DEVICE);
++ ttinfo->nents = 0;
++ ttinfo->sgl = NULL;
++ }
+ cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+-
+- dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
+- ttinfo->nents, DMA_FROM_DEVICE);
+- } else {
+- put_page(sg_page(&ccmd->sg));
+ }
+-
+ ccmd->release = false;
+ }
+ }
+--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
+@@ -1013,17 +1013,18 @@ static int cxgbit_handle_iscsi_dataout(s
+ struct scatterlist *sg_start;
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_cmd *cmd = NULL;
++ struct cxgbit_cmd *ccmd;
++ struct cxgbi_task_tag_info *ttinfo;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
+ u32 data_offset = be32_to_cpu(hdr->offset);
+- u32 data_len = pdu_cb->dlen;
++ u32 data_len = ntoh24(hdr->dlength);
+ int rc, sg_nents, sg_off;
+ bool dcrc_err = false;
+
+ if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
+ u32 offset = be32_to_cpu(hdr->offset);
+ u32 ddp_data_len;
+- u32 payload_length = ntoh24(hdr->dlength);
+ bool success = false;
+
+ cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
+@@ -1038,7 +1039,7 @@ static int cxgbit_handle_iscsi_dataout(s
+ cmd->data_sn = be32_to_cpu(hdr->datasn);
+
+ rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
+- cmd, payload_length, &success);
++ cmd, data_len, &success);
+ if (rc < 0)
+ return rc;
+ else if (!success)
+@@ -1076,6 +1077,20 @@ static int cxgbit_handle_iscsi_dataout(s
+ cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
+ }
+
++ ccmd = iscsit_priv_cmd(cmd);
++ ttinfo = &ccmd->ttinfo;
++
++ if (ccmd->release && ttinfo->sgl &&
++ (cmd->se_cmd.data_length == (cmd->write_data_done + data_len))) {
++ struct cxgbit_device *cdev = csk->com.cdev;
++ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
++
++ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
++ DMA_FROM_DEVICE);
++ ttinfo->nents = 0;
++ ttinfo->sgl = NULL;
++ }
++
+ check_payload:
+
+ rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
mm-z3fold-fix-potential-memory-leak-in-z3fold_destro.patch
selftests-vm-pkeys-fix-alloc_random_pkey-to-make-it-.patch
perf-llvm-return-enomem-when-asprintf-fails.patch
+scsi-target-cxgbit-unmap-dma-buffer-before-calling-target_execute_cmd.patch
+block-return-the-correct-bvec-when-checking-for-gaps.patch
+mmc-block-disable-cmdq-on-the-ioctl-path.patch
+mmc-vub300-fix-control-request-direction.patch