mtd: nand: qpic_common: prevent out of bounds access of BAM arrays
author    Gabor Juhos <j4g8y7@gmail.com>
          Wed, 18 Jun 2025 20:22:50 +0000 (22:22 +0200)
committer Mark Brown <broonie@kernel.org>
          Sun, 29 Jun 2025 21:10:47 +0000 (22:10 +0100)
The common QPIC code does not do any boundary checking when it handles
the command element and scatter gather list arrays of a BAM transaction,
so it allows out-of-bounds access to elements of those arrays.

Although it is the responsibility of the given driver to allocate enough
space for all possible BAM transaction variations, mistakes in the driver
code can still lead to hidden memory corruption issues which are hard to
debug.

This kind of problem has been observed while testing the 'spi-qpic-snand'
driver. Although that driver has been fixed by a preceding patch, it still
makes sense to reduce the chance of introducing such errors again later.

In order to prevent such errors, change the qcom_alloc_bam_transaction()
function to store the number of elements of each array in the
'bam_transaction' structure at allocation time. Also, add sanity checks to
the qcom_prep_bam_dma_desc_{cmd,data}() functions to avoid using
out-of-bounds indices for the arrays.

Tested-by: Lakshmi Sowjanya D <quic_laksd@quicinc.com> # on SDX75
Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
Link: https://patch.msgid.link/20250618-qpic-snand-avoid-mem-corruption-v3-2-319c71296cda@gmail.com
Signed-off-by: Mark Brown <broonie@kernel.org>
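
As a rough illustration of the pattern this change follows (a minimal
userspace-style sketch with made-up names, not the actual driver code):
record the capacity of each array when the transaction is allocated, and
have every producer validate its index against that capacity before
writing.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for one of the bam_transaction arrays: the capacity
 * is recorded next to the running position, so producers can check every
 * write instead of trusting the caller to have allocated enough space. */
struct bounded_array {
	unsigned int pos;	/* next free slot */
	unsigned int nitems;	/* capacity, fixed at allocation time */
	unsigned int *items;
};

static struct bounded_array *bounded_array_alloc(unsigned int nitems)
{
	struct bounded_array *a = calloc(1, sizeof(*a));

	if (!a)
		return NULL;

	a->items = calloc(nitems, sizeof(*a->items));
	if (!a->items) {
		free(a);
		return NULL;
	}

	a->nitems = nitems;	/* remember the capacity, like the *_nitems fields */
	return a;
}

/* Mirrors the added sanity checks: refuse to advance past the recorded
 * capacity and report an error instead of silently corrupting memory. */
static int bounded_array_append(struct bounded_array *a, unsigned int val)
{
	if (a->pos >= a->nitems) {
		fprintf(stderr, "array is full\n");
		return -EINVAL;
	}

	a->items[a->pos++] = val;
	return 0;
}

int main(void)
{
	struct bounded_array *a = bounded_array_alloc(2);
	int i;

	if (!a)
		return 1;

	/* The third append fails with -EINVAL instead of writing past the end. */
	for (i = 0; i < 3; i++)
		printf("append %d -> %d\n", i, bounded_array_append(a, i));

	free(a->items);
	free(a);
	return 0;
}

In the actual patch below, the same idea is applied per array (bam_ce,
cmd_sgl and data_sgl), with the capacities derived from the number of
codewords at allocation time.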
drivers/mtd/nand/qpic_common.c
include/linux/mtd/nand-qpic-common.h

index 4dc4d65e7d323e2843edecca8e3849a5090b775d..8e604cc22ca310159edf4d8dbc2f6a82d5119eb4 100644 (file)
@@ -57,14 +57,15 @@ qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
        bam_txn_buf += sizeof(*bam_txn);
 
        bam_txn->bam_ce = bam_txn_buf;
-       bam_txn_buf +=
-               sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+       bam_txn->bam_ce_nitems = QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+       bam_txn_buf += sizeof(*bam_txn->bam_ce) * bam_txn->bam_ce_nitems;
 
        bam_txn->cmd_sgl = bam_txn_buf;
-       bam_txn_buf +=
-               sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+       bam_txn->cmd_sgl_nitems = QPIC_PER_CW_CMD_SGL * num_cw;
+       bam_txn_buf += sizeof(*bam_txn->cmd_sgl) * bam_txn->cmd_sgl_nitems;
 
        bam_txn->data_sgl = bam_txn_buf;
+       bam_txn->data_sgl_nitems = QPIC_PER_CW_DATA_SGL * num_cw;
 
        init_completion(&bam_txn->txn_done);
 
@@ -238,6 +239,11 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
        struct bam_transaction *bam_txn = nandc->bam_txn;
        u32 offset;
 
+       if (bam_txn->bam_ce_pos + size > bam_txn->bam_ce_nitems) {
+               dev_err(nandc->dev, "BAM %s array is full\n", "CE");
+               return -EINVAL;
+       }
+
        bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
 
        /* fill the command desc */
@@ -258,6 +264,12 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
 
        /* use the separate sgl after this command */
        if (flags & NAND_BAM_NEXT_SGL) {
+               if (bam_txn->cmd_sgl_pos >= bam_txn->cmd_sgl_nitems) {
+                       dev_err(nandc->dev, "BAM %s array is full\n",
+                               "CMD sgl");
+                       return -EINVAL;
+               }
+
                bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
                bam_ce_size = (bam_txn->bam_ce_pos -
                                bam_txn->bam_ce_start) *
@@ -297,10 +309,20 @@ int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
        struct bam_transaction *bam_txn = nandc->bam_txn;
 
        if (read) {
+               if (bam_txn->rx_sgl_pos >= bam_txn->data_sgl_nitems) {
+                       dev_err(nandc->dev, "BAM %s array is full\n", "RX sgl");
+                       return -EINVAL;
+               }
+
                sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
                           vaddr, size);
                bam_txn->rx_sgl_pos++;
        } else {
+               if (bam_txn->tx_sgl_pos >= bam_txn->data_sgl_nitems) {
+                       dev_err(nandc->dev, "BAM %s array is full\n", "TX sgl");
+                       return -EINVAL;
+               }
+
                sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
                           vaddr, size);
                bam_txn->tx_sgl_pos++;
index e8462deda6dbf61f99bbcb39e7cb12cdf66898fd..f0aa098a395f7140c3c4ad5640f973293f73a1cc 100644 (file)
  * @last_data_desc - last DMA desc in data channel (tx/rx).
  * @last_cmd_desc - last DMA desc in command channel.
  * @txn_done - completion for NAND transfer.
+ * @bam_ce_nitems - the number of elements in the @bam_ce array
+ * @cmd_sgl_nitems - the number of elements in the @cmd_sgl array
+ * @data_sgl_nitems - the number of elements in the @data_sgl array
  * @bam_ce_pos - the index in bam_ce which is available for next sgl
  * @bam_ce_start - the index in bam_ce which marks the start position ce
  *                for current sgl. It will be used for size calculation
@@ -255,6 +258,11 @@ struct bam_transaction {
        struct dma_async_tx_descriptor *last_data_desc;
        struct dma_async_tx_descriptor *last_cmd_desc;
        struct completion txn_done;
+
+       unsigned int bam_ce_nitems;
+       unsigned int cmd_sgl_nitems;
+       unsigned int data_sgl_nitems;
+
        struct_group(bam_positions,
                u32 bam_ce_pos;
                u32 bam_ce_start;