wifi-ath6kl-remove-warn-on-bad-firmware-input.patch
acpica-refuse-to-evaluate-a-method-if-arguments-are-.patch
rcu-return-early-if-callback-is-not-specified.patch
-virtio_ring-introduce-dma-map-api-for-virtqueue.patch
-virtio_ring-introduce-dma-sync-api-for-virtqueue.patch
-virtio-net-ensure-the-received-length-does-not-excee.patch
regulator-gpio-fix-the-out-of-bounds-access-to-drvda.patch
mmc-mediatek-use-data-instead-of-mrq-parameter-from-.patch
mtk-sd-prevent-memory-corruption-from-dma-map-failur.patch
+++ /dev/null
-From bb037700dfb1d8734a583f76d2550eafbcc75a1d Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 7 Jul 2025 10:06:07 -0400
-Subject: virtio-net: ensure the received length does not exceed allocated size
-
-From: Bui Quang Minh <minhquangbui99@gmail.com>
-
-[ Upstream commit 315dbdd7cdf6aa533829774caaf4d25f1fd20e73 ]
-
-In xdp_linearize_page, when reading the following buffers from the ring,
-we forget to check the received length against the true allocated size.
-This can lead to an out-of-bounds read. This commit adds the missing check.
-
-Cc: <stable@vger.kernel.org>
-Fixes: 4941d472bf95 ("virtio-net: do not reset during XDP set")
-Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
-Acked-by: Jason Wang <jasowang@redhat.com>
-Link: https://patch.msgid.link/20250630144212.48471-2-minhquangbui99@gmail.com
-Signed-off-by: Paolo Abeni <pabeni@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/net/virtio_net.c | 44 +++++++++++++++++++++++++++++++++-------
- 1 file changed, 37 insertions(+), 7 deletions(-)
-
-diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
-index 99dea89b26788..3de39df3462c7 100644
---- a/drivers/net/virtio_net.c
-+++ b/drivers/net/virtio_net.c
-@@ -394,6 +394,26 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
- return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
- }
-
-+static int check_mergeable_len(struct net_device *dev, void *mrg_ctx,
-+ unsigned int len)
-+{
-+ unsigned int headroom, tailroom, room, truesize;
-+
-+ truesize = mergeable_ctx_to_truesize(mrg_ctx);
-+ headroom = mergeable_ctx_to_headroom(mrg_ctx);
-+ tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
-+ room = SKB_DATA_ALIGN(headroom + tailroom);
-+
-+ if (len > truesize - room) {
-+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-+ dev->name, len, (unsigned long)(truesize - room));
-+ DEV_STATS_INC(dev, rx_length_errors);
-+ return -1;
-+ }
-+
-+ return 0;
-+}
-+
- /* Called from bottom half context */
- static struct sk_buff *page_to_skb(struct virtnet_info *vi,
- struct receive_queue *rq,
-@@ -639,8 +659,9 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
- * across multiple buffers (num_buf > 1), and we make sure buffers
- * have enough headroom.
- */
--static struct page *xdp_linearize_page(struct receive_queue *rq,
-- u16 *num_buf,
-+static struct page *xdp_linearize_page(struct net_device *dev,
-+ struct receive_queue *rq,
-+ int *num_buf,
- struct page *p,
- int offset,
- int page_off,
-@@ -659,18 +680,27 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
- memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
- page_off += *len;
-
-+ /* Only mergeable mode can go inside this while loop. In small mode,
-+ * *num_buf == 1, so it cannot go inside.
-+ */
- while (--*num_buf) {
- unsigned int buflen;
- void *buf;
-+ void *ctx;
- int off;
-
-- buf = virtqueue_get_buf(rq->vq, &buflen);
-+ buf = virtqueue_get_buf_ctx(rq->vq, &buflen, &ctx);
- if (unlikely(!buf))
- goto err_buf;
-
- p = virt_to_head_page(buf);
- off = buf - page_address(p);
-
-+ if (check_mergeable_len(dev, ctx, buflen)) {
-+ put_page(p);
-+ goto err_buf;
-+ }
-+
- /* guard against a misconfigured or uncooperative backend that
- * is sending packet larger than the MTU.
- */
-@@ -738,14 +768,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
- if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
- int offset = buf - page_address(page) + header_offset;
- unsigned int tlen = len + vi->hdr_len;
-- u16 num_buf = 1;
-+ int num_buf = 1;
-
- xdp_headroom = virtnet_get_headroom(vi);
- header_offset = VIRTNET_RX_PAD + xdp_headroom;
- headroom = vi->hdr_len + header_offset;
- buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-- xdp_page = xdp_linearize_page(rq, &num_buf, page,
-+ xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
- offset, header_offset,
- &tlen);
- if (!xdp_page)
-@@ -866,7 +896,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
- struct virtnet_rq_stats *stats)
- {
- struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
-- u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
-+ int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
- struct page *page = virt_to_head_page(buf);
- int offset = buf - page_address(page);
- struct sk_buff *head_skb, *curr_skb;
-@@ -916,7 +946,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
- if (unlikely(num_buf > 1 ||
- headroom < virtnet_get_headroom(vi))) {
- /* linearize data for XDP */
-- xdp_page = xdp_linearize_page(rq, &num_buf,
-+ xdp_page = xdp_linearize_page(vi->dev, rq, &num_buf,
- page, offset,
- VIRTIO_XDP_HEADROOM,
- &len);
---
-2.39.5
-
+++ /dev/null
-From fc185d9d91feecf009348f76e2ed624bb53b4f2d Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Aug 2023 20:30:55 +0800
-Subject: virtio_ring: introduce dma map api for virtqueue
-
-From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
-
-[ Upstream commit b6253b4e21939f1bb54e8fdb84c23af9c3fb834a ]
-
-Add the virtqueue_dma_map_* APIs to map virtual memory to DMA addresses
-in advance. The purpose is to keep memory mapped across multiple add/get
-buf operations.
-
-Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
-Message-Id: <20230810123057.43407-11-xuanzhuo@linux.alibaba.com>
-Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-Stable-dep-of: 315dbdd7cdf6 ("virtio-net: ensure the received length does not exceed allocated size")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/virtio/virtio_ring.c | 69 ++++++++++++++++++++++++++++++++++++
- include/linux/virtio.h | 8 +++++
- 2 files changed, 77 insertions(+)
-
-diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
-index cf0e8e1893ee6..4bd5af5fd819d 100644
---- a/drivers/virtio/virtio_ring.c
-+++ b/drivers/virtio/virtio_ring.c
-@@ -2349,4 +2349,73 @@ const struct vring *virtqueue_get_vring(struct virtqueue *vq)
- }
- EXPORT_SYMBOL_GPL(virtqueue_get_vring);
-
-+/**
-+ * virtqueue_dma_map_single_attrs - map DMA for _vq
-+ * @_vq: the struct virtqueue we're talking about.
-+ * @ptr: the pointer of the buffer to do dma
-+ * @size: the size of the buffer to do dma
-+ * @dir: DMA direction
-+ * @attrs: DMA Attrs
-+ *
-+ * The caller calls this to do dma mapping in advance. The DMA address can be
-+ * passed to this _vq when it is in pre-mapped mode.
-+ *
-+ * return DMA address. Caller should check that by virtqueue_dma_mapping_error().
-+ */
-+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
-+ size_t size,
-+ enum dma_data_direction dir,
-+ unsigned long attrs)
-+{
-+ struct vring_virtqueue *vq = to_vvq(_vq);
-+
-+ if (!vq->use_dma_api)
-+ return (dma_addr_t)virt_to_phys(ptr);
-+
-+ return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
-+}
-+EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
-+
-+/**
-+ * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
-+ * @_vq: the struct virtqueue we're talking about.
-+ * @addr: the dma address to unmap
-+ * @size: the size of the buffer
-+ * @dir: DMA direction
-+ * @attrs: DMA Attrs
-+ *
-+ * Unmap the address that is mapped by the virtqueue_dma_map_* APIs.
-+ *
-+ */
-+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
-+ size_t size, enum dma_data_direction dir,
-+ unsigned long attrs)
-+{
-+ struct vring_virtqueue *vq = to_vvq(_vq);
-+
-+ if (!vq->use_dma_api)
-+ return;
-+
-+ dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
-+}
-+EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);
-+
-+/**
-+ * virtqueue_dma_mapping_error - check dma address
-+ * @_vq: the struct virtqueue we're talking about.
-+ * @addr: DMA address
-+ *
-+ * Returns 0 means dma valid. Other means invalid dma address.
-+ */
-+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
-+{
-+ struct vring_virtqueue *vq = to_vvq(_vq);
-+
-+ if (!vq->use_dma_api)
-+ return 0;
-+
-+ return dma_mapping_error(vring_dma_dev(vq), addr);
-+}
-+EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
-+
- MODULE_LICENSE("GPL");
-diff --git a/include/linux/virtio.h b/include/linux/virtio.h
-index 90c5ad5568097..0ad13391f7c6b 100644
---- a/include/linux/virtio.h
-+++ b/include/linux/virtio.h
-@@ -9,6 +9,7 @@
- #include <linux/device.h>
- #include <linux/mod_devicetable.h>
- #include <linux/gfp.h>
-+#include <linux/dma-mapping.h>
-
- /**
- * virtqueue - a queue to register buffers for sending or receiving.
-@@ -196,4 +197,11 @@ void unregister_virtio_driver(struct virtio_driver *drv);
- #define module_virtio_driver(__virtio_driver) \
- module_driver(__virtio_driver, register_virtio_driver, \
- unregister_virtio_driver)
-+
-+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
-+ enum dma_data_direction dir, unsigned long attrs);
-+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
-+ size_t size, enum dma_data_direction dir,
-+ unsigned long attrs);
-+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
- #endif /* _LINUX_VIRTIO_H */
---
-2.39.5
-
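A minimal usage sketch, assuming a hypothetical driver that wants to keep a
receive buffer mapped across several add/get buf cycles; the helper names,
buffer and DMA direction are illustrative, and only the virtqueue_dma_* calls
come from the patch above:

/* Illustrative sketch only: premap a driver-owned buffer with the helpers
 * introduced above, checking the result as the kerneldoc asks.
 */
static dma_addr_t premap_rx_buf(struct virtqueue *vq, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = virtqueue_dma_map_single_attrs(vq, buf, len,
					      DMA_FROM_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, addr))
		return DMA_MAPPING_ERROR;	/* caller falls back to the unmapped path */

	return addr;
}

/* Release the mapping once the buffer is no longer handed to the ring. */
static void unmap_rx_buf(struct virtqueue *vq, dma_addr_t addr, size_t len)
{
	virtqueue_dma_unmap_single_attrs(vq, addr, len, DMA_FROM_DEVICE, 0);
}
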
+++ /dev/null
-From 7505756a61ead8f60e6e0096f013cfaa21fe8f55 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Aug 2023 20:30:56 +0800
-Subject: virtio_ring: introduce dma sync api for virtqueue
-
-From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
-
-[ Upstream commit 8bd2f71054bd0bc997833e9825143672eb7e2801 ]
-
-These APIs have been introduced:
-
-* virtqueue_dma_need_sync
-* virtqueue_dma_sync_single_range_for_cpu
-* virtqueue_dma_sync_single_range_for_device
-
-These APIs can be used together with the premapped mechanism to sync the
-DMA address.
-
-Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
-Message-Id: <20230810123057.43407-12-xuanzhuo@linux.alibaba.com>
-Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-Stable-dep-of: 315dbdd7cdf6 ("virtio-net: ensure the received length does not exceed allocated size")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/virtio/virtio_ring.c | 76 ++++++++++++++++++++++++++++++++++++
- include/linux/virtio.h | 8 ++++
- 2 files changed, 84 insertions(+)
-
-diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
-index 4bd5af5fd819d..7e5b30ea8c8e2 100644
---- a/drivers/virtio/virtio_ring.c
-+++ b/drivers/virtio/virtio_ring.c
-@@ -2418,4 +2418,80 @@ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
- }
- EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
-
-+/**
-+ * virtqueue_dma_need_sync - check a dma address needs sync
-+ * @_vq: the struct virtqueue we're talking about.
-+ * @addr: DMA address
-+ *
-+ * Check if the dma address mapped by the virtqueue_dma_map_* APIs needs to be
-+ * synchronized
-+ *
-+ * return bool
-+ */
-+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
-+{
-+ struct vring_virtqueue *vq = to_vvq(_vq);
-+
-+ if (!vq->use_dma_api)
-+ return false;
-+
-+ return dma_need_sync(vring_dma_dev(vq), addr);
-+}
-+EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
-+
-+/**
-+ * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
-+ * @_vq: the struct virtqueue we're talking about.
-+ * @addr: DMA address
-+ * @offset: DMA address offset
-+ * @size: buf size for sync
-+ * @dir: DMA direction
-+ *
-+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
-+ * the DMA address really needs to be synchronized
-+ *
-+ */
-+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
-+ dma_addr_t addr,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction dir)
-+{
-+ struct vring_virtqueue *vq = to_vvq(_vq);
-+ struct device *dev = vring_dma_dev(vq);
-+
-+ if (!vq->use_dma_api)
-+ return;
-+
-+ dma_sync_single_range_for_cpu(dev, addr, offset, size,
-+ DMA_BIDIRECTIONAL);
-+}
-+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
-+
-+/**
-+ * virtqueue_dma_sync_single_range_for_device - dma sync for device
-+ * @_vq: the struct virtqueue we're talking about.
-+ * @addr: DMA address
-+ * @offset: DMA address offset
-+ * @size: buf size for sync
-+ * @dir: DMA direction
-+ *
-+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
-+ * the DMA address really needs to be synchronized
-+ */
-+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
-+ dma_addr_t addr,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction dir)
-+{
-+ struct vring_virtqueue *vq = to_vvq(_vq);
-+ struct device *dev = vring_dma_dev(vq);
-+
-+ if (!vq->use_dma_api)
-+ return;
-+
-+ dma_sync_single_range_for_device(dev, addr, offset, size,
-+ DMA_BIDIRECTIONAL);
-+}
-+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
-+
- MODULE_LICENSE("GPL");
-diff --git a/include/linux/virtio.h b/include/linux/virtio.h
-index 0ad13391f7c6b..5d3b16dc3913c 100644
---- a/include/linux/virtio.h
-+++ b/include/linux/virtio.h
-@@ -204,4 +204,12 @@ void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
-+
-+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
-+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction dir);
-+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction dir);
- #endif /* _LINUX_VIRTIO_H */
---
-2.39.5
-
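A matching sketch for the sync helpers, again with illustrative names; a
driver would sync a premapped buffer back to the CPU before reading what the
device wrote into it:

/* Illustrative sketch only: sync a premapped RX buffer for CPU access,
 * skipping the work when virtqueue_dma_need_sync() says it is not needed.
 */
static void rx_buf_sync_for_cpu(struct virtqueue *vq, dma_addr_t addr,
				unsigned long offset, size_t len)
{
	if (!virtqueue_dma_need_sync(vq, addr))
		return;

	virtqueue_dma_sync_single_range_for_cpu(vq, addr, offset, len,
						DMA_FROM_DEVICE);
}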