From: Sasha Levin
Date: Thu, 23 Feb 2023 02:42:41 +0000 (-0500)
Subject: Fixes for 5.4
X-Git-Tag: v6.2.1~36
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=693e524dc9163d027c6fa27c4383ecdae9b90fc9;p=thirdparty%2Fkernel%2Fstable-queue.git

Fixes for 5.4

Signed-off-by: Sasha Levin
---

diff --git a/queue-5.4/can-kvaser_usb-hydra-help-gcc-13-to-figure-out-cmd_l.patch b/queue-5.4/can-kvaser_usb-hydra-help-gcc-13-to-figure-out-cmd_l.patch
new file mode 100644
index 00000000000..c7e9c6a4d00
--- /dev/null
+++ b/queue-5.4/can-kvaser_usb-hydra-help-gcc-13-to-figure-out-cmd_l.patch
@@ -0,0 +1,248 @@
+From e6bbbdf7e7991b71f44c552a7511cfdbf27051c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Mon, 19 Dec 2022 11:39:27 +0100
+Subject: can: kvaser_usb: hydra: help gcc-13 to figure out cmd_len
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marc Kleine-Budde
+
+[ Upstream commit f006229135b7debf4037adb1eb93e358559593db ]
+
+Debian's gcc-13 [1] throws the following error in
+kvaser_usb_hydra_cmd_size():
+
+[1] gcc version 13.0.0 20221214 (experimental) [master r13-4693-g512098a3316] (Debian 13-20221214-1)
+
+| drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c:502:65: error:
+| array subscript ‘struct kvaser_cmd_ext[0]’ is partly outside array
+| bounds of ‘unsigned char[32]’ [-Werror=array-bounds=]
+|   502 |         ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len);
+
+kvaser_usb_hydra_cmd_size() returns the size of the given command. It
+depends on the command number (cmd->header.cmd_no). For extended
+commands (cmd->header.cmd_no == CMD_EXTENDED) the code shown above is
+executed.
+
+Help gcc to recognize that this code path is not taken in all cases,
+by calling kvaser_usb_hydra_cmd_size() directly after assigning the
+command number.
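As a compilable sketch of the pattern the patch applies (simplified,
hypothetical types and helpers, not the driver's real definitions):

	#include <stddef.h>
	#include <string.h>

	/* Stand-ins for the driver's command structures. */
	struct cmd {
		unsigned char cmd_no;
		unsigned char payload[31];
	};

	/* Like kvaser_usb_hydra_cmd_size(): the size depends on cmd_no. */
	static size_t cmd_size(const struct cmd *cmd)
	{
		(void)cmd;	/* an extended command would yield a larger size */
		return sizeof(*cmd);
	}

	static int send_cmd(const struct cmd *cmd, size_t len)
	{
		(void)cmd; (void)len;	/* stand-in for the USB transfer */
		return 0;
	}

	static int send_simple_cmd(unsigned char cmd_no)
	{
		struct cmd c;
		size_t cmd_len;

		memset(&c, 0, sizeof(c));
		c.cmd_no = cmd_no;
		/*
		 * Evaluate the size right after cmd_no is assigned, while the
		 * command number is visibly known, instead of inside the send
		 * call below, where gcc has to reason across more code.
		 */
		cmd_len = cmd_size(&c);

		return send_cmd(&c, cmd_len);
	}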
+ +Fixes: aec5fb2268b7 ("can: kvaser_usb: Add support for Kvaser USB hydra family") +Cc: Jimmy Assarsson +Cc: Anssi Hannula +Link: https://lore.kernel.org/all/20221219110104.1073881-1-mkl@pengutronix.de +Tested-by: Jimmy Assarsson +Signed-off-by: Marc Kleine-Budde +Signed-off-by: Sasha Levin +--- + .../net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 33 ++++++++++++++----- + 1 file changed, 24 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +index 2764fdd7e84b3..233bbfeaa771e 100644 +--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c ++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +@@ -518,6 +518,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, + u8 cmd_no, int channel) + { + struct kvaser_cmd *cmd; ++ size_t cmd_len; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); +@@ -525,6 +526,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, + return -ENOMEM; + + cmd->header.cmd_no = cmd_no; ++ cmd_len = kvaser_usb_hydra_cmd_size(cmd); + if (channel < 0) { + kvaser_usb_hydra_set_cmd_dest_he + (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); +@@ -541,7 +543,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + +- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); ++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len); + if (err) + goto end; + +@@ -557,6 +559,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, + { + struct kvaser_cmd *cmd; + struct kvaser_usb *dev = priv->dev; ++ size_t cmd_len; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); +@@ -564,14 +567,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, + return -ENOMEM; + + cmd->header.cmd_no = cmd_no; ++ cmd_len = kvaser_usb_hydra_cmd_size(cmd); + + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + +- err = kvaser_usb_send_cmd_async(priv, cmd, +- kvaser_usb_hydra_cmd_size(cmd)); ++ err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len); + if (err) + kfree(cmd); + +@@ -715,6 +718,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, + { + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + struct kvaser_cmd *cmd; ++ size_t cmd_len; + u32 value = 0; + u32 mask = 0; + u16 cap_cmd_res; +@@ -726,13 +730,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; ++ cmd_len = kvaser_usb_hydra_cmd_size(cmd); + cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); + + kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + +- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); ++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len); + if (err) + goto end; + +@@ -1555,6 +1560,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, + struct kvaser_usb *dev = priv->dev; + struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; + struct kvaser_cmd *cmd; ++ size_t cmd_len; + int err; + + if (!hydra) +@@ -1565,6 +1571,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, + return -ENOMEM; + + 
cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; ++ cmd_len = kvaser_usb_hydra_cmd_size(cmd); + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid +@@ -1574,7 +1581,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, + + reinit_completion(&priv->get_busparams_comp); + +- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); ++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len); + if (err) + return err; + +@@ -1601,6 +1608,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, + struct kvaser_cmd *cmd; + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; ++ size_t cmd_len; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); +@@ -1608,6 +1616,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, + return -ENOMEM; + + cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; ++ cmd_len = kvaser_usb_hydra_cmd_size(cmd); + memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, + sizeof(cmd->set_busparams_req.busparams_nominal)); + +@@ -1616,7 +1625,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + +- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); ++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len); + + kfree(cmd); + +@@ -1629,6 +1638,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, + struct kvaser_cmd *cmd; + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; ++ size_t cmd_len; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); +@@ -1636,6 +1646,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, + return -ENOMEM; + + cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; ++ cmd_len = kvaser_usb_hydra_cmd_size(cmd); + memcpy(&cmd->set_busparams_req.busparams_data, busparams, + sizeof(cmd->set_busparams_req.busparams_data)); + +@@ -1653,7 +1664,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + +- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); ++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len); + + kfree(cmd); + +@@ -1781,6 +1792,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) + static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) + { + struct kvaser_cmd *cmd; ++ size_t cmd_len; + int err; + u32 flags; + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; +@@ -1790,6 +1802,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; ++ cmd_len = kvaser_usb_hydra_cmd_size(cmd); + cmd->sw_detail_req.use_ext_cmd = 1; + kvaser_usb_hydra_set_cmd_dest_he + (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); +@@ -1797,7 +1810,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + +- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); ++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len); + if (err) + goto end; + +@@ -1913,6 +1926,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) + { + struct kvaser_usb *dev = 
priv->dev;
+ 	struct kvaser_cmd *cmd;
++	size_t cmd_len;
+ 	int err;
+ 
+ 	if ((priv->can.ctrlmode &
+@@ -1928,6 +1942,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
++	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ 	kvaser_usb_hydra_set_cmd_dest_he
+ 		(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ 	kvaser_usb_hydra_set_cmd_transid
+@@ -1937,7 +1952,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+ 	else
+ 		cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
+ 
+-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ 	kfree(cmd);
+ 
+ 	return err;
+-- 
+2.39.0
+
diff --git a/queue-5.4/dma-mapping-add-generic-helpers-for-mapping-sgtable-.patch b/queue-5.4/dma-mapping-add-generic-helpers-for-mapping-sgtable-.patch
new file mode 100644
index 00000000000..7a3eb871022
--- /dev/null
+++ b/queue-5.4/dma-mapping-add-generic-helpers-for-mapping-sgtable-.patch
@@ -0,0 +1,126 @@
+From d96c86d7eeb8b13b6faa1aa32ac2683a2766b739 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 13 May 2020 15:32:08 +0200
+Subject: dma-mapping: add generic helpers for mapping sgtable objects
+
+From: Marek Szyprowski
+
+[ Upstream commit d9d200bcebc1f6e56f0178cbb8db9953e8cc9a11 ]
+
+struct sg_table is a common structure used for describing a memory
+buffer. It consists of a scatterlist with memory pages and DMA addresses
+(sgl entry), as well as the number of scatterlist entries: CPU pages
+(orig_nents entry) and DMA mapped pages (nents entry).
+
+It turned out that it was a common mistake to misuse nents and orig_nents
+entries, calling DMA-mapping functions with the wrong number of entries or
+ignoring the number of mapped entries returned by the dma_map_sg
+function.
+
+To avoid such issues, let's introduce common wrappers operating
+directly on the struct sg_table objects, which take care of the proper
+use of the nents and orig_nents entries.
+
+Signed-off-by: Marek Szyprowski
+Reviewed-by: Robin Murphy
+Signed-off-by: Christoph Hellwig
+Stable-dep-of: d37c120b7312 ("drm/etnaviv: don't truncate physical page address")
+Signed-off-by: Sasha Levin
+---
+ include/linux/dma-mapping.h | 80 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 80 insertions(+)
+
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index 4d450672b7d66..87cbae4b051f1 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -612,6 +612,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
+ 	return dma_sync_single_for_device(dev, addr + offset, size, dir);
+ }
+ 
++/**
++ * dma_map_sgtable - Map the given buffer for DMA
++ * @dev: The device for which to perform the DMA operation
++ * @sgt: The sg_table object describing the buffer
++ * @dir: DMA direction
++ * @attrs: Optional DMA attributes for the map operation
++ *
++ * Maps a buffer described by a scatterlist stored in the given sg_table
++ * object for the @dir DMA operation by the @dev device. After success the
++ * ownership for the buffer is transferred to the DMA domain. One has to
++ * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
++ * ownership of the buffer back to the CPU domain before touching the
++ * buffer by the CPU.
++ *
++ * Returns 0 on success or -EINVAL on error during mapping the buffer.
++ */ ++static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, ++ enum dma_data_direction dir, unsigned long attrs) ++{ ++ int nents; ++ ++ nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); ++ if (nents <= 0) ++ return -EINVAL; ++ sgt->nents = nents; ++ return 0; ++} ++ ++/** ++ * dma_unmap_sgtable - Unmap the given buffer for DMA ++ * @dev: The device for which to perform the DMA operation ++ * @sgt: The sg_table object describing the buffer ++ * @dir: DMA direction ++ * @attrs: Optional DMA attributes for the unmap operation ++ * ++ * Unmaps a buffer described by a scatterlist stored in the given sg_table ++ * object for the @dir DMA operation by the @dev device. After this function ++ * the ownership of the buffer is transferred back to the CPU domain. ++ */ ++static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, ++ enum dma_data_direction dir, unsigned long attrs) ++{ ++ dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); ++} ++ ++/** ++ * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access ++ * @dev: The device for which to perform the DMA operation ++ * @sgt: The sg_table object describing the buffer ++ * @dir: DMA direction ++ * ++ * Performs the needed cache synchronization and moves the ownership of the ++ * buffer back to the CPU domain, so it is safe to perform any access to it ++ * by the CPU. Before doing any further DMA operations, one has to transfer ++ * the ownership of the buffer back to the DMA domain by calling the ++ * dma_sync_sgtable_for_device(). ++ */ ++static inline void dma_sync_sgtable_for_cpu(struct device *dev, ++ struct sg_table *sgt, enum dma_data_direction dir) ++{ ++ dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir); ++} ++ ++/** ++ * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA ++ * @dev: The device for which to perform the DMA operation ++ * @sgt: The sg_table object describing the buffer ++ * @dir: DMA direction ++ * ++ * Performs the needed cache synchronization and moves the ownership of the ++ * buffer back to the DMA domain, so it is safe to perform the DMA operation. ++ * Once finished, one has to call dma_sync_sgtable_for_cpu() or ++ * dma_unmap_sgtable(). ++ */ ++static inline void dma_sync_sgtable_for_device(struct device *dev, ++ struct sg_table *sgt, enum dma_data_direction dir) ++{ ++ dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir); ++} ++ + #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) + #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) + #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) +-- +2.39.0 + diff --git a/queue-5.4/drm-etnaviv-don-t-truncate-physical-page-address.patch b/queue-5.4/drm-etnaviv-don-t-truncate-physical-page-address.patch new file mode 100644 index 00000000000..2d4bb6ab718 --- /dev/null +++ b/queue-5.4/drm-etnaviv-don-t-truncate-physical-page-address.patch @@ -0,0 +1,42 @@ +From 4c93841d7527d7b6431c0c0c60cb65cd4c8211da Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 16 Sep 2022 12:40:31 +0200 +Subject: drm/etnaviv: don't truncate physical page address + +From: Lucas Stach + +[ Upstream commit d37c120b73128690434cc093952439eef9d56af1 ] + +While the interface for the MMU mapping takes phys_addr_t to hold a +full 64bit address when necessary and MMUv2 is able to map physical +addresses with up to 40bit, etnaviv_iommu_map() truncates the address +to 32bits. Fix this by using the correct type. 
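To see what the old u32 intermediate loses, a small standalone
demonstration (plain C, not kernel code; the 40-bit address value is
made up):

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	int main(void)
	{
		uint64_t pa = 0x1234567800ULL;		/* a 40-bit physical address */
		uint32_t truncated = (uint32_t)pa;	/* the old bug: high bits silently dropped */

		printf("full:      0x%" PRIx64 "\n", pa);		/* 0x1234567800 */
		printf("truncated: 0x%" PRIx32 "\n", truncated);	/* 0x34567800 */
		return 0;
	}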
+
+Fixes: 931e97f3afd8 ("drm/etnaviv: mmuv2: support 40 bit phys address")
+Signed-off-by: Lucas Stach
+Reviewed-by: Philipp Zabel
+Signed-off-by: Sasha Levin
+---
+ drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+index 9ba2fe48228f1..44fbc0a123bf3 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+@@ -80,10 +80,10 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+ 		return -EINVAL;
+ 
+ 	for_each_sgtable_dma_sg(sgt, sg, i) {
+-		u32 pa = sg_dma_address(sg) - sg->offset;
++		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
+ 		size_t bytes = sg_dma_len(sg) + sg->offset;
+ 
+-		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
++		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+ 
+ 		ret = etnaviv_context_map(context, da, pa, bytes, prot);
+ 		if (ret)
+-- 
+2.39.0
+
diff --git a/queue-5.4/drm-etnaviv-fix-common-struct-sg_table-related-issue.patch b/queue-5.4/drm-etnaviv-fix-common-struct-sg_table-related-issue.patch
new file mode 100644
index 00000000000..d7b4ff4cc91
--- /dev/null
+++ b/queue-5.4/drm-etnaviv-fix-common-struct-sg_table-related-issue.patch
@@ -0,0 +1,134 @@
+From dbecfeab174d4ab9e939a3b084cf970d05f88a8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Tue, 28 Apr 2020 13:08:23 +0200
+Subject: drm: etnaviv: fix common struct sg_table related issues
+
+From: Marek Szyprowski
+
+[ Upstream commit 182354a526a0540c9197e03d9fce8a949ffd36ca ]
+
+The Documentation/DMA-API-HOWTO.txt states that the dma_map_sg() function
+returns the number of the created entries in the DMA address space.
+However, the subsequent calls to dma_sync_sg_for_{device,cpu}() and
+dma_unmap_sg() must be called with the original number of entries
+passed to dma_map_sg().
+
+struct sg_table is a common structure used for describing a non-contiguous
+memory buffer, used commonly in the DRM and graphics subsystems. It
+consists of a scatterlist with memory pages and DMA addresses (sgl entry),
+as well as the number of scatterlist entries: CPU pages (orig_nents entry)
+and DMA mapped pages (nents entry).
+
+It turned out that it was a common mistake to misuse nents and orig_nents
+entries, calling DMA-mapping functions with a wrong number of entries or
+ignoring the number of mapped entries returned by the dma_map_sg()
+function.
+
+To avoid such issues, let's use the common dma-mapping wrappers operating
+directly on the struct sg_table objects and use scatterlist page
+iterators where possible. This, almost always, hides references to the
+nents and orig_nents entries, making the code robust, easier to follow
+and copy/paste safe.
+
+Signed-off-by: Marek Szyprowski
+Reviewed-by: Robin Murphy
+Acked-by: Lucas Stach
+Stable-dep-of: d37c120b7312 ("drm/etnaviv: don't truncate physical page address")
+Signed-off-by: Sasha Levin
+---
+ drivers/gpu/drm/etnaviv/etnaviv_gem.c | 12 +++++-------
+ drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 15 ++++-----------
+ 2 files changed, 9 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index 519948637186e..5107a0f5bc7fe 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
+ 	 * because display controller, GPU, etc. are not coherent.
+ 	 */
+ 	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
+-		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
++		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ }
+ 
+ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
+@@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj
+ 	 * discard those writes.
+ 	 */
+ 	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
+-		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
++		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ }
+ 
+ /* called with etnaviv_obj->lock held */
+@@ -403,9 +403,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
+ 	}
+ 
+ 	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
+-		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
+-				    etnaviv_obj->sgt->nents,
+-				    etnaviv_op_to_dma_dir(op));
++		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
++					 etnaviv_op_to_dma_dir(op));
+ 		etnaviv_obj->last_cpu_prep_op = op;
+ 	}
+ 
+@@ -420,8 +419,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
+ 	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
+ 		/* fini without a prep is almost certainly a userspace error */
+ 		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
+-		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
+-				       etnaviv_obj->sgt->nents,
++		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
+ 			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
+ 		etnaviv_obj->last_cpu_prep_op = 0;
+ 	}
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+index 790cbb20aaeba..9ba2fe48228f1 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+@@ -73,13 +73,13 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+ 		    struct sg_table *sgt, unsigned len, int prot)
+ {
+ 	struct scatterlist *sg;
+ 	unsigned int da = iova;
+-	unsigned int i, j;
++	unsigned int i;
+ 	int ret;
+ 
+ 	if (!context || !sgt)
+ 		return -EINVAL;
+ 
+-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
++	for_each_sgtable_dma_sg(sgt, sg, i) {
+ 		u32 pa = sg_dma_address(sg) - sg->offset;
+ 		size_t bytes = sg_dma_len(sg) + sg->offset;
+ 
+@@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+ 	return 0;
+ 
+ fail:
+-	da = iova;
+-
+-	for_each_sg(sgt->sgl, sg, i, j) {
+-		size_t bytes = sg_dma_len(sg) + sg->offset;
+-
+-		etnaviv_context_unmap(context, da, bytes);
+-		da += bytes;
+-	}
++	etnaviv_context_unmap(context, iova, da - iova);
+ 	return ret;
+ }
+ 
+@@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
+ 	unsigned int da = iova;
+ 	int i;
+ 
+-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
++	for_each_sgtable_dma_sg(sgt, sg, i) {
+ 		size_t bytes = sg_dma_len(sg) + sg->offset;
+ 
+ 		etnaviv_context_unmap(context, da, bytes);
+-- 
+2.39.0
+
diff --git a/queue-5.4/kvm-vmx-execute-ibpb-on-emulated-vm-exit-when-guest-.patch b/queue-5.4/kvm-vmx-execute-ibpb-on-emulated-vm-exit-when-guest-.patch
new file mode 100644
index 00000000000..df041cdca17
--- /dev/null
+++ b/queue-5.4/kvm-vmx-execute-ibpb-on-emulated-vm-exit-when-guest-.patch
@@ -0,0 +1,90 @@
+From 3d0fd59e764b38a8e0f570d5726ecb5963d19c91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 19 Oct 2022 14:36:20 -0700
+Subject: KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
+
+From: Jim Mattson
+
+[ Upstream commit 2e7eab81425ad6c875f2ed47c0ce01e78afc38a5 ]
+
+According to Intel's document on Indirect Branch Restricted
+Speculation, 
"Enabling IBRS does not prevent software from controlling +the predicted targets of indirect branches of unrelated software +executed later at the same predictor mode (for example, between two +different user applications, or two different virtual machines). Such +isolation can be ensured through use of the Indirect Branch Predictor +Barrier (IBPB) command." This applies to both basic and enhanced IBRS. + +Since L1 and L2 VMs share hardware predictor modes (guest-user and +guest-kernel), hardware IBRS is not sufficient to virtualize +IBRS. (The way that basic IBRS is implemented on pre-eIBRS parts, +hardware IBRS is actually sufficient in practice, even though it isn't +sufficient architecturally.) + +For virtual CPUs that support IBRS, add an indirect branch prediction +barrier on emulated VM-exit, to ensure that the predicted targets of +indirect branches executed in L1 cannot be controlled by software that +was executed in L2. + +Since we typically don't intercept guest writes to IA32_SPEC_CTRL, +perform the IBPB at emulated VM-exit regardless of the current +IA32_SPEC_CTRL.IBRS value, even though the IBPB could technically be +deferred until L1 sets IA32_SPEC_CTRL.IBRS, if IA32_SPEC_CTRL.IBRS is +clear at emulated VM-exit. + +This is CVE-2022-2196. + +Fixes: 5c911beff20a ("KVM: nVMX: Skip IBPB when switching between vmcs01 and vmcs02") +Cc: Sean Christopherson +Signed-off-by: Jim Mattson +Reviewed-by: Sean Christopherson +Link: https://lore.kernel.org/r/20221019213620.1953281-3-jmattson@google.com +Signed-off-by: Sean Christopherson +Signed-off-by: Sasha Levin +--- + arch/x86/kvm/vmx/nested.c | 11 +++++++++++ + arch/x86/kvm/vmx/vmx.c | 6 ++++-- + 2 files changed, 15 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index 00f3336194a96..d3a8ee0ef988a 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -4118,6 +4118,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, + + vmx_switch_vmcs(vcpu, &vmx->vmcs01); + ++ /* ++ * If IBRS is advertised to the vCPU, KVM must flush the indirect ++ * branch predictors when transitioning from L2 to L1, as L1 expects ++ * hardware (KVM in this case) to provide separate predictor modes. ++ * Bare metal isolates VMX root (host) from VMX non-root (guest), but ++ * doesn't isolate different VMCSs, i.e. in this case, doesn't provide ++ * separate modes for L2 vs L1. ++ */ ++ if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) ++ indirect_branch_prediction_barrier(); ++ + /* Update any VMCS fields that might have changed while L2 ran */ + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index a8c8073654cf1..e6dd6a7e86893 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -1397,8 +1397,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, + + /* + * No indirect branch prediction barrier needed when switching +- * the active VMCS within a guest, e.g. on nested VM-Enter. +- * The L1 VMM can protect itself with retpolines, IBPB or IBRS. ++ * the active VMCS within a vCPU, unless IBRS is advertised to ++ * the vCPU. To minimize the number of IBPBs executed, KVM ++ * performs IBPB on nested VM-Exit (a single nested transition ++ * may switch the active VMCS multiple times). 
+ 	 */
+ 	if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
+ 		indirect_branch_prediction_barrier();
+-- 
+2.39.0
+
diff --git a/queue-5.4/kvm-x86-fail-emulation-during-emultype_skip-on-any-e.patch b/queue-5.4/kvm-x86-fail-emulation-during-emultype_skip-on-any-e.patch
new file mode 100644
index 00000000000..43b025606d9
--- /dev/null
+++ b/queue-5.4/kvm-x86-fail-emulation-during-emultype_skip-on-any-e.patch
@@ -0,0 +1,73 @@
+From a8491afa5799db9528d9c8e46436955944d4a2a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 30 Sep 2022 23:36:32 +0000
+Subject: KVM: x86: Fail emulation during EMULTYPE_SKIP on any exception
+
+From: Sean Christopherson
+
+[ Upstream commit 17122c06b86c9f77f45b86b8e62c3ed440847a59 ]
+
+Treat any exception during instruction decode for EMULTYPE_SKIP as a
+"full" emulation failure, i.e. signal failure instead of queuing the
+exception. When decoding purely to skip an instruction, KVM and/or the
+CPU has already done some amount of emulation that cannot be unwound,
+e.g. on an EPT misconfig VM-Exit KVM has already processed the emulated
+MMIO. KVM already does this if a #UD is encountered, but not for other
+exceptions, e.g. if a #PF is encountered during fetch.
+
+In SVM's soft-injection use case, queueing the exception is particularly
+problematic as queueing exceptions while injecting events can put KVM
+into an infinite loop due to bailing from VM-Enter to service the newly
+pending exception. E.g. multiple warnings to detect such behavior fire:
+
+  ------------[ cut here ]------------
+  WARNING: CPU: 3 PID: 1017 at arch/x86/kvm/x86.c:9873 kvm_arch_vcpu_ioctl_run+0x1de5/0x20a0 [kvm]
+  Modules linked in: kvm_amd ccp kvm irqbypass
+  CPU: 3 PID: 1017 Comm: svm_nested_soft Not tainted 6.0.0-rc1+ #220
+  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+  RIP: 0010:kvm_arch_vcpu_ioctl_run+0x1de5/0x20a0 [kvm]
+  Call Trace:
+   kvm_vcpu_ioctl+0x223/0x6d0 [kvm]
+   __x64_sys_ioctl+0x85/0xc0
+   do_syscall_64+0x2b/0x50
+   entry_SYSCALL_64_after_hwframe+0x46/0xb0
+  ---[ end trace 0000000000000000 ]---
+  ------------[ cut here ]------------
+  WARNING: CPU: 3 PID: 1017 at arch/x86/kvm/x86.c:9987 kvm_arch_vcpu_ioctl_run+0x12a3/0x20a0 [kvm]
+  Modules linked in: kvm_amd ccp kvm irqbypass
+  CPU: 3 PID: 1017 Comm: svm_nested_soft Tainted: G W 6.0.0-rc1+ #220
+  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+  RIP: 0010:kvm_arch_vcpu_ioctl_run+0x12a3/0x20a0 [kvm]
+  Call Trace:
+   kvm_vcpu_ioctl+0x223/0x6d0 [kvm]
+   __x64_sys_ioctl+0x85/0xc0
+   do_syscall_64+0x2b/0x50
+   entry_SYSCALL_64_after_hwframe+0x46/0xb0
+  ---[ end trace 0000000000000000 ]---
+
+Fixes: 6ea6e84309ca ("KVM: x86: inject exceptions produced by x86_decode_insn")
+Signed-off-by: Sean Christopherson
+Link: https://lore.kernel.org/r/20220930233632.1725475-1-seanjc@google.com
+Signed-off-by: Sasha Levin
+---
+ arch/x86/kvm/x86.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index cf37a61729972..f5e9590a8f311 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6787,7 +6787,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ 					  write_fault_to_spt,
+ 					  emulation_type))
+ 			return 1;
+-		if (ctxt->have_exception) {
++
++		if (ctxt->have_exception &&
++		    !(emulation_type & EMULTYPE_SKIP)) {
+ 			/*
+ 			 * #UD should result in just EMULATION_FAILED, and trap-like
+ 			 * exception should not be encountered during decode.
+-- +2.39.0 + diff --git a/queue-5.4/powerpc-dts-t208x-disable-10g-on-mac1-and-mac2.patch b/queue-5.4/powerpc-dts-t208x-disable-10g-on-mac1-and-mac2.patch new file mode 100644 index 00000000000..1a2c12d1f21 --- /dev/null +++ b/queue-5.4/powerpc-dts-t208x-disable-10g-on-mac1-and-mac2.patch @@ -0,0 +1,51 @@ +From 288a4f031a8ec96f54f965b2d67789e3c8042cd9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 16 Dec 2022 12:29:37 -0500 +Subject: powerpc: dts: t208x: Disable 10G on MAC1 and MAC2 + +From: Sean Anderson + +[ Upstream commit 8d8bee13ae9e316443c6666286360126a19c8d94 ] + +There aren't enough resources to run these ports at 10G speeds. Disable +10G for these ports, reverting to the previous speed. + +Fixes: 36926a7d70c2 ("powerpc: dts: t208x: Mark MAC1 and MAC2 as 10G") +Reported-by: Camelia Alexandra Groza +Signed-off-by: Sean Anderson +Reviewed-by: Camelia Groza +Tested-by: Camelia Groza +Link: https://lore.kernel.org/r/20221216172937.2960054-1-sean.anderson@seco.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + arch/powerpc/boot/dts/fsl/t2081si-post.dtsi | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +index 74e17e134387d..27714dc2f04a5 100644 +--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi ++++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +@@ -659,3 +659,19 @@ + interrupts = <16 2 1 9>; + }; + }; ++ ++&fman0_rx_0x08 { ++ /delete-property/ fsl,fman-10g-port; ++}; ++ ++&fman0_tx_0x28 { ++ /delete-property/ fsl,fman-10g-port; ++}; ++ ++&fman0_rx_0x09 { ++ /delete-property/ fsl,fman-10g-port; ++}; ++ ++&fman0_tx_0x29 { ++ /delete-property/ fsl,fman-10g-port; ++}; +-- +2.39.0 + diff --git a/queue-5.4/powerpc-dts-t208x-mark-mac1-and-mac2-as-10g.patch b/queue-5.4/powerpc-dts-t208x-mark-mac1-and-mac2-as-10g.patch new file mode 100644 index 00000000000..41cb1f57372 --- /dev/null +++ b/queue-5.4/powerpc-dts-t208x-mark-mac1-and-mac2-as-10g.patch @@ -0,0 +1,142 @@ +From 9cfa36f19b3514955da541ef5982c3b938d3743d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 17 Oct 2022 16:22:39 -0400 +Subject: powerpc: dts: t208x: Mark MAC1 and MAC2 as 10G + +From: Sean Anderson + +[ Upstream commit 36926a7d70c2d462fca1ed85bfee000d17fd8662 ] + +On the T208X SoCs, MAC1 and MAC2 support XGMII. Add some new MAC dtsi +fragments, and mark the QMAN ports as 10G. + +Fixes: da414bb923d9 ("powerpc/mpc85xx: Add FSL QorIQ DPAA FMan support to the SoC device tree(s)") +Signed-off-by: Sean Anderson +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + .../boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi | 44 +++++++++++++++++++ + .../boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi | 44 +++++++++++++++++++ + arch/powerpc/boot/dts/fsl/t2081si-post.dtsi | 4 +- + 3 files changed, 90 insertions(+), 2 deletions(-) + create mode 100644 arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi + create mode 100644 arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi + +diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi +new file mode 100644 +index 0000000000000..437dab3fc0176 +--- /dev/null ++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi +@@ -0,0 +1,44 @@ ++// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later ++/* ++ * QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ] ++ * ++ * Copyright 2022 Sean Anderson ++ * Copyright 2012 - 2015 Freescale Semiconductor Inc. 
++ */ ++ ++fman@400000 { ++ fman0_rx_0x08: port@88000 { ++ cell-index = <0x8>; ++ compatible = "fsl,fman-v3-port-rx"; ++ reg = <0x88000 0x1000>; ++ fsl,fman-10g-port; ++ }; ++ ++ fman0_tx_0x28: port@a8000 { ++ cell-index = <0x28>; ++ compatible = "fsl,fman-v3-port-tx"; ++ reg = <0xa8000 0x1000>; ++ fsl,fman-10g-port; ++ }; ++ ++ ethernet@e0000 { ++ cell-index = <0>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xe0000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>; ++ ptp-timer = <&ptp_timer0>; ++ pcsphy-handle = <&pcsphy0>; ++ }; ++ ++ mdio@e1000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xe1000 0x1000>; ++ fsl,erratum-a011043; /* must ignore read errors */ ++ ++ pcsphy0: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi +new file mode 100644 +index 0000000000000..ad116b17850a8 +--- /dev/null ++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi +@@ -0,0 +1,44 @@ ++// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later ++/* ++ * QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ] ++ * ++ * Copyright 2022 Sean Anderson ++ * Copyright 2012 - 2015 Freescale Semiconductor Inc. ++ */ ++ ++fman@400000 { ++ fman0_rx_0x09: port@89000 { ++ cell-index = <0x9>; ++ compatible = "fsl,fman-v3-port-rx"; ++ reg = <0x89000 0x1000>; ++ fsl,fman-10g-port; ++ }; ++ ++ fman0_tx_0x29: port@a9000 { ++ cell-index = <0x29>; ++ compatible = "fsl,fman-v3-port-tx"; ++ reg = <0xa9000 0x1000>; ++ fsl,fman-10g-port; ++ }; ++ ++ ethernet@e2000 { ++ cell-index = <1>; ++ compatible = "fsl,fman-memac"; ++ reg = <0xe2000 0x1000>; ++ fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>; ++ ptp-timer = <&ptp_timer0>; ++ pcsphy-handle = <&pcsphy1>; ++ }; ++ ++ mdio@e3000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; ++ reg = <0xe3000 0x1000>; ++ fsl,erratum-a011043; /* must ignore read errors */ ++ ++ pcsphy1: ethernet-phy@0 { ++ reg = <0x0>; ++ }; ++ }; ++}; +diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +index ecbb447920bc6..74e17e134387d 100644 +--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi ++++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +@@ -609,8 +609,8 @@ + /include/ "qoriq-bman1.dtsi" + + /include/ "qoriq-fman3-0.dtsi" +-/include/ "qoriq-fman3-0-1g-0.dtsi" +-/include/ "qoriq-fman3-0-1g-1.dtsi" ++/include/ "qoriq-fman3-0-10g-2.dtsi" ++/include/ "qoriq-fman3-0-10g-3.dtsi" + /include/ "qoriq-fman3-0-1g-2.dtsi" + /include/ "qoriq-fman3-0-1g-3.dtsi" + /include/ "qoriq-fman3-0-1g-4.dtsi" +-- +2.39.0 + diff --git a/queue-5.4/random-always-mix-cycle-counter-in-add_latent_entrop.patch b/queue-5.4/random-always-mix-cycle-counter-in-add_latent_entrop.patch new file mode 100644 index 00000000000..703c1526763 --- /dev/null +++ b/queue-5.4/random-always-mix-cycle-counter-in-add_latent_entrop.patch @@ -0,0 +1,61 @@ +From 1f6f2b96154263d062bdfc6de652dacc2e8b749b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 1 Jun 2022 22:45:33 +0200 +Subject: random: always mix cycle counter in add_latent_entropy() + +From: Jason A. Donenfeld + +[ Upstream commit d7bf7f3b813e3755226bcb5114ad2ac477514ebf ] + +add_latent_entropy() is called every time a process forks, in +kernel_clone(). This in turn calls add_device_randomness() using the +latent entropy global state. 
add_device_randomness() does two things:
+
+ 1) Mixes in a cycle counter, a sort of measurement of when the event
+    took place, the high precision bits of which are presumably
+    difficult to predict; and
+ 2) Mixes into the input pool the latent entropy argument passed.
+
+(2) is impossible without CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y. But (1) is
+always possible. However, currently CONFIG_GCC_PLUGIN_LATENT_ENTROPY=n
+disables both (1) and (2), instead of just (2).
+
+This commit causes the CONFIG_GCC_PLUGIN_LATENT_ENTROPY=n case to still
+do (1) by passing NULL (len 0) to add_device_randomness() when
+add_latent_entropy() is called.
+
+Cc: Dominik Brodowski
+Cc: PaX Team
+Cc: Emese Revfy
+Fixes: 38addce8b600 ("gcc-plugins: Add latent_entropy plugin")
+Signed-off-by: Jason A. Donenfeld
+Signed-off-by: Sasha Levin
+---
+ include/linux/random.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 3feafab498ad9..ed75fb2b0ca94 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -19,14 +19,14 @@ void add_input_randomness(unsigned int type, unsigned int code,
+ void add_interrupt_randomness(int irq) __latent_entropy;
+ void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+ 
+-#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ static inline void add_latent_entropy(void)
+ {
++#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ 	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+-}
+ #else
+-static inline void add_latent_entropy(void) { }
++	add_device_randomness(NULL, 0);
+ #endif
++}
+ 
+ void get_random_bytes(void *buf, size_t len);
+ size_t __must_check get_random_bytes_arch(void *buf, size_t len);
+-- 
+2.39.0
+
diff --git a/queue-5.4/scatterlist-add-generic-wrappers-for-iterating-over-.patch b/queue-5.4/scatterlist-add-generic-wrappers-for-iterating-over-.patch
new file mode 100644
index 00000000000..86b159f7469
--- /dev/null
+++ b/queue-5.4/scatterlist-add-generic-wrappers-for-iterating-over-.patch
@@ -0,0 +1,124 @@
+From 202d08e788f3521cd73e40a0ab56fd358676cd87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 13 May 2020 15:32:09 +0200
+Subject: scatterlist: add generic wrappers for iterating over sgtable objects
+
+From: Marek Szyprowski
+
+[ Upstream commit 709d6d73c756107fb8a292a9f957d630097425fa ]
+
+struct sg_table is a common structure used for describing a memory
+buffer. It consists of a scatterlist with memory pages and DMA addresses
+(sgl entry), as well as the number of scatterlist entries: CPU pages
+(orig_nents entry) and DMA mapped pages (nents entry).
+
+It turned out that it was a common mistake to misuse nents and orig_nents
+entries, calling the scatterlist iterating functions with the wrong number
+of entries.
+
+To avoid such issues, let's introduce common wrappers operating directly
+on the struct sg_table objects, which take care of the proper use of
+the nents and orig_nents entries.
+
+While touching this, let's clarify some ambiguities in the comments for
+the existing for_each helpers.
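The idea behind these wrappers, as a simplified sketch (hypothetical
types and macro names, not the kernel's real chained scatterlist
implementation): the table carries both counts, and each wrapper
hard-codes the right one, so callers can no longer pick the wrong field:

	struct entry {
		unsigned long dma_address;
		unsigned int length;
	};

	struct table {
		struct entry *sgl;		/* flat array of entries */
		unsigned int nents;		/* entries actually DMA-mapped */
		unsigned int orig_nents;	/* entries holding CPU pages */
	};

	/* CPU-side walk: always bounded by orig_nents. */
	#define for_each_table_sg(t, sg, i) \
		for ((i) = 0, (sg) = (t)->sgl; (i) < (t)->orig_nents; (i)++, (sg)++)

	/* DMA-side walk: always bounded by nents, as set by the mapping step. */
	#define for_each_table_dma_sg(t, sg, i) \
		for ((i) = 0, (sg) = (t)->sgl; (i) < (t)->nents; (i)++, (sg)++)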
+ +Signed-off-by: Marek Szyprowski +Reviewed-by: Robin Murphy +Signed-off-by: Christoph Hellwig +Stable-dep-of: d37c120b7312 ("drm/etnaviv: don't truncate physical page address") +Signed-off-by: Sasha Levin +--- + include/linux/scatterlist.h | 50 ++++++++++++++++++++++++++++++++++--- + 1 file changed, 47 insertions(+), 3 deletions(-) + +diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h +index 6eec50fb36c80..4f922afb607ac 100644 +--- a/include/linux/scatterlist.h ++++ b/include/linux/scatterlist.h +@@ -151,6 +151,20 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, + #define for_each_sg(sglist, sg, nr, __i) \ + for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) + ++/* ++ * Loop over each sg element in the given sg_table object. ++ */ ++#define for_each_sgtable_sg(sgt, sg, i) \ ++ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) ++ ++/* ++ * Loop over each sg element in the given *DMA mapped* sg_table object. ++ * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses ++ * of the each element. ++ */ ++#define for_each_sgtable_dma_sg(sgt, sg, i) \ ++ for_each_sg(sgt->sgl, sg, sgt->nents, i) ++ + /** + * sg_chain - Chain two sglists together + * @prv: First scatterlist +@@ -401,9 +415,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) + * @sglist: sglist to iterate over + * @piter: page iterator to hold current page, sg, sg_pgoffset + * @nents: maximum number of sg entries to iterate over +- * @pgoffset: starting page offset ++ * @pgoffset: starting page offset (in pages) + * + * Callers may use sg_page_iter_page() to get each page pointer. ++ * In each loop it operates on PAGE_SIZE unit. + */ + #define for_each_sg_page(sglist, piter, nents, pgoffset) \ + for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \ +@@ -412,18 +427,47 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) + /** + * for_each_sg_dma_page - iterate over the pages of the given sg list + * @sglist: sglist to iterate over +- * @dma_iter: page iterator to hold current page ++ * @dma_iter: DMA page iterator to hold current page + * @dma_nents: maximum number of sg entries to iterate over, this is the value + * returned from dma_map_sg +- * @pgoffset: starting page offset ++ * @pgoffset: starting page offset (in pages) + * + * Callers may use sg_page_iter_dma_address() to get each page's DMA address. ++ * In each loop it operates on PAGE_SIZE unit. + */ + #define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \ + for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \ + pgoffset); \ + __sg_page_iter_dma_next(dma_iter);) + ++/** ++ * for_each_sgtable_page - iterate over all pages in the sg_table object ++ * @sgt: sg_table object to iterate over ++ * @piter: page iterator to hold current page ++ * @pgoffset: starting page offset (in pages) ++ * ++ * Iterates over the all memory pages in the buffer described by ++ * a scatterlist stored in the given sg_table object. ++ * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit. 
++ */ ++#define for_each_sgtable_page(sgt, piter, pgoffset) \ ++ for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset) ++ ++/** ++ * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object ++ * @sgt: sg_table object to iterate over ++ * @dma_iter: DMA page iterator to hold current page ++ * @pgoffset: starting page offset (in pages) ++ * ++ * Iterates over the all DMA mapped pages in the buffer described by ++ * a scatterlist stored in the given sg_table object. ++ * See also for_each_sg_dma_page(). In each loop it operates on PAGE_SIZE ++ * unit. ++ */ ++#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \ ++ for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset) ++ ++ + /* + * Mapping sg iterator + * +-- +2.39.0 + diff --git a/queue-5.4/series b/queue-5.4/series new file mode 100644 index 00000000000..2758c799728 --- /dev/null +++ b/queue-5.4/series @@ -0,0 +1,11 @@ +dma-mapping-add-generic-helpers-for-mapping-sgtable-.patch +scatterlist-add-generic-wrappers-for-iterating-over-.patch +drm-etnaviv-fix-common-struct-sg_table-related-issue.patch +drm-etnaviv-don-t-truncate-physical-page-address.patch +wifi-rtl8xxxu-gen2-turn-on-the-rate-control.patch +powerpc-dts-t208x-mark-mac1-and-mac2-as-10g.patch +random-always-mix-cycle-counter-in-add_latent_entrop.patch +kvm-x86-fail-emulation-during-emultype_skip-on-any-e.patch +kvm-vmx-execute-ibpb-on-emulated-vm-exit-when-guest-.patch +can-kvaser_usb-hydra-help-gcc-13-to-figure-out-cmd_l.patch +powerpc-dts-t208x-disable-10g-on-mac1-and-mac2.patch diff --git a/queue-5.4/wifi-rtl8xxxu-gen2-turn-on-the-rate-control.patch b/queue-5.4/wifi-rtl8xxxu-gen2-turn-on-the-rate-control.patch new file mode 100644 index 00000000000..b81f18e0524 --- /dev/null +++ b/queue-5.4/wifi-rtl8xxxu-gen2-turn-on-the-rate-control.patch @@ -0,0 +1,69 @@ +From a4d12be8673f1d10d1e974e82e501f5b40145041 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Sep 2022 23:36:51 +0300 +Subject: wifi: rtl8xxxu: gen2: Turn on the rate control + +From: Bitterblue Smith + +[ Upstream commit 791082ec0ab843e0be07c8ce3678e4c2afd2e33d ] + +Re-enable the function rtl8xxxu_gen2_report_connect. + +It informs the firmware when connecting to a network. This makes the +firmware enable the rate control, which makes the upload faster. + +It also informs the firmware when disconnecting from a network. In the +past this made reconnecting impossible because it was sending the +auth on queue 0x7 (TXDESC_QUEUE_VO) instead of queue 0x12 +(TXDESC_QUEUE_MGNT): + +wlp0s20f0u3: send auth to 90:55:de:__:__:__ (try 1/3) +wlp0s20f0u3: send auth to 90:55:de:__:__:__ (try 2/3) +wlp0s20f0u3: send auth to 90:55:de:__:__:__ (try 3/3) +wlp0s20f0u3: authentication with 90:55:de:__:__:__ timed out + +Probably the firmware disables the unnecessary TX queues when it +knows it's disconnected. + +However, this was fixed in commit edd5747aa12e ("wifi: rtl8xxxu: Fix +skb misuse in TX queue selection"). 
+ +Fixes: c59f13bbead4 ("rtl8xxxu: Work around issue with 8192eu and 8723bu devices not reconnecting") +Signed-off-by: Bitterblue Smith +Signed-off-by: Kalle Valo +Link: https://lore.kernel.org/r/43200afc-0c65-ee72-48f8-231edd1df493@gmail.com +Signed-off-by: Sasha Levin +--- + drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 8 ++------ + 1 file changed, 2 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +index 4a81e810a0ce3..0bc747489c55a 100644 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +@@ -4372,12 +4372,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv, + void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, + u8 macid, bool connect) + { +-#ifdef RTL8XXXU_GEN2_REPORT_CONNECT + /* +- * Barry Day reports this causes issues with 8192eu and 8723bu +- * devices reconnecting. The reason for this is unclear, but +- * until it is better understood, leave the code in place but +- * disabled, so it is not lost. ++ * The firmware turns on the rate control when it knows it's ++ * connected to a network. + */ + struct h2c_cmd h2c; + +@@ -4390,7 +4387,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, + h2c.media_status_rpt.parm &= ~BIT(0); + + rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt)); +-#endif + } + + void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) +-- +2.39.0 +