--- /dev/null
+From a4e03921c1bb118e6718e0a3b0322a2c13ed172b Mon Sep 17 00:00:00 2001
+From: Giulio Benetti <giulio.benetti@benettiengineering.com>
+Date: Tue, 13 Dec 2022 20:24:03 +0100
+Subject: ARM: 9280/1: mm: fix warning on phys_addr_t to void pointer assignment
+
+From: Giulio Benetti <giulio.benetti@benettiengineering.com>
+
+commit a4e03921c1bb118e6718e0a3b0322a2c13ed172b upstream.
+
+zero_page is a void* pointer but memblock_alloc() returns phys_addr_t type
+so this generates a warning while using clang and with -Wint-error enabled
+that becomes an error. So let's cast the return of memblock_alloc() to
+(void *).
+
+Cc: <stable@vger.kernel.org> # 4.14.x +
+Fixes: 340a982825f7 ("ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation")
+Signed-off-by: Giulio Benetti <giulio.benetti@benettiengineering.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/nommu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -161,7 +161,7 @@ void __init paging_init(const struct mac
+ mpu_setup();
+
+ /* allocate the zero page. */
+- zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++ zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
--- /dev/null
+From cec669ff716cc83505c77b242aecf6f7baad869d Mon Sep 17 00:00:00 2001
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Date: Wed, 18 Jan 2023 20:38:48 +0530
+Subject: EDAC/device: Respect any driver-supplied workqueue polling value
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+commit cec669ff716cc83505c77b242aecf6f7baad869d upstream.
+
+The EDAC drivers may optionally pass the poll_msec value. Use that value
+if available, else fall back to 1000ms.
+
+ [ bp: Touchups. ]
+
+Fixes: e27e3dac6517 ("drivers/edac: add edac_device class")
+Reported-by: Luca Weiss <luca.weiss@fairphone.com>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Steev Klimaszewski <steev@kali.org> # Thinkpad X13s
+Tested-by: Andrew Halaney <ahalaney@redhat.com> # sa8540p-ride
+Cc: <stable@vger.kernel.org> # 4.9
+Link: https://lore.kernel.org/r/COZYL8MWN97H.MROQ391BGA09@otso
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/edac/edac_device.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -34,6 +34,9 @@
+ static DEFINE_MUTEX(device_ctls_mutex);
+ static LIST_HEAD(edac_device_list);
+
++/* Default workqueue processing interval on this instance, in msecs */
++#define DEFAULT_POLL_INTERVAL 1000
++
+ #ifdef CONFIG_EDAC_DEBUG
+ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+ {
+@@ -366,7 +369,7 @@ static void edac_device_workq_function(s
+ * whole one second to save timers firing all over the period
+ * between integral seconds
+ */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -396,7 +399,7 @@ static void edac_device_workq_setup(stru
+ * timers firing on sub-second basis, while they are happy
+ * to fire together on the 1 second exactly
+ */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -430,7 +433,7 @@ void edac_device_reset_delay_period(stru
+ edac_dev->delay = msecs_to_jiffies(msec);
+
+ /* See comment in edac_device_workq_setup() above */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_mod_work(&edac_dev->work, edac_dev->delay);
+@@ -472,11 +475,7 @@ int edac_device_add_device(struct edac_d
+ /* This instance is NOW RUNNING */
+ edac_dev->op_state = OP_RUNNING_POLL;
+
+- /*
+- * enable workq processing on this instance,
+- * default = 1000 msec
+- */
+- edac_device_workq_setup(edac_dev, 1000);
++ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
+ } else {
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ }
--- /dev/null
+From 977c6ba624f24ae20cf0faee871257a39348d4a9 Mon Sep 17 00:00:00 2001
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Date: Wed, 18 Jan 2023 20:38:50 +0530
+Subject: EDAC/qcom: Do not pass llcc_driv_data as edac_device_ctl_info's pvt_info
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+commit 977c6ba624f24ae20cf0faee871257a39348d4a9 upstream.
+
+The memory for llcc_driv_data is allocated by the LLCC driver. But when
+it is passed as the private driver info to the EDAC core, it will get freed
+during the qcom_edac driver release. So when the qcom_edac driver gets probed
+again, it will try to use the freed data leading to the use-after-free bug.
+
+Hence, do not pass llcc_driv_data as pvt_info but rather reference it
+using the platform_data pointer in the qcom_edac driver.
+
+Fixes: 27450653f1db ("drivers: edac: Add EDAC driver support for QCOM SoCs")
+Reported-by: Steev Klimaszewski <steev@kali.org>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Steev Klimaszewski <steev@kali.org> # Thinkpad X13s
+Tested-by: Andrew Halaney <ahalaney@redhat.com> # sa8540p-ride
+Cc: <stable@vger.kernel.org> # 4.20
+Link: https://lore.kernel.org/r/20230118150904.26913-4-manivannan.sadhasivam@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/edac/qcom_edac.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -252,7 +252,7 @@ clear:
+ static int
+ dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
+ {
+- struct llcc_drv_data *drv = edev_ctl->pvt_info;
++ struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
+ int ret;
+
+ ret = dump_syn_reg_values(drv, bank, err_type);
+@@ -289,7 +289,7 @@ static irqreturn_t
+ llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ {
+ struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
+- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
++ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
+ irqreturn_t irq_rc = IRQ_NONE;
+ u32 drp_error, trp_error, i;
+ int ret;
+@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct p
+ edev_ctl->dev_name = dev_name(dev);
+ edev_ctl->ctl_name = "llcc";
+ edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
+- edev_ctl->pvt_info = llcc_driv_data;
+
+ rc = edac_device_add_device(edev_ctl);
+ if (rc)
--- /dev/null
+From 9f535c870e493841ac7be390610ff2edec755762 Mon Sep 17 00:00:00 2001
+From: Gergely Risko <gergely.risko@gmail.com>
+Date: Thu, 19 Jan 2023 14:40:41 +0100
+Subject: ipv6: fix reachability confirmation with proxy_ndp
+
+From: Gergely Risko <gergely.risko@gmail.com>
+
+commit 9f535c870e493841ac7be390610ff2edec755762 upstream.
+
+When proxying IPv6 NDP requests, the adverts to the initial multicast
+solicits are correct and working. On the other hand, when later a
+reachability confirmation is requested (on unicast), no reply is sent.
+
+This causes the neighbor entry expiring on the sending node, which is
+mostly a non-issue, as a new multicast request is sent. There are
+routers, where the multicast requests are intentionally delayed, and in
+these environments the current implementation causes periodic packet
+loss for the proxied endpoints.
+
+The root cause is the erroneous decrease of the hop limit, as this
+is checked in ndisc.c and no answer is generated when it's 254 instead
+of the correct 255.
+
+Cc: stable@vger.kernel.org
+Fixes: 46c7655f0b56 ("ipv6: decrease hop limit counter in ip6_forward()")
+Signed-off-by: Gergely Risko <gergely.risko@gmail.com>
+Tested-by: Gergely Risko <gergely.risko@gmail.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_output.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -527,7 +527,20 @@ int ip6_forward(struct sk_buff *skb)
+ pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
+ int proxied = ip6_forward_proxy_check(skb);
+ if (proxied > 0) {
+- hdr->hop_limit--;
++ /* It's tempting to decrease the hop limit
++ * here by 1, as we do at the end of the
++ * function too.
++ *
++ * But that would be incorrect, as proxying is
++ * not forwarding. The ip6_input function
++ * will handle this packet locally, and it
++ * depends on the hop limit being unchanged.
++ *
++ * One example is the NDP hop limit, that
++ * always has to stay 255, but other would be
++ * similar checks around RA packets, where the
++ * user can even change the desired limit.
++ */
+ return ip6_input(skb);
+ } else if (proxied < 0) {
+ __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
--- /dev/null
+From 0d0d4680db22eda1eea785c47bbf66a9b33a8b16 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Thu, 29 Dec 2022 18:33:25 +0900
+Subject: ksmbd: add max connections parameter
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 0d0d4680db22eda1eea785c47bbf66a9b33a8b16 upstream.
+
+Add max connections parameter to limit number of maximum simultaneous
+connections.
+
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Cc: stable@vger.kernel.org
+Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/ksmbd_netlink.h | 3 ++-
+ fs/ksmbd/server.h | 1 +
+ fs/ksmbd/transport_ipc.c | 3 +++
+ fs/ksmbd/transport_tcp.c | 17 ++++++++++++++++-
+ 4 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/fs/ksmbd/ksmbd_netlink.h
++++ b/fs/ksmbd/ksmbd_netlink.h
+@@ -105,7 +105,8 @@ struct ksmbd_startup_request {
+ __u32 sub_auth[3]; /* Subauth value for Security ID */
+ __u32 smb2_max_credits; /* MAX credits */
+ __u32 smbd_max_io_size; /* smbd read write size */
+- __u32 reserved[127]; /* Reserved room */
++ __u32 max_connections; /* Number of maximum simultaneous connections */
++ __u32 reserved[126]; /* Reserved room */
+ __u32 ifc_list_sz; /* interfaces list size */
+ __s8 ____payload[];
+ };
+--- a/fs/ksmbd/server.h
++++ b/fs/ksmbd/server.h
+@@ -41,6 +41,7 @@ struct ksmbd_server_config {
+ unsigned int share_fake_fscaps;
+ struct smb_sid domain_sid;
+ unsigned int auth_mechs;
++ unsigned int max_connections;
+
+ char *conf[SERVER_CONF_WORK_GROUP + 1];
+ };
+--- a/fs/ksmbd/transport_ipc.c
++++ b/fs/ksmbd/transport_ipc.c
+@@ -307,6 +307,9 @@ static int ipc_server_config_on_startup(
+ if (req->smbd_max_io_size)
+ init_smbd_max_io_size(req->smbd_max_io_size);
+
++ if (req->max_connections)
++ server_conf.max_connections = req->max_connections;
++
+ ret = ksmbd_set_netbios_name(req->netbios_name);
+ ret |= ksmbd_set_server_string(req->server_string);
+ ret |= ksmbd_set_work_group(req->work_group);
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -15,6 +15,8 @@
+ #define IFACE_STATE_DOWN BIT(0)
+ #define IFACE_STATE_CONFIGURED BIT(1)
+
++static atomic_t active_num_conn;
++
+ struct interface {
+ struct task_struct *ksmbd_kthread;
+ struct socket *ksmbd_socket;
+@@ -185,8 +187,10 @@ static int ksmbd_tcp_new_connection(stru
+ struct tcp_transport *t;
+
+ t = alloc_transport(client_sk);
+- if (!t)
++ if (!t) {
++ sock_release(client_sk);
+ return -ENOMEM;
++ }
+
+ csin = KSMBD_TCP_PEER_SOCKADDR(KSMBD_TRANS(t)->conn);
+ if (kernel_getpeername(client_sk, csin) < 0) {
+@@ -239,6 +243,15 @@ static int ksmbd_kthread_fn(void *p)
+ continue;
+ }
+
++ if (server_conf.max_connections &&
++ atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
++ pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
++ atomic_read(&active_num_conn));
++ atomic_dec(&active_num_conn);
++ sock_release(client_sk);
++ continue;
++ }
++
+ ksmbd_debug(CONN, "connect success: accepted new connection\n");
+ client_sk->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
+ client_sk->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
+@@ -368,6 +381,8 @@ static int ksmbd_tcp_writev(struct ksmbd
+ static void ksmbd_tcp_disconnect(struct ksmbd_transport *t)
+ {
+ free_transport(TCP_TRANS(t));
++ if (server_conf.max_connections)
++ atomic_dec(&active_num_conn);
+ }
+
+ static void tcp_destroy_socket(struct socket *ksmbd_socket)
--- /dev/null
+From 65bb45b97b578c8eed1ffa80caec84708df49729 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Mon, 16 May 2022 16:22:43 +0900
+Subject: ksmbd: add smbd max io size parameter
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 65bb45b97b578c8eed1ffa80caec84708df49729 upstream.
+
+Add 'smbd max io size' parameter to adjust smbd-direct max read/write
+size.
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Reviewed-by: Hyunchul Lee <hyc.lee@gmail.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/ksmbd_netlink.h | 3 ++-
+ fs/ksmbd/transport_ipc.c | 3 +++
+ fs/ksmbd/transport_rdma.c | 8 +++++++-
+ fs/ksmbd/transport_rdma.h | 6 ++++++
+ 4 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/fs/ksmbd/ksmbd_netlink.h
++++ b/fs/ksmbd/ksmbd_netlink.h
+@@ -104,7 +104,8 @@ struct ksmbd_startup_request {
+ */
+ __u32 sub_auth[3]; /* Subauth value for Security ID */
+ __u32 smb2_max_credits; /* MAX credits */
+- __u32 reserved[128]; /* Reserved room */
++ __u32 smbd_max_io_size; /* smbd read write size */
++ __u32 reserved[127]; /* Reserved room */
+ __u32 ifc_list_sz; /* interfaces list size */
+ __s8 ____payload[];
+ };
+--- a/fs/ksmbd/transport_ipc.c
++++ b/fs/ksmbd/transport_ipc.c
+@@ -26,6 +26,7 @@
+ #include "mgmt/ksmbd_ida.h"
+ #include "connection.h"
+ #include "transport_tcp.h"
++#include "transport_rdma.h"
+
+ #define IPC_WAIT_TIMEOUT (2 * HZ)
+
+@@ -303,6 +304,8 @@ static int ipc_server_config_on_startup(
+ init_smb2_max_trans_size(req->smb2_max_trans);
+ if (req->smb2_max_credits)
+ init_smb2_max_credits(req->smb2_max_credits);
++ if (req->smbd_max_io_size)
++ init_smbd_max_io_size(req->smbd_max_io_size);
+
+ ret = ksmbd_set_netbios_name(req->netbios_name);
+ ret |= ksmbd_set_server_string(req->server_string);
+--- a/fs/ksmbd/transport_rdma.c
++++ b/fs/ksmbd/transport_rdma.c
+@@ -75,7 +75,7 @@ static int smb_direct_max_fragmented_rec
+ /* The maximum single-message size which can be received */
+ static int smb_direct_max_receive_size = 8192;
+
+-static int smb_direct_max_read_write_size = 1024 * 1024;
++static int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE;
+
+ static int smb_direct_max_outstanding_rw_ops = 8;
+
+@@ -201,6 +201,12 @@ struct smb_direct_rdma_rw_msg {
+ struct scatterlist sg_list[0];
+ };
+
++void init_smbd_max_io_size(unsigned int sz)
++{
++ sz = clamp_val(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE);
++ smb_direct_max_read_write_size = sz;
++}
++
+ static inline int get_buf_page_count(void *buf, int size)
+ {
+ return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
+--- a/fs/ksmbd/transport_rdma.h
++++ b/fs/ksmbd/transport_rdma.h
+@@ -9,6 +9,10 @@
+
+ #define SMB_DIRECT_PORT 5445
+
++#define SMBD_DEFAULT_IOSIZE (8 * 1024 * 1024)
++#define SMBD_MIN_IOSIZE (512 * 1024)
++#define SMBD_MAX_IOSIZE (16 * 1024 * 1024)
++
+ /* SMB DIRECT negotiation request packet [MS-SMBD] 2.2.1 */
+ struct smb_direct_negotiate_req {
+ __le16 min_version;
+@@ -54,10 +58,12 @@ struct smb_direct_data_transfer {
+ int ksmbd_rdma_init(void);
+ int ksmbd_rdma_destroy(void);
+ bool ksmbd_rdma_capable_netdev(struct net_device *netdev);
++void init_smbd_max_io_size(unsigned int sz);
+ #else
+ static inline int ksmbd_rdma_init(void) { return 0; }
+ static inline int ksmbd_rdma_destroy(void) { return 0; }
+ static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; }
++static inline void init_smbd_max_io_size(unsigned int sz) { }
+ #endif
+
+ #endif /* __KSMBD_TRANSPORT_RDMA_H__ */
--- /dev/null
+From 5fde3c21cf33830eda7bfd006dc7f4bf07ec9fe6 Mon Sep 17 00:00:00 2001
+From: Marios Makassikis <mmakassikis@freebox.fr>
+Date: Wed, 11 Jan 2023 17:39:02 +0100
+Subject: ksmbd: do not sign response to session request for guest login
+
+From: Marios Makassikis <mmakassikis@freebox.fr>
+
+commit 5fde3c21cf33830eda7bfd006dc7f4bf07ec9fe6 upstream.
+
+If ksmbd.mountd is configured to assign unknown users to the guest account
+("map to guest = bad user" in the config), ksmbd signs the response.
+
+This is wrong according to MS-SMB2 3.3.5.5.3:
+ 12. If the SMB2_SESSION_FLAG_IS_GUEST bit is not set in the SessionFlags
+ field, and Session.IsAnonymous is FALSE, the server MUST sign the
+ final session setup response before sending it to the client, as
+ follows:
+ [...]
+
+This fixes libsmb2 based applications failing to establish a session
+("Wrong signature in received").
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Marios Makassikis <mmakassikis@freebox.fr>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -8613,6 +8613,7 @@ int smb3_decrypt_req(struct ksmbd_work *
+ bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
+ {
+ struct ksmbd_conn *conn = work->conn;
++ struct ksmbd_session *sess = work->sess;
+ struct smb2_hdr *rsp = work->response_buf;
+
+ if (conn->dialect < SMB30_PROT_ID)
+@@ -8622,6 +8623,7 @@ bool smb3_11_final_sess_setup_resp(struc
+ rsp = ksmbd_resp_buf_next(work);
+
+ if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE &&
++ sess->user && !user_guest(sess->user) &&
+ rsp->Status == STATUS_SUCCESS)
+ return true;
+ return false;
--- /dev/null
+From a34dc4a9b9e2fb3a45c179a60bb0b26539c96189 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 25 Jan 2023 00:09:02 +0900
+Subject: ksmbd: downgrade ndr version error message to debug
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit a34dc4a9b9e2fb3a45c179a60bb0b26539c96189 upstream.
+
+When a user switches from samba to ksmbd, the following message flood is
+coming when accessing files. Samba seems to change the dos attribute
+version to v5. This patch downgrades the ndr version error message to debug.
+
+$ dmesg
+...
+[68971.766914] ksmbd: v5 version is not supported
+[68971.779808] ksmbd: v5 version is not supported
+[68971.871544] ksmbd: v5 version is not supported
+[68971.910135] ksmbd: v5 version is not supported
+...
+
+Cc: stable@vger.kernel.org
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/ndr.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/ksmbd/ndr.c
++++ b/fs/ksmbd/ndr.c
+@@ -242,7 +242,7 @@ int ndr_decode_dos_attr(struct ndr *n, s
+ return ret;
+
+ if (da->version != 3 && da->version != 4) {
+- pr_err("v%d version is not supported\n", da->version);
++ ksmbd_debug(VFS, "v%d version is not supported\n", da->version);
+ return -EINVAL;
+ }
+
+@@ -251,7 +251,7 @@ int ndr_decode_dos_attr(struct ndr *n, s
+ return ret;
+
+ if (da->version != version2) {
+- pr_err("ndr version mismatched(version: %d, version2: %d)\n",
++ ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+ da->version, version2);
+ return -EINVAL;
+ }
+@@ -453,7 +453,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, s
+ if (ret)
+ return ret;
+ if (acl->version != 4) {
+- pr_err("v%d version is not supported\n", acl->version);
++ ksmbd_debug(VFS, "v%d version is not supported\n", acl->version);
+ return -EINVAL;
+ }
+
+@@ -461,7 +461,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, s
+ if (ret)
+ return ret;
+ if (acl->version != version2) {
+- pr_err("ndr version mismatched(version: %d, version2: %d)\n",
++ ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+ acl->version, version2);
+ return -EINVAL;
+ }
--- /dev/null
+From 62c487b53a7ff31e322cf2874d3796b8202c54a5 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 25 Jan 2023 00:13:20 +0900
+Subject: ksmbd: limit pdu length size according to connection status
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 62c487b53a7ff31e322cf2874d3796b8202c54a5 upstream.
+
+Stream protocol length will never be larger than 16KB until session setup.
+After session setup, the size of requests will not be larger than
+16KB + SMB2 MAX WRITE size. This patch limits these invalidly oversized
+requests and closes the connection immediately.
+
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-18259
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/connection.c | 17 +++++++++++++++--
+ fs/ksmbd/smb2pdu.h | 5 +++--
+ 2 files changed, 18 insertions(+), 4 deletions(-)
+
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -274,7 +274,7 @@ int ksmbd_conn_handler_loop(void *p)
+ {
+ struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
+ struct ksmbd_transport *t = conn->transport;
+- unsigned int pdu_size;
++ unsigned int pdu_size, max_allowed_pdu_size;
+ char hdr_buf[4] = {0,};
+ int size;
+
+@@ -299,13 +299,26 @@ int ksmbd_conn_handler_loop(void *p)
+ pdu_size = get_rfc1002_len(hdr_buf);
+ ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+
++ if (conn->status == KSMBD_SESS_GOOD)
++ max_allowed_pdu_size =
++ SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
++ else
++ max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
++
++ if (pdu_size > max_allowed_pdu_size) {
++ pr_err_ratelimited("PDU length(%u) excceed maximum allowed pdu size(%u) on connection(%d)\n",
++ pdu_size, max_allowed_pdu_size,
++ conn->status);
++ break;
++ }
++
+ /*
+ * Check if pdu size is valid (min : smb header size,
+ * max : 0x00FFFFFF).
+ */
+ if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+ pdu_size > MAX_STREAM_PROT_LEN) {
+- continue;
++ break;
+ }
+
+ /* 4 for rfc1002 length field */
+--- a/fs/ksmbd/smb2pdu.h
++++ b/fs/ksmbd/smb2pdu.h
+@@ -113,8 +113,9 @@
+ #define SMB21_DEFAULT_IOSIZE (1024 * 1024)
+ #define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
+ #define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024)
+-#define SMB3_MIN_IOSIZE (64 * 1024)
+-#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
++#define SMB3_MIN_IOSIZE (64 * 1024)
++#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
++#define SMB3_MAX_MSGSIZE (4 * 4096)
+
+ /*
+ * SMB2 Header Definition
--- /dev/null
+From ef3691683d7bfd0a2acf48812e4ffe894f10bfa8 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 19 Jan 2023 11:07:59 +0000
+Subject: KVM: arm64: GICv4.1: Fix race with doorbell on VPE activation/deactivation
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit ef3691683d7bfd0a2acf48812e4ffe894f10bfa8 upstream.
+
+To save the vgic LPI pending state with GICv4.1, the VPEs must all be
+unmapped from the ITSs so that the sGIC caches can be flushed.
+The opposite is done once the state is saved.
+
+This is all done by using the activate/deactivate irqdomain callbacks
+directly from the vgic code. Crucially, this is done without holding
+the irqdesc lock for the interrupts that represent the VPE. And these
+callbacks are changing the state of the irqdesc. What could possibly
+go wrong?
+
+If a doorbell fires while we are messing with the irqdesc state,
+it will acquire the lock and change the interrupt state concurrently.
+Since we don't hold the lock, corruption occurs in the interrupt
+state. Oh well.
+
+While acquiring the lock would fix this (and this was Shanker's
+initial approach), this is still a layering violation we could do
+without. A better approach is actually to free the VPE interrupt,
+do what we have to do, and re-request it.
+
+It is more work, but this usually happens only once in the lifetime
+of the VM and we don't really care about this sort of overhead.
+
+Fixes: f66b7b151e00 ("KVM: arm64: GICv4.1: Try to save VLPI state in save_pending_tables")
+Reported-by: Shanker Donthineni <sdonthineni@nvidia.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230118022348.4137094-1-sdonthineni@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/vgic/vgic-v3.c | 25 +++++++++++--------------
+ arch/arm64/kvm/vgic/vgic-v4.c | 8 ++++++--
+ arch/arm64/kvm/vgic/vgic.h | 1 +
+ 3 files changed, 18 insertions(+), 16 deletions(-)
+
+--- a/arch/arm64/kvm/vgic/vgic-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-v3.c
+@@ -347,26 +347,23 @@ retry:
+ * The deactivation of the doorbell interrupt will trigger the
+ * unmapping of the associated vPE.
+ */
+-static void unmap_all_vpes(struct vgic_dist *dist)
++static void unmap_all_vpes(struct kvm *kvm)
+ {
+- struct irq_desc *desc;
++ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+
+- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+- irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+- }
++ for (i = 0; i < dist->its_vm.nr_vpes; i++)
++ free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
+ }
+
+-static void map_all_vpes(struct vgic_dist *dist)
++static void map_all_vpes(struct kvm *kvm)
+ {
+- struct irq_desc *desc;
++ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+
+- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+- irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
+- }
++ for (i = 0; i < dist->its_vm.nr_vpes; i++)
++ WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
++ dist->its_vm.vpes[i]->irq));
+ }
+
+ /**
+@@ -391,7 +388,7 @@ int vgic_v3_save_pending_tables(struct k
+ * and enabling of the doorbells have already been done.
+ */
+ if (kvm_vgic_global_state.has_gicv4_1) {
+- unmap_all_vpes(dist);
++ unmap_all_vpes(kvm);
+ vlpi_avail = true;
+ }
+
+@@ -441,7 +438,7 @@ int vgic_v3_save_pending_tables(struct k
+
+ out:
+ if (vlpi_avail)
+- map_all_vpes(dist);
++ map_all_vpes(kvm);
+
+ return ret;
+ }
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_
+ *val = !!(*ptr & mask);
+ }
+
++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
++{
++ return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
++}
++
+ /**
+ * vgic_v4_init - Initialize the GICv4 data structures
+ * @kvm: Pointer to the VM being initialized
+@@ -282,8 +287,7 @@ int vgic_v4_init(struct kvm *kvm)
+ irq_flags &= ~IRQ_NOAUTOEN;
+ irq_set_status_flags(irq, irq_flags);
+
+- ret = request_irq(irq, vgic_v4_doorbell_handler,
+- 0, "vcpu", vcpu);
++ ret = vgic_v4_request_vpe_irq(vcpu, irq);
+ if (ret) {
+ kvm_err("failed to allocate vcpu IRQ%d\n", irq);
+ /*
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -321,5 +321,6 @@ int vgic_v4_init(struct kvm *kvm);
+ void vgic_v4_teardown(struct kvm *kvm);
+ void vgic_v4_configure_vsgis(struct kvm *kvm);
+ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
+
+ #endif
--- /dev/null
+From a44b331614e6f7e63902ed7dff7adc8c85edd8bc Mon Sep 17 00:00:00 2001
+From: Hendrik Borghorst <hborghor@amazon.de>
+Date: Mon, 14 Nov 2022 16:48:23 +0000
+Subject: KVM: x86/vmx: Do not skip segment attributes if unusable bit is set
+
+From: Hendrik Borghorst <hborghor@amazon.de>
+
+commit a44b331614e6f7e63902ed7dff7adc8c85edd8bc upstream.
+
+When serializing and deserializing kvm_sregs, attributes of the segment
+descriptors are stored by user space. For unusable segments,
+vmx_segment_access_rights skips all attributes and sets them to 0.
+
+This means we zero out the DPL (Descriptor Privilege Level) for unusable
+entries.
+
+Unusable segments are - contrary to their name - usable in 64bit mode and
+are used by guests to for example create a linear map through the
+NULL selector.
+
+VMENTER checks if SS.DPL is correct depending on the CS segment type.
+For types 9 (Execute Only) and 11 (Execute Read), CS.DPL must be equal to
+SS.DPL [1].
+
+We have seen real world guests setting CS to a usable segment with DPL=3
+and SS to an unusable segment with DPL=3. Once we go through an sregs
+get/set cycle, SS.DPL turns to 0. This causes the virtual machine to crash
+reproducibly.
+
+This commit changes the attribute logic to always preserve attributes for
+unusable segments. According to [2] SS.DPL is always saved on VM exits,
+regardless of the unusable bit so user space applications should have saved
+the information on serialization correctly.
+
+[3] specifies that besides SS.DPL the rest of the attributes of the
+descriptors are undefined after VM entry if unusable bit is set. So, there
+should be no harm in setting them all to the previous state.
+
+[1] Intel SDM Vol 3C 26.3.1.2 Checks on Guest Segment Registers
+[2] Intel SDM Vol 3C 27.3.2 Saving Segment Registers and Descriptor-Table
+Registers
+[3] Intel SDM Vol 3C 26.3.2.2 Loading Guest Segment Registers and
+Descriptor-Table Registers
+
+Cc: Alexander Graf <graf@amazon.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Hendrik Borghorst <hborghor@amazon.de>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Alexander Graf <graf@amazon.com>
+Message-Id: <20221114164823.69555-1-hborghor@amazon.de>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 21 +++++++++------------
+ 1 file changed, 9 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3361,18 +3361,15 @@ static u32 vmx_segment_access_rights(str
+ {
+ u32 ar;
+
+- if (var->unusable || !var->present)
+- ar = 1 << 16;
+- else {
+- ar = var->type & 15;
+- ar |= (var->s & 1) << 4;
+- ar |= (var->dpl & 3) << 5;
+- ar |= (var->present & 1) << 7;
+- ar |= (var->avl & 1) << 12;
+- ar |= (var->l & 1) << 13;
+- ar |= (var->db & 1) << 14;
+- ar |= (var->g & 1) << 15;
+- }
++ ar = var->type & 15;
++ ar |= (var->s & 1) << 4;
++ ar |= (var->dpl & 3) << 5;
++ ar |= (var->present & 1) << 7;
++ ar |= (var->avl & 1) << 12;
++ ar |= (var->l & 1) << 13;
++ ar |= (var->db & 1) << 14;
++ ar |= (var->g & 1) << 15;
++ ar |= (var->unusable || !var->present) << 16;
+
+ return ar;
+ }
--- /dev/null
+From 4f11ada10d0ad3fd53e2bd67806351de63a4f9c3 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 24 Jan 2023 16:41:18 +0100
+Subject: ovl: fail on invalid uid/gid mapping at copy up
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 4f11ada10d0ad3fd53e2bd67806351de63a4f9c3 upstream.
+
+If st_uid/st_gid doesn't have a mapping in the mounter's user_ns, then
+copy-up should fail, just like it would fail if the mounter task was doing
+the copy using "cp -a".
+
+There's a corner case where the "cp -a" would succeed but copy up fail: if
+there's a mapping of the invalid uid/gid (65534 by default) in the user
+namespace. This is because stat(2) will return this value if the mapping
+doesn't exist in the current user_ns and "cp -a" will in turn be able to
+create a file with this uid/gid.
+
+This behavior would be inconsistent with POSIX ACL's, which return -1 for
+invalid uid/gid which result in a failed copy.
+
+For consistency and simplicity fail the copy if the st_uid/st_gid are
+invalid.
+
+Fixes: 459c7c565ac3 ("ovl: unprivieged mounts")
+Cc: <stable@vger.kernel.org> # v5.11
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Reviewed-by: Christian Brauner <brauner@kernel.org>
+Reviewed-by: Seth Forshee <sforshee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/overlayfs/copy_up.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -960,6 +960,10 @@ static int ovl_copy_up_one(struct dentry
+ if (err)
+ return err;
+
++ if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) ||
++ !kgid_has_mapping(current_user_ns(), ctx.stat.gid))
++ return -EOVERFLOW;
++
+ ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
+
+ if (parent) {
cifs-fix-oops-due-to-uncleared-server-smbd_conn-in-reconnect.patch
i2c-mv64xxx-remove-shutdown-method-from-driver.patch
i2c-mv64xxx-add-atomic_xfer-method-to-driver.patch
+ksmbd-add-smbd-max-io-size-parameter.patch
+ksmbd-add-max-connections-parameter.patch
+ksmbd-do-not-sign-response-to-session-request-for-guest-login.patch
+ksmbd-downgrade-ndr-version-error-message-to-debug.patch
+ksmbd-limit-pdu-length-size-according-to-connection-status.patch
+ovl-fail-on-invalid-uid-gid-mapping-at-copy-up.patch
+kvm-x86-vmx-do-not-skip-segment-attributes-if-unusable-bit-is-set.patch
+kvm-arm64-gicv4.1-fix-race-with-doorbell-on-vpe-activation-deactivation.patch
+thermal-intel-int340x-protect-trip-temperature-from-concurrent-updates.patch
+ipv6-fix-reachability-confirmation-with-proxy_ndp.patch
+arm-9280-1-mm-fix-warning-on-phys_addr_t-to-void-pointer-assignment.patch
+edac-device-respect-any-driver-supplied-workqueue-polling-value.patch
+edac-qcom-do-not-pass-llcc_driv_data-as-edac_device_ctl_info-s-pvt_info.patch
--- /dev/null
+From 6757a7abe47bcb12cb2d45661067e182424b0ee3 Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Mon, 23 Jan 2023 09:21:10 -0800
+Subject: thermal: intel: int340x: Protect trip temperature from concurrent updates
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit 6757a7abe47bcb12cb2d45661067e182424b0ee3 upstream.
+
+Trip temperatures are read using ACPI methods and stored in the memory
+during zone initialization and when the firmware sends a notification for
+change. This trip temperature is returned when the thermal core calls via
+callback get_trip_temp().
+
+But it is possible that while updating the memory copy of the trips when
+the firmware sends a notification for change, thermal core is reading the
+trip temperature via the callback get_trip_temp(). This may return invalid
+trip temperature.
+
+To address this add a mutex to protect against invalid temperature reads in
+the callback get_trip_temp() and int340x_thermal_read_trips().
+
+Fixes: 5fbf7f27fa3d ("Thermal/int340x: Add common thermal zone handler")
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: 5.0+ <stable@vger.kernel.org> # 5.0+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c | 18 +++++++++--
+ drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h | 1
+ 2 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+@@ -44,11 +44,13 @@ static int int340x_thermal_get_trip_temp
+ int trip, int *temp)
+ {
+ struct int34x_thermal_zone *d = zone->devdata;
+- int i;
++ int i, ret = 0;
+
+ if (d->override_ops && d->override_ops->get_trip_temp)
+ return d->override_ops->get_trip_temp(zone, trip, temp);
+
++ mutex_lock(&d->trip_mutex);
++
+ if (trip < d->aux_trip_nr)
+ *temp = d->aux_trips[trip];
+ else if (trip == d->crt_trip_id)
+@@ -66,10 +68,12 @@ static int int340x_thermal_get_trip_temp
+ }
+ }
+ if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+- return -EINVAL;
++ ret = -EINVAL;
+ }
+
+- return 0;
++ mutex_unlock(&d->trip_mutex);
++
++ return ret;
+ }
+
+ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+@@ -180,6 +184,8 @@ int int340x_thermal_read_trips(struct in
+ int trip_cnt = int34x_zone->aux_trip_nr;
+ int i;
+
++ mutex_lock(&int34x_zone->trip_mutex);
++
+ int34x_zone->crt_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
+ &int34x_zone->crt_temp))
+@@ -207,6 +213,8 @@ int int340x_thermal_read_trips(struct in
+ int34x_zone->act_trips[i].valid = true;
+ }
+
++ mutex_unlock(&int34x_zone->trip_mutex);
++
+ return trip_cnt;
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
+@@ -230,6 +238,8 @@ struct int34x_thermal_zone *int340x_ther
+ if (!int34x_thermal_zone)
+ return ERR_PTR(-ENOMEM);
+
++ mutex_init(&int34x_thermal_zone->trip_mutex);
++
+ int34x_thermal_zone->adev = adev;
+ int34x_thermal_zone->override_ops = override_ops;
+
+@@ -281,6 +291,7 @@ err_thermal_zone:
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
+ err_trip_alloc:
++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+ return ERR_PTR(ret);
+ }
+@@ -292,6 +303,7 @@ void int340x_thermal_zone_remove(struct
+ thermal_zone_device_unregister(int34x_thermal_zone->zone);
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+@@ -32,6 +32,7 @@ struct int34x_thermal_zone {
+ struct thermal_zone_device_ops *override_ops;
+ void *priv_data;
+ struct acpi_lpat_conversion_table *lpat_table;
++ struct mutex trip_mutex;
+ };
+
+ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,