--- /dev/null
+From dd64f896a8470b5ef86b339e16988bf3d9006dd4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Mar 2023 11:32:42 +0100
+Subject: drm/sun4i: fix missing component unbind on bind errors
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit c22f2ff8724b49dce2ae797e9fbf4bc0fa91112f ]
+
+Make sure to unbind all subcomponents when binding the aggregate device
+fails.
+
+Fixes: 9026e0d122ac ("drm: Add Allwinner A10 Display Engine support")
+Cc: stable@vger.kernel.org # 4.7
+Cc: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230306103242.4775-1-johan+linaro@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/sun4i/sun4i_drv.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index c5912fd537729..9c6ae8cfa0b2c 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -93,7 +93,7 @@ static int sun4i_drv_bind(struct device *dev)
+ /* drm_vblank_init calls kcalloc, which can fail */
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret)
+- goto cleanup_mode_config;
++ goto unbind_all;
+
+ drm->irq_enabled = true;
+
+@@ -117,6 +117,8 @@ static int sun4i_drv_bind(struct device *dev)
+
+ finish_poll:
+ drm_kms_helper_poll_fini(drm);
++unbind_all:
++ component_unbind_all(dev, NULL);
+ cleanup_mode_config:
+ drm_mode_config_cleanup(drm);
+ of_reserved_mem_device_release(dev);
+--
+2.39.2
+
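For readers unfamiliar with the aggregate-device pattern used here: sun4i_drv_bind() calls component_bind_all() to bind every subcomponent, so any failure after that point must undo it with component_unbind_all() before the older cleanup labels run. A minimal sketch of the unwind ordering the patch establishes (the example_bind() name and reduced body are illustrative, not the driver's full function):

static int example_bind(struct device *dev, struct drm_device *drm)
{
        int ret;

        ret = component_bind_all(dev, drm);     /* bind all subcomponents */
        if (ret)
                goto cleanup_mode_config;

        ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (ret)
                goto unbind_all;        /* must undo component_bind_all() */

        return 0;

unbind_all:
        component_unbind_all(dev, NULL);        /* label added by this fix */
cleanup_mode_config:
        drm_mode_config_cleanup(drm);
        return ret;
}

Each label releases exactly what was acquired before the jump, so later failures fall through the earlier cleanups in reverse acquisition order.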
--- /dev/null
+From 5fd4966c1caea8556e7282bd687faa91ec91deb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Mar 2023 08:56:39 +0100
+Subject: interconnect: qcom: msm8974: fix registration race
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit bfe7bcd2b9f5215de2144f097f39971180e7ea54 ]
+
+The current interconnect provider registration interface is inherently
+racy as nodes are not added until after the provider itself has been
+added. This can specifically cause racing DT lookups to fail.
+
+Switch to using the new API where the provider is not registered until
+after it has been fully initialised.
+
+Fixes: 4e60a9568dc6 ("interconnect: qcom: add msm8974 driver")
+Cc: stable@vger.kernel.org # 5.5
+Reviewed-by: Brian Masney <bmasney@redhat.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20230306075651.2449-12-johan+linaro@kernel.org
+Signed-off-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/interconnect/qcom/msm8974.c | 22 ++++++++++------------
+ 1 file changed, 10 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
+index da68ce375a89d..e1489930191d2 100644
+--- a/drivers/interconnect/qcom/msm8974.c
++++ b/drivers/interconnect/qcom/msm8974.c
+@@ -692,7 +692,6 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ return ret;
+
+ provider = &qp->provider;
+- INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = msm8974_icc_set;
+ provider->aggregate = icc_std_aggregate;
+@@ -700,11 +699,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ provider->data = data;
+ provider->get_bw = msm8974_get_bw;
+
+- ret = icc_provider_add(provider);
+- if (ret) {
+- dev_err(dev, "error adding interconnect provider: %d\n", ret);
+- goto err_disable_clks;
+- }
++ icc_provider_init(provider);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+@@ -712,7 +707,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+- goto err_del_icc;
++ goto err_remove_nodes;
+ }
+
+ node->name = qnodes[i]->name;
+@@ -729,15 +724,16 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ }
+ data->num_nodes = num_nodes;
+
++ ret = icc_provider_register(provider);
++ if (ret)
++ goto err_remove_nodes;
++
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+
+-err_del_icc:
++err_remove_nodes:
+ icc_nodes_remove(provider);
+- icc_provider_del(provider);
+-
+-err_disable_clks:
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return ret;
+@@ -747,9 +743,11 @@ static int msm8974_icc_remove(struct platform_device *pdev)
+ {
+ struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
+
++ icc_provider_deregister(&qp->provider);
+ icc_nodes_remove(&qp->provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+- return icc_provider_del(&qp->provider);
++
++ return 0;
+ }
+
+ static const struct of_device_id msm8974_noc_of_match[] = {
+--
+2.39.2
+
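The shape of the new API is worth spelling out: icc_provider_init() only initialises the provider's internals, while icc_provider_register() is what makes it visible to DT lookups, so node creation can happen safely in between. A sketch of the resulting probe flow (example_icc_setup() is an illustrative name; qcom_icc_node is this driver's node descriptor type):

static int example_icc_setup(struct icc_provider *provider,
                             struct qcom_icc_node **qnodes, int num_nodes)
{
        struct icc_node *node;
        int ret, i;

        icc_provider_init(provider);    /* initialised, not yet visible */

        for (i = 0; i < num_nodes; i++) {
                node = icc_node_create(qnodes[i]->id);
                if (IS_ERR(node)) {
                        ret = PTR_ERR(node);
                        goto err_remove_nodes;
                }
                node->name = qnodes[i]->name;
                icc_node_add(node, provider);
        }

        /* Only now can a racing DT lookup see the provider, and it is
         * guaranteed to find a fully populated node list. */
        ret = icc_provider_register(provider);
        if (ret)
                goto err_remove_nodes;

        return 0;

err_remove_nodes:
        icc_nodes_remove(provider);
        return ret;
}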
--- /dev/null
+From c0929a3fe65cc9756fd348356d90a4899d1aa66d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Jan 2023 02:22:19 +0200
+Subject: interconnect: qcom: osm-l3: fix icc_onecell_data allocation
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit f77ebdda0ee652124061c2ac42399bb6c367e729 ]
+
+This is a struct with a trailing zero-length array of icc_node
+pointers, but it's allocated as if it were a plain array of icc_node
+structures instead.
+
+Fortunately this overallocates memory rather than allocating less
+memory than required.
+
+Fix by replacing devm_kcalloc() with devm_kzalloc() and struct_size()
+macro.
+
+Fixes: 5bc9900addaf ("interconnect: qcom: Add OSM L3 interconnect provider support")
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Link: https://lore.kernel.org/r/20230105002221.1416479-2-dmitry.baryshkov@linaro.org
+Signed-off-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/interconnect/qcom/osm-l3.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
+index 695f28789e98a..08a282d573203 100644
+--- a/drivers/interconnect/qcom/osm-l3.c
++++ b/drivers/interconnect/qcom/osm-l3.c
+@@ -258,7 +258,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
++ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+--
+2.39.2
+
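For reference, struct icc_onecell_data ends in a flexible array of node pointers, so the allocation must cover the header plus num_nodes pointers; struct_size() computes exactly that, with overflow checking. A before/after sketch, assuming the upstream definition from include/linux/interconnect-provider.h ('node' is the driver's struct icc_node * local):

struct icc_onecell_data {
        unsigned int num_nodes;
        struct icc_node *nodes[];       /* trailing flexible array */
};

/* Before: num_nodes * sizeof(struct icc_node). Each element is sized
 * as a whole struct icc_node rather than the pointer actually stored,
 * so this overallocates (harmless, but wasteful and misleading). */
data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);

/* After: sizeof(*data) + num_nodes * sizeof(data->nodes[0]), i.e. the
 * header plus exactly num_nodes pointers, with overflow checking. */
data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
                    GFP_KERNEL);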
--- /dev/null
+From b62cc80775dcd81a4d3f61f59f0979563d8fa941 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jan 2023 10:34:47 -0600
+Subject: ipmi:ssif: Add a timer between request retries
+
+From: Corey Minyard <cminyard@mvista.com>
+
+[ Upstream commit 00bb7e763ec9f384cb382455cb6ba5588b5375cf ]
+
+The IPMI spec has a time (T6) specified between request retries. Add
+the handling for that.
+
+Reported-by: Tony Camuso <tcamuso@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 34 +++++++++++++++++++++++++++-------
+ 1 file changed, 27 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index cae21632cf079..20dc2452815c7 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -74,7 +74,8 @@
+ /*
+ * Timer values
+ */
+-#define SSIF_MSG_USEC 60000 /* 60ms between message tries. */
++#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */
++#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */
+ #define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
+
+ /* How many times to we retry sending/receiving the message. */
+@@ -82,7 +83,9 @@
+ #define SSIF_RECV_RETRIES 250
+
+ #define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
++#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000)
+ #define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
++#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
+ #define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
+
+ /*
+@@ -229,6 +232,9 @@ struct ssif_info {
+ bool got_alert;
+ bool waiting_alert;
+
++ /* Used to inform the timeout that it should do a resend. */
++ bool do_resend;
++
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+@@ -538,22 +544,28 @@ static void start_get(struct ssif_info *ssif_info)
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ }
+
++static void start_resend(struct ssif_info *ssif_info);
++
+ static void retry_timeout(struct timer_list *t)
+ {
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+ unsigned long oflags, *flags;
+- bool waiting;
++ bool waiting, resend;
+
+ if (ssif_info->stopping)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
++ resend = ssif_info->do_resend;
++ ssif_info->do_resend = false;
+ waiting = ssif_info->waiting_alert;
+ ssif_info->waiting_alert = false;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (waiting)
+ start_get(ssif_info);
++ if (resend)
++ start_resend(ssif_info);
+ }
+
+ static void watch_timeout(struct timer_list *t)
+@@ -602,8 +614,6 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ start_get(ssif_info);
+ }
+
+-static void start_resend(struct ssif_info *ssif_info);
+-
+ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+ {
+@@ -909,7 +919,13 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+- start_resend(ssif_info);
++ /*
++ * Wait the retry timeout time per the spec,
++ * then redo the send.
++ */
++ ssif_info->do_resend = true;
++ mod_timer(&ssif_info->retry_timer,
++ jiffies + SSIF_REQ_RETRY_JIFFIES);
+ return;
+ }
+
+@@ -1322,8 +1338,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+ ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+ if (ret) {
+ retry_cnt--;
+- if (retry_cnt > 0)
++ if (retry_cnt > 0) {
++ msleep(SSIF_REQ_RETRY_MSEC);
+ goto retry1;
++ }
+ return -ENODEV;
+ }
+
+@@ -1464,8 +1482,10 @@ static int start_multipart_test(struct i2c_client *client,
+ 32, msg);
+ if (ret) {
+ retry_cnt--;
+- if (retry_cnt > 0)
++ if (retry_cnt > 0) {
++ msleep(SSIF_REQ_RETRY_MSEC);
+ goto retry_write;
++ }
+ dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n");
+ return ret;
+ }
+--
+2.39.2
+
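Condensed, the handshake the patch introduces looks like this: the write-failure path arms the existing retry timer instead of resending inline, and the timer callback consumes the flag under the message lock before doing the actual resend (a condensed view of the hunks above, with local declarations elided):

/* msg_written_handler(), on a failed write with retries left: */
ssif_info->do_resend = true;
mod_timer(&ssif_info->retry_timer, jiffies + SSIF_REQ_RETRY_JIFFIES);

/* retry_timeout(), when the T6 timer fires: */
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
resend = ssif_info->do_resend;
ssif_info->do_resend = false;   /* consume the flag exactly once */
ipmi_ssif_unlock_cond(ssif_info, flags);

if (resend)
        start_resend(ssif_info);        /* the actual T6-delayed retry */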
--- /dev/null
+From 9c515130212a39d385cdfc36e19032c4a5571f63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 3 Nov 2022 15:03:11 -0500
+Subject: ipmi:ssif: Increase the message retry time
+
+From: Corey Minyard <cminyard@mvista.com>
+
+[ Upstream commit 39721d62bbc16ebc9bb2bdc2c163658f33da3b0b ]
+
+The spec states that the minimum message retry time is 60ms, but it was
+set to 20ms. Correct it.
+
+Reported-by: Tony Camuso <tcamuso@redhat.com>
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Stable-dep-of: 00bb7e763ec9 ("ipmi:ssif: Add a timer between request retries")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index e9775b17dc92e..167ca54d186cb 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -74,7 +74,7 @@
+ /*
+ * Timer values
+ */
+-#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */
++#define SSIF_MSG_USEC 60000 /* 60ms between message tries. */
+ #define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
+
+ /* How many times to we retry sending/receiving the message. */
+--
+2.39.2
+
--- /dev/null
+From 33762ef924c5eaa0b4c0e89b34dd0aacde7f5e5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Mar 2021 22:05:15 +0800
+Subject: ipmi:ssif: make ssif_i2c_send() void
+
+From: Liguang Zhang <zhangliguang@linux.alibaba.com>
+
+[ Upstream commit dcd10526ac5a0d6cc94ce60b9acfca458163277b ]
+
+This function actually needs no return value. So remove the unneeded
+check and make it void.
+
+Signed-off-by: Liguang Zhang <zhangliguang@linux.alibaba.com>
+Message-Id: <20210301140515.18951-1-zhangliguang@linux.alibaba.com>
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Stable-dep-of: 00bb7e763ec9 ("ipmi:ssif: Add a timer between request retries")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 81 +++++++++--------------------------
+ 1 file changed, 20 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 0f2bac24e564d..e9775b17dc92e 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -510,7 +510,7 @@ static int ipmi_ssif_thread(void *data)
+ return 0;
+ }
+
+-static int ssif_i2c_send(struct ssif_info *ssif_info,
++static void ssif_i2c_send(struct ssif_info *ssif_info,
+ ssif_i2c_done handler,
+ int read_write, int command,
+ unsigned char *data, unsigned int size)
+@@ -522,7 +522,6 @@ static int ssif_i2c_send(struct ssif_info *ssif_info,
+ ssif_info->i2c_data = data;
+ ssif_info->i2c_size = size;
+ complete(&ssif_info->wake_thread);
+- return 0;
+ }
+
+
+@@ -531,22 +530,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+
+ static void start_get(struct ssif_info *ssif_info)
+ {
+- int rv;
+-
+ ssif_info->rtc_us_timer = 0;
+ ssif_info->multi_pos = 0;
+
+- rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+- SSIF_IPMI_RESPONSE,
+- ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+- if (rv < 0) {
+- /* request failed, just return the error. */
+- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+- dev_dbg(&ssif_info->client->dev,
+- "Error from i2c_non_blocking_op(5)\n");
+-
+- msg_done_handler(ssif_info, -EIO, NULL, 0);
+- }
++ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
++ SSIF_IPMI_RESPONSE,
++ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ }
+
+ static void retry_timeout(struct timer_list *t)
+@@ -620,7 +609,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ {
+ struct ipmi_smi_msg *msg;
+ unsigned long oflags, *flags;
+- int rv;
+
+ /*
+ * We are single-threaded here, so no need for a lock until we
+@@ -666,17 +654,10 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ ssif_info->multi_len = len;
+ ssif_info->multi_pos = 1;
+
+- rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+- SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+- ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+- if (rv < 0) {
+- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+- dev_dbg(&ssif_info->client->dev,
+- "Error from i2c_non_blocking_op(1)\n");
+-
+- result = -EIO;
+- } else
+- return;
++ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
++ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
++ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
++ return;
+ } else if (ssif_info->multi_pos) {
+ /* Middle of multi-part read. Start the next transaction. */
+ int i;
+@@ -738,19 +719,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+
+ ssif_info->multi_pos++;
+
+- rv = ssif_i2c_send(ssif_info, msg_done_handler,
+- I2C_SMBUS_READ,
+- SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+- ssif_info->recv,
+- I2C_SMBUS_BLOCK_DATA);
+- if (rv < 0) {
+- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+- dev_dbg(&ssif_info->client->dev,
+- "Error from ssif_i2c_send\n");
+-
+- result = -EIO;
+- } else
+- return;
++ ssif_i2c_send(ssif_info, msg_done_handler,
++ I2C_SMBUS_READ,
++ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
++ ssif_info->recv,
++ I2C_SMBUS_BLOCK_DATA);
++ return;
+ }
+ }
+
+@@ -931,8 +905,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+ {
+- int rv;
+-
+ /* We are single-threaded here, so no need for a lock. */
+ if (result < 0) {
+ ssif_info->retries_left--;
+@@ -995,18 +967,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ ssif_info->multi_data = NULL;
+ }
+
+- rv = ssif_i2c_send(ssif_info, msg_written_handler,
+- I2C_SMBUS_WRITE, cmd,
+- data_to_send, I2C_SMBUS_BLOCK_DATA);
+- if (rv < 0) {
+- /* request failed, just return the error. */
+- ssif_inc_stat(ssif_info, send_errors);
+-
+- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+- dev_dbg(&ssif_info->client->dev,
+- "Error from i2c_non_blocking_op(3)\n");
+- msg_done_handler(ssif_info, -EIO, NULL, 0);
+- }
++ ssif_i2c_send(ssif_info, msg_written_handler,
++ I2C_SMBUS_WRITE, cmd,
++ data_to_send, I2C_SMBUS_BLOCK_DATA);
+ } else {
+ /* Ready to request the result. */
+ unsigned long oflags, *flags;
+@@ -1035,7 +998,6 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+
+ static int start_resend(struct ssif_info *ssif_info)
+ {
+- int rv;
+ int command;
+
+ ssif_info->got_alert = false;
+@@ -1057,12 +1019,9 @@ static int start_resend(struct ssif_info *ssif_info)
+ ssif_info->data[0] = ssif_info->data_len;
+ }
+
+- rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+- command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+- if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG))
+- dev_dbg(&ssif_info->client->dev,
+- "Error from i2c_non_blocking_op(4)\n");
+- return rv;
++ ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
++ command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
++ return 0;
+ }
+
+ static int start_send(struct ssif_info *ssif_info,
+--
+2.39.2
+
--- /dev/null
+From b1324298c8233a74c7f9e1996a94f1a71bcad391 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jan 2023 10:11:06 -0600
+Subject: ipmi:ssif: resend_msg() cannot fail
+
+From: Corey Minyard <cminyard@mvista.com>
+
+[ Upstream commit 95767ed78a181d5404202627499f9cde56053b96 ]
+
+The resend_msg() function cannot fail, but there was error handling
+around its use. Rework the error handling, and fix the out-of-retries
+debug reporting that was wrong around this, too.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Stable-dep-of: 00bb7e763ec9 ("ipmi:ssif: Add a timer between request retries")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 28 +++++++---------------------
+ 1 file changed, 7 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 167ca54d186cb..cae21632cf079 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -602,7 +602,7 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ start_get(ssif_info);
+ }
+
+-static int start_resend(struct ssif_info *ssif_info);
++static void start_resend(struct ssif_info *ssif_info);
+
+ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+@@ -909,31 +909,17 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+- if (!start_resend(ssif_info)) {
+- ssif_inc_stat(ssif_info, send_retries);
+- return;
+- }
+- /* request failed, just return the error. */
+- ssif_inc_stat(ssif_info, send_errors);
+-
+- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+- dev_dbg(&ssif_info->client->dev,
+- "%s: Out of retries\n", __func__);
+- msg_done_handler(ssif_info, -EIO, NULL, 0);
++ start_resend(ssif_info);
+ return;
+ }
+
+ ssif_inc_stat(ssif_info, send_errors);
+
+- /*
+- * Got an error on transmit, let the done routine
+- * handle it.
+- */
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+- "%s: Error %d\n", __func__, result);
++ "%s: Out of retries\n", __func__);
+
+- msg_done_handler(ssif_info, result, NULL, 0);
++ msg_done_handler(ssif_info, -EIO, NULL, 0);
+ return;
+ }
+
+@@ -996,7 +982,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ }
+ }
+
+-static int start_resend(struct ssif_info *ssif_info)
++static void start_resend(struct ssif_info *ssif_info)
+ {
+ int command;
+
+@@ -1021,7 +1007,6 @@ static int start_resend(struct ssif_info *ssif_info)
+
+ ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+ command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+- return 0;
+ }
+
+ static int start_send(struct ssif_info *ssif_info,
+@@ -1036,7 +1021,8 @@ static int start_send(struct ssif_info *ssif_info,
+ ssif_info->retries_left = SSIF_SEND_RETRIES;
+ memcpy(ssif_info->data + 1, data, len);
+ ssif_info->data_len = len;
+- return start_resend(ssif_info);
++ start_resend(ssif_info);
++ return 0;
+ }
+
+ /* Must be called with the message lock held. */
+--
+2.39.2
+
--- /dev/null
+From 0fcae9372e827e726d3deb9d5ff5916d34647f8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Aug 2021 11:25:09 +0200
+Subject: KVM: Clean up benign vcpu->cpu data races when kicking vCPUs
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 85b640450ddcfa09cf72771b69a9c3daf0ddc772 ]
+
+Fix a benign data race reported by syzbot+KCSAN[*] by ensuring vcpu->cpu
+is read exactly once, and by ensuring the vCPU is kicked out of guest mode
+if kvm_arch_vcpu_should_kick() returns true. Fix a similar race in
+kvm_make_vcpus_request_mask() by ensuring the vCPU is interrupted if
+kvm_request_needs_ipi() returns true.
+
+Reading vcpu->cpu before vcpu->mode (via kvm_arch_vcpu_should_kick() or
+kvm_request_needs_ipi()) means the target vCPU could get migrated (change
+vcpu->cpu) and enter !OUTSIDE_GUEST_MODE between reading vcpu->cpu and
+reading vcpu->mode. If that happens, the kick/IPI will be sent to the
+old pCPU, not the new pCPU that is now running the vCPU or reading SPTEs.
+
+Although failing to kick the vCPU is not exactly ideal, practically
+speaking it cannot cause a functional issue unless there is also a bug in
+the caller, and any such bug would exist regardless of kvm_vcpu_kick()'s
+behavior.
+
+The purpose of sending an IPI is purely to get a vCPU into the host (or
+out of reading SPTEs) so that the vCPU can recognize a change in state,
+e.g. a KVM_REQ_* request. If vCPU's handling of the state change is
+required for correctness, KVM must ensure either the vCPU sees the change
+before entering the guest, or that the sender sees the vCPU as running in
+guest mode. All architectures handle this by (a) sending the request
+before calling kvm_vcpu_kick() and (b) checking for requests _after_
+setting vcpu->mode.
+
+x86's READING_SHADOW_PAGE_TABLES has similar requirements; KVM needs to
+ensure it kicks and waits for vCPUs that started reading SPTEs _before_
+MMU changes were finalized, but any vCPU that starts reading after MMU
+changes were finalized will see the new state and can continue on
+uninterrupted.
+
+For uses of kvm_vcpu_kick() that are not paired with a KVM_REQ_*, e.g.
+x86's kvm_arch_sync_dirty_log(), the order of the kick must not be relied
+upon for functional correctness, e.g. in the dirty log case, userspace
+cannot assume it has a 100% complete log if vCPUs are still running.
+
+All that said, eliminate the benign race since the cost of doing so is an
+"extra" atomic cmpxchg() in the case where the target vCPU is loaded by
+the current pCPU or is not loaded at all. I.e. the kick will be skipped
+due to kvm_vcpu_exiting_guest_mode() seeing a compatible vcpu->mode as
+opposed to the kick being skipped because of the cpu checks.
+
+Keep the "cpu != me" checks even though they appear useless/impossible at
+first glance. x86 processes guest IPI writes in a fast path that runs in
+IN_GUEST_MODE, i.e. can call kvm_vcpu_kick() from IN_GUEST_MODE. And
+calling kvm_vm_bugged()->kvm_make_vcpus_request_mask() from IN_GUEST or
+READING_SHADOW_PAGE_TABLES is perfectly reasonable.
+
+Note, a race with the cpu_online() check in kvm_vcpu_kick() likely
+persists, e.g. the vCPU could exit guest mode and get offlined between
+the cpu_online() check and the sending of smp_send_reschedule(). But,
+the online check appears to exist only to avoid a WARN in x86's
+native_smp_send_reschedule() that fires if the target CPU is not online.
+The reschedule WARN exists because CPU offlining takes the CPU out of the
+scheduling pool, i.e. the WARN is intended to detect the case where the
+kernel attempts to schedule a task on an offline CPU. The actual sending
+of the IPI is a non-issue as at worst it will simply be dropped on the
+floor. In other words, KVM's usurping of the reschedule IPI could
+theoretically trigger a WARN if the stars align, but there will be no
+loss of functionality.
+
+[*] https://syzkaller.appspot.com/bug?extid=cd4154e502f43f10808a
+
+Cc: Venkatesh Srinivas <venkateshs@google.com>
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Fixes: 97222cc83163 ("KVM: Emulate local APIC in kernel")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20210827092516.1027264-2-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: 2b0128127373 ("KVM: Register /dev/kvm as the _very_ last thing during initialization")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 36 ++++++++++++++++++++++++++++--------
+ 1 file changed, 28 insertions(+), 8 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 564d5c145fbe7..b5134f3046483 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -276,14 +276,26 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ continue;
+
+ kvm_make_request(req, vcpu);
+- cpu = vcpu->cpu;
+
+ if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
+ continue;
+
+- if (tmp != NULL && cpu != -1 && cpu != me &&
+- kvm_request_needs_ipi(vcpu, req))
+- __cpumask_set_cpu(cpu, tmp);
++ /*
++ * Note, the vCPU could get migrated to a different pCPU at any
++ * point after kvm_request_needs_ipi(), which could result in
++ * sending an IPI to the previous pCPU. But, that's ok because
++ * the purpose of the IPI is to ensure the vCPU returns to
++ * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.
++ * Entering READING_SHADOW_PAGE_TABLES after this point is also
++ * ok, as the requirement is only that KVM wait for vCPUs that
++ * were reading SPTEs _before_ any changes were finalized. See
++ * kvm_vcpu_kick() for more details on handling requests.
++ */
++ if (tmp != NULL && kvm_request_needs_ipi(vcpu, req)) {
++ cpu = READ_ONCE(vcpu->cpu);
++ if (cpu != -1 && cpu != me)
++ __cpumask_set_cpu(cpu, tmp);
++ }
+ }
+
+ called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
+@@ -2937,16 +2949,24 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
+ */
+ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+ {
+- int me;
+- int cpu = vcpu->cpu;
++ int me, cpu;
+
+ if (kvm_vcpu_wake_up(vcpu))
+ return;
+
++ /*
++ * Note, the vCPU could get migrated to a different pCPU at any point
++ * after kvm_arch_vcpu_should_kick(), which could result in sending an
++ * IPI to the previous pCPU. But, that's ok because the purpose of the
++ * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
++ * vCPU also requires it to leave IN_GUEST_MODE.
++ */
+ me = get_cpu();
+- if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
+- if (kvm_arch_vcpu_should_kick(vcpu))
++ if (kvm_arch_vcpu_should_kick(vcpu)) {
++ cpu = READ_ONCE(vcpu->cpu);
++ if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
+ smp_send_reschedule(cpu);
++ }
+ put_cpu();
+ }
+ EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
+--
+2.39.2
+
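Condensing the kvm_vcpu_kick() hunk, a before/after makes the ordering visible: the old code latched vcpu->cpu before knowing whether a kick was needed, while the new code reads it exactly once, only after kvm_arch_vcpu_should_kick() has observed the vCPU in guest mode (sketch only; surrounding code elided):

/* Before: cpu is sampled early, so the vCPU can migrate and enter
 * guest mode on another pCPU before the IPI is sent. */
int cpu = vcpu->cpu;
if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
        if (kvm_arch_vcpu_should_kick(vcpu))
                smp_send_reschedule(cpu);       /* may target stale pCPU */

/* After: decide to kick first, then read vcpu->cpu exactly once. */
if (kvm_arch_vcpu_should_kick(vcpu)) {
        int cpu = READ_ONCE(vcpu->cpu);
        if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                smp_send_reschedule(cpu);
}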
--- /dev/null
+From a6d04ea442b7a56256c718407892bda158bf41bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Aug 2021 11:25:10 +0200
+Subject: KVM: KVM: Use cpumask_available() to check for NULL cpumask when
+ kicking vCPUs
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 0bbc2ca8515f9cdf11df84ccb63dc7c44bc3d8f4 ]
+
+Check for a NULL cpumask_var_t when kicking multiple vCPUs via
+cpumask_available(), which performs a !NULL check if and only if cpumasks
+are configured to be allocated off-stack. This is a meaningless
+optimization, e.g. avoids a TEST+Jcc and TEST+CMOV on x86, but more
+importantly helps document that the NULL check is necessary even though
+all callers pass in a local variable.
+
+No functional change intended.
+
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20210827092516.1027264-3-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: 2b0128127373 ("KVM: Register /dev/kvm as the _very_ last thing during initialization")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index b5134f3046483..f379398b43d59 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -248,9 +248,13 @@ static void ack_flush(void *_completed)
+ {
+ }
+
+-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
++static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
+ {
+- if (unlikely(!cpus))
++ const struct cpumask *cpus;
++
++ if (likely(cpumask_available(tmp)))
++ cpus = tmp;
++ else
+ cpus = cpu_online_mask;
+
+ if (cpumask_empty(cpus))
+@@ -280,6 +284,14 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
+ continue;
+
++ /*
++ * tmp can be "unavailable" if cpumasks are allocated off stack
++ * as allocation of the mask is deliberately not fatal and is
++ * handled by falling back to kicking all online CPUs.
++ */
++ if (!cpumask_available(tmp))
++ continue;
++
+ /*
+ * Note, the vCPU could get migrated to a different pCPU at any
+ * point after kvm_request_needs_ipi(), which could result in
+@@ -291,7 +303,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ * were reading SPTEs _before_ any changes were finalized. See
+ * kvm_vcpu_kick() for more details on handling requests.
+ */
+- if (tmp != NULL && kvm_request_needs_ipi(vcpu, req)) {
++ if (kvm_request_needs_ipi(vcpu, req)) {
+ cpu = READ_ONCE(vcpu->cpu);
+ if (cpu != -1 && cpu != me)
+ __cpumask_set_cpu(cpu, tmp);
+--
+2.39.2
+
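The helper that motivates cpumask_available() is short enough to reproduce with extra commentary. With CONFIG_CPUMASK_OFFSTACK=y, a cpumask_var_t is a pointer that zalloc_cpumask_var() may have left NULL; without it, the variable is an on-stack array and cpumask_available() is compile-time true (annotated restatement of the patched helper):

static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
{
        const struct cpumask *cpus;

        /* NULL is only possible when cpumasks live off-stack and the
         * GFP_ATOMIC allocation failed; fall back to everyone online. */
        if (likely(cpumask_available(tmp)))
                cpus = tmp;
        else
                cpus = cpu_online_mask;

        if (cpumask_empty(cpus))
                return false;

        smp_call_function_many(cpus, ack_flush, NULL, wait);
        return true;
}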
--- /dev/null
+From 7975423b500182869b3d98ea8dd9f9b2d5cbabe3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Sep 2021 09:51:37 +0200
+Subject: KVM: Optimize kvm_make_vcpus_request_mask() a bit
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+[ Upstream commit ae0946cd3601752dc58f86d84258e5361e9c8cd4 ]
+
+Iterating over set bits in 'vcpu_bitmap' should be faster than going
+through all vCPUs, especially when just a few bits are set.
+
+Drop the kvm_make_vcpus_request_mask() call from
+kvm_make_all_cpus_request_except() to avoid handling the special case
+when 'vcpu_bitmap' is NULL, and move the code to
+kvm_make_all_cpus_request_except() itself.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-Id: <20210903075141.403071-5-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: 2b0128127373 ("KVM: Register /dev/kvm as the _very_ last thing during initialization")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 88 +++++++++++++++++++++++++++------------------
+ 1 file changed, 53 insertions(+), 35 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index f379398b43d59..34931443dafac 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -264,50 +264,57 @@ static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
+ return true;
+ }
+
++static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu,
++ unsigned int req, cpumask_var_t tmp,
++ int current_cpu)
++{
++ int cpu;
++
++ kvm_make_request(req, vcpu);
++
++ if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
++ return;
++
++ /*
++ * tmp can be "unavailable" if cpumasks are allocated off stack as
++ * allocation of the mask is deliberately not fatal and is handled by
++ * falling back to kicking all online CPUs.
++ */
++ if (!cpumask_available(tmp))
++ return;
++
++ /*
++ * Note, the vCPU could get migrated to a different pCPU at any point
++ * after kvm_request_needs_ipi(), which could result in sending an IPI
++ * to the previous pCPU. But, that's OK because the purpose of the IPI
++ * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
++ * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
++ * after this point is also OK, as the requirement is only that KVM wait
++ * for vCPUs that were reading SPTEs _before_ any changes were
++ * finalized. See kvm_vcpu_kick() for more details on handling requests.
++ */
++ if (kvm_request_needs_ipi(vcpu, req)) {
++ cpu = READ_ONCE(vcpu->cpu);
++ if (cpu != -1 && cpu != current_cpu)
++ __cpumask_set_cpu(cpu, tmp);
++ }
++}
++
+ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ struct kvm_vcpu *except,
+ unsigned long *vcpu_bitmap, cpumask_var_t tmp)
+ {
+- int i, cpu, me;
+ struct kvm_vcpu *vcpu;
++ int i, me;
+ bool called;
+
+ me = get_cpu();
+
+- kvm_for_each_vcpu(i, vcpu, kvm) {
+- if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
+- vcpu == except)
++ for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
++ vcpu = kvm_get_vcpu(kvm, i);
++ if (!vcpu || vcpu == except)
+ continue;
+-
+- kvm_make_request(req, vcpu);
+-
+- if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
+- continue;
+-
+- /*
+- * tmp can be "unavailable" if cpumasks are allocated off stack
+- * as allocation of the mask is deliberately not fatal and is
+- * handled by falling back to kicking all online CPUs.
+- */
+- if (!cpumask_available(tmp))
+- continue;
+-
+- /*
+- * Note, the vCPU could get migrated to a different pCPU at any
+- * point after kvm_request_needs_ipi(), which could result in
+- * sending an IPI to the previous pCPU. But, that's ok because
+- * the purpose of the IPI is to ensure the vCPU returns to
+- * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.
+- * Entering READING_SHADOW_PAGE_TABLES after this point is also
+- * ok, as the requirement is only that KVM wait for vCPUs that
+- * were reading SPTEs _before_ any changes were finalized. See
+- * kvm_vcpu_kick() for more details on handling requests.
+- */
+- if (kvm_request_needs_ipi(vcpu, req)) {
+- cpu = READ_ONCE(vcpu->cpu);
+- if (cpu != -1 && cpu != me)
+- __cpumask_set_cpu(cpu, tmp);
+- }
++ kvm_make_vcpu_request(kvm, vcpu, req, tmp, me);
+ }
+
+ called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
+@@ -319,12 +326,23 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+ struct kvm_vcpu *except)
+ {
++ struct kvm_vcpu *vcpu;
+ cpumask_var_t cpus;
+ bool called;
++ int i, me;
+
+ zalloc_cpumask_var(&cpus, GFP_ATOMIC);
+
+- called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);
++ me = get_cpu();
++
++ kvm_for_each_vcpu(i, vcpu, kvm) {
++ if (vcpu == except)
++ continue;
++ kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);
++ }
++
++ called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
++ put_cpu();
+
+ free_cpumask_var(cpus);
+ return called;
+--
+2.39.2
+
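The payoff of the rework is in the loop shape: the old code visited every vCPU and then filtered against the bitmap, while the new code walks only the set bits, so a sparse bitmap costs proportionally less (condensed before/after, using the new kvm_make_vcpu_request() helper in both halves for brevity):

/* Before: O(online vCPUs); every iteration tests the bitmap. */
kvm_for_each_vcpu(i, vcpu, kvm) {
        if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) || vcpu == except)
                continue;
        kvm_make_vcpu_request(kvm, vcpu, req, tmp, me);
}

/* After: only indices whose bit is set are visited at all. */
for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
        vcpu = kvm_get_vcpu(kvm, i);
        if (!vcpu || vcpu == except)
                continue;
        kvm_make_vcpu_request(kvm, vcpu, req, tmp, me);
}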
--- /dev/null
+From 5478cdd8ade37304b8f56d7f27c006c57398e7f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Sep 2021 09:51:40 +0200
+Subject: KVM: Pre-allocate cpumasks for kvm_make_all_cpus_request_except()
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+[ Upstream commit baff59ccdc657d290be51b95b38ebe5de40036b4 ]
+
+Allocating a cpumask dynamically with zalloc_cpumask_var() is not ideal.
+Allocation is somewhat slow and can (in theory and when CPUMASK_OFFSTACK)
+fail. kvm_make_all_cpus_request_except() already disables preemption so
+we can use pre-allocated per-cpu cpumasks instead.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-Id: <20210903075141.403071-8-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: 2b0128127373 ("KVM: Register /dev/kvm as the _very_ last thing during initialization")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 29 +++++++++++++++++++++++------
+ 1 file changed, 23 insertions(+), 6 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 34931443dafac..d96a076aef0dd 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -154,6 +154,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
+ static unsigned long long kvm_createvm_count;
+ static unsigned long long kvm_active_vms;
+
++static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
++
+ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end)
+ {
+@@ -327,14 +329,15 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+ struct kvm_vcpu *except)
+ {
+ struct kvm_vcpu *vcpu;
+- cpumask_var_t cpus;
++ struct cpumask *cpus;
+ bool called;
+ int i, me;
+
+- zalloc_cpumask_var(&cpus, GFP_ATOMIC);
+-
+ me = get_cpu();
+
++ cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
++ cpumask_clear(cpus);
++
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu == except)
+ continue;
+@@ -344,7 +347,6 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+ called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
+ put_cpu();
+
+- free_cpumask_var(cpus);
+ return called;
+ }
+
+@@ -5002,9 +5004,17 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ goto out_free_3;
+ }
+
++ for_each_possible_cpu(cpu) {
++ if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
++ GFP_KERNEL, cpu_to_node(cpu))) {
++ r = -ENOMEM;
++ goto out_free_4;
++ }
++ }
++
+ r = kvm_async_pf_init();
+ if (r)
+- goto out_free;
++ goto out_free_5;
+
+ kvm_chardev_ops.owner = module;
+ kvm_vm_fops.owner = module;
+@@ -5030,7 +5040,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+
+ out_unreg:
+ kvm_async_pf_deinit();
+-out_free:
++out_free_5:
++ for_each_possible_cpu(cpu)
++ free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
++out_free_4:
+ kmem_cache_destroy(kvm_vcpu_cache);
+ out_free_3:
+ unregister_reboot_notifier(&kvm_reboot_notifier);
+@@ -5050,8 +5063,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
+
+ void kvm_exit(void)
+ {
++ int cpu;
++
+ debugfs_remove_recursive(kvm_debugfs_dir);
+ misc_deregister(&kvm_dev);
++ for_each_possible_cpu(cpu)
++ free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
+ kmem_cache_destroy(kvm_vcpu_cache);
+ kvm_async_pf_deinit();
+ unregister_syscore_ops(&kvm_syscore_ops);
+--
+2.39.2
+
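What makes a single per-CPU mask safe here is that get_cpu() disables preemption, so nothing else can run on this CPU and reuse its pre-allocated mask before put_cpu(). A condensed sketch of the resulting fast path (example_request_all() is an illustrative name; 'except' handling elided):

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

static bool example_request_all(struct kvm *kvm, unsigned int req)
{
        struct kvm_vcpu *vcpu;
        struct cpumask *cpus;
        bool called;
        int i, me = get_cpu();          /* disables preemption */

        /* No locking needed: preemption is off, so this CPU's mask
         * cannot be clobbered by anyone else until put_cpu(). */
        cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
        cpumask_clear(cpus);

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);

        called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
        put_cpu();

        return called;
}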
--- /dev/null
+From c65cefeadff29edfd49b4bdaaae6f7d3bc4cc485 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Nov 2022 23:08:45 +0000
+Subject: KVM: Register /dev/kvm as the _very_ last thing during initialization
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 2b01281273738bf2d6551da48d65db2df3f28998 ]
+
+Register /dev/kvm, i.e. expose KVM to userspace, only after all other
+setup has completed. Once /dev/kvm is exposed, userspace can start
+invoking KVM ioctls, creating VMs, etc... If userspace creates a VM
+before KVM is done with its configuration, bad things may happen, e.g.
+KVM will fail to properly migrate vCPU state if a VM is created before
+KVM has registered preemption notifiers.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20221130230934.1014142-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 31 ++++++++++++++++++++++---------
+ 1 file changed, 22 insertions(+), 9 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index d96a076aef0dd..d2df081021714 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -5020,12 +5020,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ kvm_vm_fops.owner = module;
+ kvm_vcpu_fops.owner = module;
+
+- r = misc_register(&kvm_dev);
+- if (r) {
+- pr_err("kvm: misc device register failed\n");
+- goto out_unreg;
+- }
+-
+ register_syscore_ops(&kvm_syscore_ops);
+
+ kvm_preempt_ops.sched_in = kvm_sched_in;
+@@ -5034,11 +5028,24 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ kvm_init_debug();
+
+ r = kvm_vfio_ops_init();
+- WARN_ON(r);
++ if (WARN_ON_ONCE(r))
++ goto err_vfio;
++
++ /*
++ * Registration _must_ be the very last thing done, as this exposes
++ * /dev/kvm to userspace, i.e. all infrastructure must be setup!
++ */
++ r = misc_register(&kvm_dev);
++ if (r) {
++ pr_err("kvm: misc device register failed\n");
++ goto err_register;
++ }
+
+ return 0;
+
+-out_unreg:
++err_register:
++ kvm_vfio_ops_exit();
++err_vfio:
+ kvm_async_pf_deinit();
+ out_free_5:
+ for_each_possible_cpu(cpu)
+@@ -5065,8 +5072,14 @@ void kvm_exit(void)
+ {
+ int cpu;
+
+- debugfs_remove_recursive(kvm_debugfs_dir);
++ /*
++ * Note, unregistering /dev/kvm doesn't strictly need to come first,
++ * fops_get(), a.k.a. try_module_get(), prevents acquiring references
++ * to KVM while the module is being stopped.
++ */
+ misc_deregister(&kvm_dev);
++
++ debugfs_remove_recursive(kvm_debugfs_dir);
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
+ kmem_cache_destroy(kvm_vcpu_cache);
+--
+2.39.2
+
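misc_register() is the step that actually creates the /dev/kvm node. KVM's miscdevice is declared roughly as below, and the moment registration succeeds, a userspace open() can land in kvm_chardev_ops, which is why everything reachable through those fops must already be initialised:

static struct miscdevice kvm_dev = {
        KVM_MINOR,              /* fixed minor, /dev/kvm */
        "kvm",                  /* device node name */
        &kvm_chardev_ops,       /* fops live the instant this registers */
};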
--- /dev/null
+From aac26468d232afc47b8ea8b7300f20b97ad5ed4c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Mar 2023 04:47:35 +0000
+Subject: perf/core: Fix perf_output_begin parameter is incorrectly invoked in
+ perf_event_bpf_output
+
+From: Yang Jihong <yangjihong1@huawei.com>
+
+[ Upstream commit eb81a2ed4f52be831c9fb879752d89645a312c13 ]
+
+syzkaller reports a KASAN issue with stack-out-of-bounds.
+The call trace is as follows:
+ dump_stack+0x9c/0xd3
+ print_address_description.constprop.0+0x19/0x170
+ __kasan_report.cold+0x6c/0x84
+ kasan_report+0x3a/0x50
+ __perf_event_header__init_id+0x34/0x290
+ perf_event_header__init_id+0x48/0x60
+ perf_output_begin+0x4a4/0x560
+ perf_event_bpf_output+0x161/0x1e0
+ perf_iterate_sb_cpu+0x29e/0x340
+ perf_iterate_sb+0x4c/0xc0
+ perf_event_bpf_event+0x194/0x2c0
+ __bpf_prog_put.constprop.0+0x55/0xf0
+ __cls_bpf_delete_prog+0xea/0x120 [cls_bpf]
+ cls_bpf_delete_prog_work+0x1c/0x30 [cls_bpf]
+ process_one_work+0x3c2/0x730
+ worker_thread+0x93/0x650
+ kthread+0x1b8/0x210
+ ret_from_fork+0x1f/0x30
+
+commit 267fb27352b6 ("perf: Reduce stack usage of perf_output_begin()")
+makes perf_output_begin() use an on-stack struct perf_sample_data from
+the caller function.
+
+However, perf_event_bpf_output passes an incorrect parameter, handing
+over small-sized data (struct perf_bpf_event) as if it were large-sized
+data (struct perf_sample_data), which causes memory overwriting in
+__perf_event_header__init_id.
+
+Fixes: 267fb27352b6 ("perf: Reduce stack usage of perf_output_begin()")
+Signed-off-by: Yang Jihong <yangjihong1@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230314044735.56551-1-yangjihong1@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index d7b61116f15bb..eb8660ed1abba 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -8710,7 +8710,7 @@ static void perf_event_bpf_output(struct perf_event *event, void *data)
+
+ perf_event_header__init_id(&bpf_event->event_id.header,
+ &sample, event);
+- ret = perf_output_begin(&handle, data, event,
++ ret = perf_output_begin(&handle, &sample, event,
+ bpf_event->event_id.header.size);
+ if (ret)
+ return;
+--
+2.39.2
+
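The root cause is a type mismatch hidden behind the void pointer that perf_iterate_sb() hands to its callbacks: perf_output_begin() expects a struct perf_sample_data it may write through, but was given the much smaller struct perf_bpf_event. A sketch of the fixed callback with the mismatch annotated (locals and the match check condensed):

static void perf_event_bpf_output(struct perf_event *event, void *data)
{
        struct perf_bpf_event *bpf_event = data; /* small, caller's frame */
        struct perf_sample_data sample;          /* large, this frame */
        struct perf_output_handle handle;
        int ret;

        perf_event_header__init_id(&bpf_event->event_id.header,
                                   &sample, event);

        /* Passing 'data' here made perf_output_begin() treat the small
         * perf_bpf_event as a perf_sample_data and write past its end;
         * '&sample' is the buffer it must actually receive. */
        ret = perf_output_begin(&handle, &sample, event,
                                bpf_event->event_id.header.size);
        if (ret)
                return;

        /* ... the record is then emitted via perf_output_put() ... */
}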
--- /dev/null
+From 259bc92ebe1e3de34e394106f8b8fb150bfc9fae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Mar 2023 10:16:08 -0700
+Subject: perf: fix perf_event_context->time
+
+From: Song Liu <song@kernel.org>
+
+[ Upstream commit baf1b12a67f5b24f395baca03e442ce27cab0c18 ]
+
+Time readers rely on perf_event_context->[time|timestamp|timeoffset] to get
+accurate time_enabled and time_running for an event. The difference between
+ctx->timestamp and ctx->time is the amount of time when the context is not
+enabled. __update_context_time(ctx, false) is used to increase timestamp,
+but not time. Therefore, it should only be called in ctx_sched_in() when
+EVENT_TIME was not enabled.
+
+Fixes: 09f5e7dc7ad7 ("perf: Fix perf_event_read_local() time")
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Link: https://lkml.kernel.org/r/20230313171608.298734-1-song@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index eb8660ed1abba..e2e1371fbb9d3 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3817,7 +3817,7 @@ ctx_sched_in(struct perf_event_context *ctx,
+ if (likely(!ctx->nr_events))
+ return;
+
+- if (is_active ^ EVENT_TIME) {
++ if (!(is_active & EVENT_TIME)) {
+ /* start ctx time */
+ __update_context_time(ctx, false);
+ perf_cgroup_set_timestamp(task, ctx);
+--
+2.39.2
+
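Why the old test was wrong: is_active is a bitmask, and `is_active ^ EVENT_TIME` is non-zero whenever is_active differs from EVENT_TIME in any bit, not just when EVENT_TIME is clear. A worked case, using the event_type_t values from kernel/events/core.c:

enum event_type_t {
        EVENT_FLEXIBLE  = 0x1,
        EVENT_PINNED    = 0x2,
        EVENT_TIME      = 0x4,
        /* remaining values elided */
};

/* is_active == (EVENT_TIME | EVENT_PINNED): time is already ticking.
 *
 *   old: is_active ^ EVENT_TIME    == 0x2 -> true, so ctx time was
 *        (re)started and ctx->timestamp advanced without ctx->time.
 *   new: !(is_active & EVENT_TIME) == 0   -> false, correctly skipped.
 */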
--- /dev/null
+From 52b867301f93f4630d2caddcbaed2df8b5fc28d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Feb 2023 21:39:53 -0800
+Subject: serial: 8250: ASPEED_VUART: select REGMAP instead of depending on it
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit f8086d1a65ac693e3fd863128352b4b11ee7324d ]
+
+REGMAP is a hidden (not user visible) symbol. Users cannot set it
+directly through "make *config", so drivers should select it instead of
+depending on it if they need it.
+
+Consistently using "select" or "depends on" can also help reduce
+Kconfig circular dependency issues.
+
+Therefore, change the use of "depends on REGMAP" to "select REGMAP".
+
+Fixes: 8d310c9107a2 ("drivers/tty/serial/8250: Make Aspeed VUART SIRQ polarity configurable")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Oskar Senft <osk@google.com>
+Cc: linux-serial@vger.kernel.org
+Link: https://lore.kernel.org/r/20230226053953.4681-9-rdunlap@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/8250/Kconfig | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
+index dcf89db183df9..b7922c8da1e61 100644
+--- a/drivers/tty/serial/8250/Kconfig
++++ b/drivers/tty/serial/8250/Kconfig
+@@ -254,8 +254,9 @@ config SERIAL_8250_ASPEED_VUART
+ tristate "Aspeed Virtual UART"
+ depends on SERIAL_8250
+ depends on OF
+- depends on REGMAP && MFD_SYSCON
++ depends on MFD_SYSCON
+ depends on ARCH_ASPEED || COMPILE_TEST
++ select REGMAP
+ help
+ If you want to use the virtual UART (VUART) device on Aspeed
+ BMC platforms, enable this option. This enables the 16550A-
+--
+2.39.2
+
--- /dev/null
+From 30e545e3639c4b413bb1c967c07874f89c2dcab2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Jul 2022 10:42:52 +0200
+Subject: serial: 8250: SERIAL_8250_ASPEED_VUART should depend on ARCH_ASPEED
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit 806a449725cbd679a7f52c394d3c87b451d66bd5 ]
+
+The Aspeed Virtual UART is only present on Aspeed BMC platforms. Hence
+add a dependency on ARCH_ASPEED, to prevent asking the user about this
+driver when configuring a kernel without Aspeed BMC support.
+
+Reviewed-by: Jeremy Kerr <jk@ozlabs.org>
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Link: https://lore.kernel.org/r/259138c372d433005b4871789ef9ee8d15320307.1657528861.git.geert+renesas@glider.be
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: f8086d1a65ac ("serial: 8250: ASPEED_VUART: select REGMAP instead of depending on it")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/8250/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
+index 136f2b1460f91..dcf89db183df9 100644
+--- a/drivers/tty/serial/8250/Kconfig
++++ b/drivers/tty/serial/8250/Kconfig
+@@ -255,6 +255,7 @@ config SERIAL_8250_ASPEED_VUART
+ depends on SERIAL_8250
+ depends on OF
+ depends on REGMAP && MFD_SYSCON
++ depends on ARCH_ASPEED || COMPILE_TEST
+ help
+ If you want to use the virtual UART (VUART) device on Aspeed
+ BMC platforms, enable this option. This enables the 16550A-
+--
+2.39.2
+
--- /dev/null
+From 9705f3b74d8f44124a5a6ad0bcb4981f7eadf362 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Aug 2022 18:42:08 +0800
+Subject: serial: fsl_lpuart: Fix comment typo
+
+From: Jason Wang <wangborong@cdjrlc.com>
+
+[ Upstream commit 374e01fa1304e1eabd2cd16f750da3ecaeab069b ]
+
+The word `as' is duplicated in the comment; remove one.
+
+Signed-off-by: Jason Wang <wangborong@cdjrlc.com>
+Link: https://lore.kernel.org/r/20220803104208.4127-1-wangborong@cdjrlc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 1be6f2b15f90 ("tty: serial: fsl_lpuart: fix race on RX DMA shutdown")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/fsl_lpuart.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 32cce52800a73..bab296c5a0211 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2766,7 +2766,7 @@ static int __maybe_unused lpuart_suspend(struct device *dev)
+ * EDMA driver during suspend will forcefully release any
+ * non-idle DMA channels. If port wakeup is enabled or if port
+ * is console port or 'no_console_suspend' is set the Rx DMA
+- * cannot resume as as expected, hence gracefully release the
++ * cannot resume as expected, hence gracefully release the
+ * Rx DMA path before suspend and start Rx DMA path on resume.
+ */
+ if (irq_wake) {
+--
+2.39.2
+
--- /dev/null
+interconnect-qcom-osm-l3-fix-icc_onecell_data-alloca.patch
+perf-core-fix-perf_output_begin-parameter-is-incorre.patch
+perf-fix-perf_event_context-time.patch
+ipmi-ssif-make-ssif_i2c_send-void.patch
+ipmi-ssif-increase-the-message-retry-time.patch
+ipmi-ssif-resend_msg-cannot-fail.patch
+ipmi-ssif-add-a-timer-between-request-retries.patch
+kvm-clean-up-benign-vcpu-cpu-data-races-when-kicking.patch
+kvm-kvm-use-cpumask_available-to-check-for-null-cpum.patch
+kvm-optimize-kvm_make_vcpus_request_mask-a-bit.patch
+kvm-pre-allocate-cpumasks-for-kvm_make_all_cpus_requ.patch
+kvm-register-dev-kvm-as-the-_very_-last-thing-during.patch
+serial-fsl_lpuart-fix-comment-typo.patch
+tty-serial-fsl_lpuart-fix-race-on-rx-dma-shutdown.patch
+serial-8250-serial_8250_aspeed_vuart-should-depend-o.patch
+serial-8250-aspeed_vuart-select-regmap-instead-of-de.patch
+interconnect-qcom-msm8974-fix-registration-race.patch
+drm-sun4i-fix-missing-component-unbind-on-bind-error.patch
--- /dev/null
+From 9f495608b9127825bad2b5542a8e8a6166a182e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Mar 2023 14:43:02 +0100
+Subject: tty: serial: fsl_lpuart: fix race on RX DMA shutdown
+
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+
+[ Upstream commit 1be6f2b15f902c02e055ae0b419ca789200473c9 ]
+
+From time to time DMA completion can come in the middle of DMA shutdown:
+
+<process ctx>: <IRQ>:
+lpuart32_shutdown()
+ lpuart_dma_shutdown()
+ del_timer_sync()
+ lpuart_dma_rx_complete()
+ lpuart_copy_rx_to_tty()
+ mod_timer()
+ lpuart_dma_rx_free()
+
+When the timer fires a bit later, sport->dma_rx_desc is NULL:
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000004
+pc : lpuart_copy_rx_to_tty+0xcc/0x5bc
+lr : lpuart_timer_func+0x1c/0x2c
+Call trace:
+ lpuart_copy_rx_to_tty
+ lpuart_timer_func
+ call_timer_fn
+ __run_timers.part.0
+ run_timer_softirq
+ __do_softirq
+ __irq_exit_rcu
+ irq_exit
+ handle_domain_irq
+ gic_handle_irq
+ call_on_irq_stack
+ do_interrupt_handler
+ ...
+
+To fix this, fold del_timer_sync() into lpuart_dma_rx_free() after
+dmaengine_terminate_sync() to make sure the timer will not be
+re-started in lpuart_copy_rx_to_tty() <= lpuart_dma_rx_complete().
+
+Fixes: 4a8588a1cf86 ("serial: fsl_lpuart: delete timer on shutdown")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Link: https://lore.kernel.org/r/20230309134302.74940-2-alexander.sverdlin@siemens.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/fsl_lpuart.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index bab296c5a0211..99f29bd930bd0 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1278,6 +1278,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
+ struct dma_chan *chan = sport->dma_rx_chan;
+
+ dmaengine_terminate_all(chan);
++ del_timer_sync(&sport->lpuart_timer);
+ dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ kfree(sport->rx_ring.buf);
+ sport->rx_ring.tail = 0;
+@@ -1743,7 +1744,6 @@ static int lpuart32_startup(struct uart_port *port)
+ static void lpuart_dma_shutdown(struct lpuart_port *sport)
+ {
+ if (sport->lpuart_dma_rx_use) {
+- del_timer_sync(&sport->lpuart_timer);
+ lpuart_dma_rx_free(&sport->port);
+ sport->lpuart_dma_rx_use = false;
+ }
+@@ -1894,10 +1894,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ * Since timer function acqures sport->port.lock, need to stop before
+ * acquring same lock because otherwise del_timer_sync() can deadlock.
+ */
+- if (old && sport->lpuart_dma_rx_use) {
+- del_timer_sync(&sport->lpuart_timer);
++ if (old && sport->lpuart_dma_rx_use)
+ lpuart_dma_rx_free(&sport->port);
+- }
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+@@ -2129,10 +2127,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ * Since timer function acqures sport->port.lock, need to stop before
+ * acquring same lock because otherwise del_timer_sync() can deadlock.
+ */
+- if (old && sport->lpuart_dma_rx_use) {
+- del_timer_sync(&sport->lpuart_timer);
++ if (old && sport->lpuart_dma_rx_use)
+ lpuart_dma_rx_free(&sport->port);
+- }
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+@@ -2770,7 +2766,6 @@ static int __maybe_unused lpuart_suspend(struct device *dev)
+ * Rx DMA path before suspend and start Rx DMA path on resume.
+ */
+ if (irq_wake) {
+- del_timer_sync(&sport->lpuart_timer);
+ lpuart_dma_rx_free(&sport->port);
+ }
+
+--
+2.39.2
+
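After the fix, the teardown order inside lpuart_dma_rx_free() is what closes the window: the DMA channel is terminated first, so the completion callback stops re-arming the timer, and only then is the timer deleted for good (condensed from the patched function):

static void lpuart_dma_rx_free(struct uart_port *port)
{
        struct lpuart_port *sport = container_of(port,
                                        struct lpuart_port, port);
        struct dma_chan *chan = sport->dma_rx_chan;

        /* Stop the channel first: no further lpuart_dma_rx_complete()
         * callbacks, hence no further mod_timer() behind our back. */
        dmaengine_terminate_all(chan);

        /* Now the timer can be removed for good; del_timer_sync() also
         * waits out a handler that is already running. */
        del_timer_sync(&sport->lpuart_timer);

        dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
        kfree(sport->rx_ring.buf);
        sport->rx_ring.tail = 0;
        sport->rx_ring.head = 0;
}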