--- /dev/null
+From f4f3a864c1732fc91aa489f416d022837b0c813f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Nov 2023 16:23:07 +0100
+Subject: gpiolib: sysfs: Fix error handling on failed export
+
+From: Boerge Struempfel <boerge.struempfel@gmail.com>
+
+[ Upstream commit 95dd1e34ff5bbee93a28ff3947eceaf6de811b1a ]
+
+If gpio_set_transitory() fails, we should free the GPIO again. Most
+notably, the flag FLAG_REQUESTED has previously been set in
+gpiod_request_commit(), and should be reset on failure.
+
+To my knowledge, this does not affect any current users, since the
+gpio_set_transitory() mainly returns 0 and -ENOTSUPP, which is converted
+to 0. However the gpio_set_transitory() function calls the .set_config()
+function of the corresponding GPIO chip and there are some GPIO drivers in
+which some (unlikely) branches return other values like -EPROBE_DEFER,
+and -EINVAL. In these cases, the above mentioned FLAG_REQUESTED would not
+be reset, which results in the pin being blocked until the next reboot.
+
+Fixes: e10f72bf4b3e ("gpio: gpiolib: Generalise state persistence beyond sleep")
+Signed-off-by: Boerge Struempfel <boerge.struempfel@gmail.com>
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpiolib-sysfs.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
+index e0ccc79b239a5..7a8dfc36a6c15 100644
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -495,14 +495,17 @@ static ssize_t export_store(struct class *class,
+ }
+
+ status = gpiod_set_transitory(desc, false);
+- if (!status) {
+- status = gpiod_export(desc, true);
+- if (status < 0)
+- gpiod_free(desc);
+- else
+- set_bit(FLAG_SYSFS, &desc->flags);
++ if (status) {
++ gpiod_free(desc);
++ goto done;
+ }
+
++ status = gpiod_export(desc, true);
++ if (status < 0)
++ gpiod_free(desc);
++ else
++ set_bit(FLAG_SYSFS, &desc->flags);
++
+ done:
+ if (status)
+ pr_debug("%s: status %d\n", __func__, status);
+--
+2.42.0
+
--- /dev/null
+From 6ae9186f2ee8a48d2233ed733508292406a9a0b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jun 2022 11:06:23 -0700
+Subject: perf/core: Add a new read format to get a number of lost samples
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+[ Upstream commit 119a784c81270eb88e573174ed2209225d646656 ]
+
+Sometimes we want to know an accurate number of samples even if it's
+lost. Currently PERF_RECORD_LOST is generated for a ring-buffer which
+might be shared with other events. So it's hard to know per-event
+lost count.
+
+Add event->lost_samples field and PERF_FORMAT_LOST to retrieve it from
+userspace.
+
+Original-patch-by: Jiri Olsa <jolsa@redhat.com>
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20220616180623.1358843-1-namhyung@kernel.org
+Stable-dep-of: 382c27f4ed28 ("perf: Fix perf_event_validate_size()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/perf_event.h | 2 ++
+ include/uapi/linux/perf_event.h | 5 ++++-
+ kernel/events/core.c | 21 ++++++++++++++++++---
+ kernel/events/ring_buffer.c | 5 ++++-
+ 4 files changed, 28 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index d46eeddeb8593..123e4d715c98d 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -685,6 +685,8 @@ struct perf_event {
+ struct pid_namespace *ns;
+ u64 id;
+
++ atomic64_t lost_samples;
++
+ u64 (*clock)(void);
+ perf_overflow_handler_t overflow_handler;
+ void *overflow_handler_context;
+diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
+index 5fb4cdf37100c..140024b2db381 100644
+--- a/include/uapi/linux/perf_event.h
++++ b/include/uapi/linux/perf_event.h
+@@ -273,6 +273,7 @@ enum {
+ * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ * { u64 id; } && PERF_FORMAT_ID
++ * { u64 lost; } && PERF_FORMAT_LOST
+ * } && !PERF_FORMAT_GROUP
+ *
+ * { u64 nr;
+@@ -280,6 +281,7 @@ enum {
+ * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ * { u64 value;
+ * { u64 id; } && PERF_FORMAT_ID
++ * { u64 lost; } && PERF_FORMAT_LOST
+ * } cntr[nr];
+ * } && PERF_FORMAT_GROUP
+ * };
+@@ -289,8 +291,9 @@ enum perf_event_read_format {
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
+ PERF_FORMAT_ID = 1U << 2,
+ PERF_FORMAT_GROUP = 1U << 3,
++ PERF_FORMAT_LOST = 1U << 4,
+
+- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
++ PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
+ };
+
+ #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 3c8eb5d842142..acc6403fc87be 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1725,6 +1725,9 @@ static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
+ if (event->attr.read_format & PERF_FORMAT_ID)
+ entry += sizeof(u64);
+
++ if (event->attr.read_format & PERF_FORMAT_LOST)
++ entry += sizeof(u64);
++
+ if (event->attr.read_format & PERF_FORMAT_GROUP) {
+ nr += nr_siblings;
+ size += sizeof(u64);
+@@ -4816,11 +4819,15 @@ static int __perf_read_group_add(struct perf_event *leader,
+ values[n++] += perf_event_count(leader);
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(leader);
++ if (read_format & PERF_FORMAT_LOST)
++ values[n++] = atomic64_read(&leader->lost_samples);
+
+ for_each_sibling_event(sub, leader) {
+ values[n++] += perf_event_count(sub);
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(sub);
++ if (read_format & PERF_FORMAT_LOST)
++ values[n++] = atomic64_read(&sub->lost_samples);
+ }
+
+ unlock:
+@@ -4874,7 +4881,7 @@ static int perf_read_one(struct perf_event *event,
+ u64 read_format, char __user *buf)
+ {
+ u64 enabled, running;
+- u64 values[4];
++ u64 values[5];
+ int n = 0;
+
+ values[n++] = __perf_event_read_value(event, &enabled, &running);
+@@ -4884,6 +4891,8 @@ static int perf_read_one(struct perf_event *event,
+ values[n++] = running;
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(event);
++ if (read_format & PERF_FORMAT_LOST)
++ values[n++] = atomic64_read(&event->lost_samples);
+
+ if (copy_to_user(buf, values, n * sizeof(u64)))
+ return -EFAULT;
+@@ -6193,7 +6202,7 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+ u64 enabled, u64 running)
+ {
+ u64 read_format = event->attr.read_format;
+- u64 values[4];
++ u64 values[5];
+ int n = 0;
+
+ values[n++] = perf_event_count(event);
+@@ -6207,6 +6216,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+ }
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(event);
++ if (read_format & PERF_FORMAT_LOST)
++ values[n++] = atomic64_read(&event->lost_samples);
+
+ __output_copy(handle, values, n * sizeof(u64));
+ }
+@@ -6217,7 +6228,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
+ {
+ struct perf_event *leader = event->group_leader, *sub;
+ u64 read_format = event->attr.read_format;
+- u64 values[5];
++ u64 values[6];
+ int n = 0;
+
+ values[n++] = 1 + leader->nr_siblings;
+@@ -6235,6 +6246,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
+ values[n++] = perf_event_count(leader);
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(leader);
++ if (read_format & PERF_FORMAT_LOST)
++ values[n++] = atomic64_read(&leader->lost_samples);
+
+ __output_copy(handle, values, n * sizeof(u64));
+
+@@ -6248,6 +6261,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
+ values[n++] = perf_event_count(sub);
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(sub);
++ if (read_format & PERF_FORMAT_LOST)
++ values[n++] = atomic64_read(&sub->lost_samples);
+
+ __output_copy(handle, values, n * sizeof(u64));
+ }
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 2f6f77658eba2..ddcbd03eccbba 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -165,8 +165,10 @@ __perf_output_begin(struct perf_output_handle *handle,
+ goto out;
+
+ if (unlikely(rb->paused)) {
+- if (rb->nr_pages)
++ if (rb->nr_pages) {
+ local_inc(&rb->lost);
++ atomic64_inc(&event->lost_samples);
++ }
+ goto out;
+ }
+
+@@ -249,6 +251,7 @@ __perf_output_begin(struct perf_output_handle *handle,
+
+ fail:
+ local_inc(&rb->lost);
++ atomic64_inc(&event->lost_samples);
+ perf_output_put_handle(handle);
+ out:
+ rcu_read_unlock();
+--
+2.42.0
+
--- /dev/null
+From 9d4c31b570c1fcbe06705dc10f78aa4f841c4cac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Nov 2023 15:24:52 +0100
+Subject: perf: Fix perf_event_validate_size()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 382c27f4ed28f803b1f1473ac2d8db0afc795a1b ]
+
+Budimir noted that perf_event_validate_size() only checks the size of
+the newly added event, even though the sizes of all existing events
+can also change due to not all events having the same read_format.
+
+When we attach the new event, perf_group_attach(), we do re-compute
+the size for all events.
+
+Fixes: a723968c0ed3 ("perf: Fix u16 overflows")
+Reported-by: Budimir Markovic <markovicbudimir@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 61 +++++++++++++++++++++++++++-----------------
+ 1 file changed, 38 insertions(+), 23 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index acc6403fc87be..4182e265176dc 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1710,31 +1710,34 @@ static inline void perf_event__state_init(struct perf_event *event)
+ PERF_EVENT_STATE_INACTIVE;
+ }
+
+-static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
++static int __perf_event_read_size(u64 read_format, int nr_siblings)
+ {
+ int entry = sizeof(u64); /* value */
+ int size = 0;
+ int nr = 1;
+
+- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
++ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ size += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ size += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_ID)
++ if (read_format & PERF_FORMAT_ID)
+ entry += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_LOST)
++ if (read_format & PERF_FORMAT_LOST)
+ entry += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_GROUP) {
++ if (read_format & PERF_FORMAT_GROUP) {
+ nr += nr_siblings;
+ size += sizeof(u64);
+ }
+
+- size += entry * nr;
+- event->read_size = size;
++ /*
++ * Since perf_event_validate_size() limits this to 16k and inhibits
++ * adding more siblings, this will never overflow.
++ */
++ return size + nr * entry;
+ }
+
+ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
+@@ -1775,8 +1778,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
+ */
+ static void perf_event__header_size(struct perf_event *event)
+ {
+- __perf_event_read_size(event,
+- event->group_leader->nr_siblings);
++ event->read_size =
++ __perf_event_read_size(event->attr.read_format,
++ event->group_leader->nr_siblings);
+ __perf_event_header_size(event, event->attr.sample_type);
+ }
+
+@@ -1807,24 +1811,35 @@ static void perf_event__id_header_size(struct perf_event *event)
+ event->id_header_size = size;
+ }
+
++/*
++ * Check that adding an event to the group does not result in anybody
++ * overflowing the 64k event limit imposed by the output buffer.
++ *
++ * Specifically, check that the read_size for the event does not exceed 16k,
++ * read_size being the one term that grows with groups size. Since read_size
++ * depends on per-event read_format, also (re)check the existing events.
++ *
++ * This leaves 48k for the constant size fields and things like callchains,
++ * branch stacks and register sets.
++ */
+ static bool perf_event_validate_size(struct perf_event *event)
+ {
+- /*
+- * The values computed here will be over-written when we actually
+- * attach the event.
+- */
+- __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+- __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+- perf_event__id_header_size(event);
++ struct perf_event *sibling, *group_leader = event->group_leader;
+
+- /*
+- * Sum the lot; should not exceed the 64k limit we have on records.
+- * Conservative limit to allow for callchains and other variable fields.
+- */
+- if (event->read_size + event->header_size +
+- event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
++ if (__perf_event_read_size(event->attr.read_format,
++ group_leader->nr_siblings + 1) > 16*1024)
+ return false;
+
++ if (__perf_event_read_size(group_leader->attr.read_format,
++ group_leader->nr_siblings + 1) > 16*1024)
++ return false;
++
++ for_each_sibling_event(sibling, group_leader) {
++ if (__perf_event_read_size(sibling->attr.read_format,
++ group_leader->nr_siblings + 1) > 16*1024)
++ return false;
++ }
++
+ return true;
+ }
+
+--
+2.42.0
+