--- /dev/null
+From fc45e55ebc58dbf622cb89ddbf797589c7a5510b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Thu, 21 Apr 2022 16:36:34 +0300
+Subject: ACPI: processor: idle: Avoid falling back to C3 type C-states
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit fc45e55ebc58dbf622cb89ddbf797589c7a5510b upstream.
+
+The "safe state" index is used by acpi_idle_enter_bm() to avoid
+entering a C-state that may require bus mastering to be disabled
+on entry in the cases when this is not going to happen. For this
+reason, it should not be set to point to C3 type of C-states, because
+they may require bus mastering to be disabled on entry in principle.
+
+This was broken by commit d6b88ce2eb9d ("ACPI: processor idle: Allow
+playing dead in C3 state") which inadvertently allowed the "safe
+state" index to point to C3 type of C-states.
+
+This results in a machine that won't boot past the point when it first
+enters C3. Restore the correct behaviour (either demote to C1/C2, or
+use C3 but also set ARB_DIS=1).
+
+I hit this on a Fujitsu Siemens Lifebook S6010 (P3) machine.
+
+Fixes: d6b88ce2eb9d ("ACPI: processor idle: Allow playing dead in C3 state")
+Cc: 5.16+ <stable@vger.kernel.org> # 5.16+
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Tested-by: Woody Suwalski <wsuwalski@gmail.com>
+[ rjw: Subject and changelog adjustments ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/processor_idle.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -790,7 +790,8 @@ static int acpi_processor_setup_cstates(
+ if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
+ cx->type == ACPI_STATE_C3) {
+ state->enter_dead = acpi_idle_play_dead;
+- drv->safe_state_index = count;
++ if (cx->type != ACPI_STATE_C3)
++ drv->safe_state_index = count;
+ }
+ /*
+ * Halt-induced C1 is not good for ->enter_s2idle, because it
--- /dev/null
+From 3f7ce6d7091765ed6c67c5d78aa364b9d17e3aab Mon Sep 17 00:00:00 2001
+From: Eugen Hristev <eugen.hristev@microchip.com>
+Date: Mon, 7 Mar 2022 13:38:27 +0200
+Subject: ARM: dts: at91: sama7g5ek: enable pull-up on flexcom3 console lines
+
+From: Eugen Hristev <eugen.hristev@microchip.com>
+
+commit 3f7ce6d7091765ed6c67c5d78aa364b9d17e3aab upstream.
+
+Flexcom3 is used as board console serial. There are no pull-ups on these
+lines on the board. This means that if a cable is not connected (that has
+pull-ups included), stray characters could appear on the console as the
+floating pins voltage levels are interpreted as incoming characters.
+To avoid this problem, enable the internal pull-ups on these lines.
+
+Fixes: 7540629e2fc7 ("ARM: dts: at91: add sama7g5 SoC DT and sama7g5-ek")
+Cc: stable@vger.kernel.org # v5.15+
+Signed-off-by: Eugen Hristev <eugen.hristev@microchip.com>
+Reviewed-by: Tudor Ambarus <tudor.ambarus@microchip.com>
+Signed-off-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Link: https://lore.kernel.org/r/20220307113827.2419331-1-eugen.hristev@microchip.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/at91-sama7g5ek.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
++++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
+@@ -465,7 +465,7 @@
+ pinctrl_flx3_default: flx3_default {
+ pinmux = <PIN_PD16__FLEXCOM3_IO0>,
+ <PIN_PD17__FLEXCOM3_IO1>;
+- bias-disable;
++ bias-pull-up;
+ };
+
+ pinctrl_flx4_default: flx4_default {
--- /dev/null
+From 4c79865f3e8a2db93ec1e844509edfebe5a6ae56 Mon Sep 17 00:00:00 2001
+From: Tim Harvey <tharvey@gateworks.com>
+Date: Tue, 5 Apr 2022 12:35:09 -0700
+Subject: ARM: dts: imx8mm-venice-gw{71xx,72xx,73xx}: fix OTG controller OC mode
+
+From: Tim Harvey <tharvey@gateworks.com>
+
+commit 4c79865f3e8a2db93ec1e844509edfebe5a6ae56 upstream.
+
+The GW71xx, GW72xx and GW73xx boards have USB1 routed to a USB OTG
+connectors and USB2 routed to a USB hub.
+
+The OTG connector has an over-current protection with an active-low
+pin and the USB1 to HUB connection has no over-current protection (as
+the HUB itself implements this for its downstream ports).
+
+Add proper dt nodes to specify the over-current pin polarity for USB1
+and disable over-current protection for USB2.
+
+Fixes: 6f30b27c5ef5 ("arm64: dts: imx8mm: Add Gateworks i.MX 8M Mini Development Kits")
+Cc: stable@vger.kernel.org
+Signed-off-by: Tim Harvey <tharvey@gateworks.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi | 2 ++
+ arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi | 2 ++
+ arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi | 2 ++
+ 3 files changed, 6 insertions(+)
+
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+@@ -103,12 +103,14 @@
+
+ &usbotg1 {
+ dr_mode = "otg";
++ over-current-active-low;
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ status = "okay";
+ };
+
+ &usbotg2 {
+ dr_mode = "host";
++ disable-over-current;
+ status = "okay";
+ };
+
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
+@@ -139,12 +139,14 @@
+
+ &usbotg1 {
+ dr_mode = "otg";
++ over-current-active-low;
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ status = "okay";
+ };
+
+ &usbotg2 {
+ dr_mode = "host";
++ disable-over-current;
+ vbus-supply = <&reg_usb_otg2_vbus>;
+ status = "okay";
+ };
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
+@@ -166,12 +166,14 @@
+
+ &usbotg1 {
+ dr_mode = "otg";
++ over-current-active-low;
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ status = "okay";
+ };
+
+ &usbotg2 {
+ dr_mode = "host";
++ disable-over-current;
+ vbus-supply = <&reg_usb_otg2_vbus>;
+ status = "okay";
+ };
--- /dev/null
+From 09df6a75fffa68169c5ef9bef990cd7ba94f3eef Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 7 Apr 2022 16:07:38 +0200
+Subject: bfq: Fix warning in bfqq_request_over_limit()
+
+From: Jan Kara <jack@suse.cz>
+
+commit 09df6a75fffa68169c5ef9bef990cd7ba94f3eef upstream.
+
+People are occasionally reporting a warning bfqq_request_over_limit()
+triggering reporting that BFQ's idea of cgroup hierarchy (and its depth)
+does not match what generic blkcg code thinks. This can actually happen
+when bfqq gets moved between BFQ groups while bfqq_request_over_limit()
+is running. Make sure the code is safe against BFQ queue being moved to
+a different BFQ group.
+
+Fixes: 76f1df88bbc2 ("bfq: Limit number of requests consumed by each cgroup")
+CC: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/CAJCQCtTw_2C7ZSz7as5Gvq=OmnDiio=HRkQekqWpKot84sQhFA@mail.gmail.com/
+Reported-by: Chris Murphy <lists@colorremedies.com>
+Reported-by: "yukuai (C)" <yukuai3@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20220407140738.9723-1-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-iosched.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -569,7 +569,7 @@ static bool bfqq_request_over_limit(stru
+ struct bfq_entity *entity = &bfqq->entity;
+ struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
+ struct bfq_entity **entities = inline_entities;
+- int depth, level;
++ int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
+ int class_idx = bfqq->ioprio_class - 1;
+ struct bfq_sched_data *sched_data;
+ unsigned long wsum;
+@@ -578,15 +578,21 @@ static bool bfqq_request_over_limit(stru
+ if (!entity->on_st_or_in_serv)
+ return false;
+
++retry:
++ spin_lock_irq(&bfqd->lock);
+ /* +1 for bfqq entity, root cgroup not included */
+ depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
+- if (depth > BFQ_LIMIT_INLINE_DEPTH) {
++ if (depth > alloc_depth) {
++ spin_unlock_irq(&bfqd->lock);
++ if (entities != inline_entities)
++ kfree(entities);
+ entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
+ if (!entities)
+ return false;
++ alloc_depth = depth;
++ goto retry;
+ }
+
+- spin_lock_irq(&bfqd->lock);
+ sched_data = entity->sched_data;
+ /* Gather our ancestors as we need to traverse them in reverse order */
+ level = 0;
--- /dev/null
+From a692e13d87cb6d0193387aac55cfcc947077c20b Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Tue, 19 Apr 2022 14:23:57 +0100
+Subject: btrfs: fix assertion failure during scrub due to block group reallocation
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit a692e13d87cb6d0193387aac55cfcc947077c20b upstream.
+
+During a scrub, or device replace, we can race with block group removal
+and allocation and trigger the following assertion failure:
+
+[7526.385524] assertion failed: cache->start == chunk_offset, in fs/btrfs/scrub.c:3817
+[7526.387351] ------------[ cut here ]------------
+[7526.387373] kernel BUG at fs/btrfs/ctree.h:3599!
+[7526.388001] invalid opcode: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC PTI
+[7526.388970] CPU: 2 PID: 1158150 Comm: btrfs Not tainted 5.17.0-rc8-btrfs-next-114 #4
+[7526.390279] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
+[7526.392430] RIP: 0010:assertfail.constprop.0+0x18/0x1a [btrfs]
+[7526.393520] Code: f3 48 c7 c7 20 (...)
+[7526.396926] RSP: 0018:ffffb9154176bc40 EFLAGS: 00010246
+[7526.397690] RAX: 0000000000000048 RBX: ffffa0db8a910000 RCX: 0000000000000000
+[7526.398732] RDX: 0000000000000000 RSI: ffffffff9d7239a2 RDI: 00000000ffffffff
+[7526.399766] RBP: ffffa0db8a911e10 R08: ffffffffa71a3ca0 R09: 0000000000000001
+[7526.400793] R10: 0000000000000001 R11: 0000000000000000 R12: ffffa0db4b170800
+[7526.401839] R13: 00000003494b0000 R14: ffffa0db7c55b488 R15: ffffa0db8b19a000
+[7526.402874] FS: 00007f6c99c40640(0000) GS:ffffa0de6d200000(0000) knlGS:0000000000000000
+[7526.404038] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[7526.405040] CR2: 00007f31b0882160 CR3: 000000014b38c004 CR4: 0000000000370ee0
+[7526.406112] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[7526.407148] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[7526.408169] Call Trace:
+[7526.408529] <TASK>
+[7526.408839] scrub_enumerate_chunks.cold+0x11/0x79 [btrfs]
+[7526.409690] ? do_wait_intr_irq+0xb0/0xb0
+[7526.410276] btrfs_scrub_dev+0x226/0x620 [btrfs]
+[7526.410995] ? preempt_count_add+0x49/0xa0
+[7526.411592] btrfs_ioctl+0x1ab5/0x36d0 [btrfs]
+[7526.412278] ? __fget_files+0xc9/0x1b0
+[7526.412825] ? kvm_sched_clock_read+0x14/0x40
+[7526.413459] ? lock_release+0x155/0x4a0
+[7526.414022] ? __x64_sys_ioctl+0x83/0xb0
+[7526.414601] __x64_sys_ioctl+0x83/0xb0
+[7526.415150] do_syscall_64+0x3b/0xc0
+[7526.415675] entry_SYSCALL_64_after_hwframe+0x44/0xae
+[7526.416408] RIP: 0033:0x7f6c99d34397
+[7526.416931] Code: 3c 1c e8 1c ff (...)
+[7526.419641] RSP: 002b:00007f6c99c3fca8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+[7526.420735] RAX: ffffffffffffffda RBX: 00005624e1e007b0 RCX: 00007f6c99d34397
+[7526.421779] RDX: 00005624e1e007b0 RSI: 00000000c400941b RDI: 0000000000000003
+[7526.422820] RBP: 0000000000000000 R08: 00007f6c99c40640 R09: 0000000000000000
+[7526.423906] R10: 00007f6c99c40640 R11: 0000000000000246 R12: 00007fff746755de
+[7526.424924] R13: 00007fff746755df R14: 0000000000000000 R15: 00007f6c99c40640
+[7526.425950] </TASK>
+
+That assertion is relatively new, introduced with commit d04fbe19aefd2
+("btrfs: scrub: cleanup the argument list of scrub_chunk()").
+
+The block group we get at scrub_enumerate_chunks() can actually have a
+start address that is smaller then the chunk offset we extracted from a
+device extent item we got from the commit root of the device tree.
+This is very rare, but it can happen due to a race with block group
+removal and allocation. For example, the following steps show how this
+can happen:
+
+1) We are at transaction T, and we have the following blocks groups,
+ sorted by their logical start address:
+
+ [ bg A, start address A, length 1G (data) ]
+ [ bg B, start address B, length 1G (data) ]
+ (...)
+ [ bg W, start address W, length 1G (data) ]
+
+ --> logical address space hole of 256M,
+ there used to be a 256M metadata block group here
+
+ [ bg Y, start address Y, length 256M (metadata) ]
+
+ --> Y matches W's end offset + 256M
+
+ Block group Y is the block group with the highest logical address in
+ the whole filesystem;
+
+2) Block group Y is deleted and its extent mapping is removed by the call
+ to remove_extent_mapping() made from btrfs_remove_block_group().
+
+ So after this point, the last element of the mapping red black tree,
+ its rightmost node, is the mapping for block group W;
+
+3) While still at transaction T, a new data block group is allocated,
+ with a length of 1G. When creating the block group we do a call to
+ find_next_chunk(), which returns the logical start address for the
+ new block group. This call returns X, which corresponds to the
+ end offset of the last block group, the rightmost node in the mapping
+ red black tree (fs_info->mapping_tree), plus one.
+
+ So we get a new block group that starts at logical address X and with
+ a length of 1G. It spans over the whole logical range of the old block
+ group Y, that was previously removed in the same transaction.
+
+ However the device extent allocated to block group X is not the same
+ device extent that was used by block group Y, and it also does not
+ overlap that extent, which must be always the case because we allocate
+ extents by searching through the commit root of the device tree
+ (otherwise it could corrupt a filesystem after a power failure or
+ an unclean shutdown in general), so the extent allocator is behaving
+ as expected;
+
+4) We have a task running scrub, currently at scrub_enumerate_chunks().
+ There it searches for device extent items in the device tree, using
+ its commit root. It finds a device extent item that was used by
+ block group Y, and it extracts the value Y from that item into the
+ local variable 'chunk_offset', using btrfs_dev_extent_chunk_offset();
+
+ It then calls btrfs_lookup_block_group() to find block group for
+ the logical address Y - since there's currently no block group that
+ starts at that logical address, it returns block group X, because
+ its range contains Y.
+
+ This results in triggering the assertion:
+
+ ASSERT(cache->start == chunk_offset);
+
+ right before calling scrub_chunk(), as cache->start is X and
+ chunk_offset is Y.
+
+This is more likely to happen on filesystems not larger than 50G, because
+for these filesystems we use a 256M size for metadata block groups and
+a 1G size for data block groups, while for filesystems larger than 50G,
+we use a 1G size for both data and metadata block groups (except for
+zoned filesystems). It could also happen on any filesystem size due to
+the fact that system block groups are always smaller (32M) than both
+data and metadata block groups, but these are not frequently deleted, so
+much less likely to trigger the race.
+
+So make scrub skip any block group with a start offset that is less than
+the value we expect, as that means it's a new block group that was created
+in the current transaction. It's pointless to continue and try to scrub
+its extents, because scrub searches for extents using the commit root, so
+it won't find any. For a device replace, skip it as well for the same
+reasons, and we don't need to worry about the possibility of extents of
+the new block group not being copied to the new device, because we have the write
+duplication setup done through btrfs_map_block().
+
+Fixes: d04fbe19aefd ("btrfs: scrub: cleanup the argument list of scrub_chunk()")
+CC: stable@vger.kernel.org # 5.17
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/dev-replace.c | 7 ++++++-
+ fs/btrfs/scrub.c | 26 +++++++++++++++++++++++++-
+ 2 files changed, 31 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -730,7 +730,12 @@ static int btrfs_dev_replace_start(struc
+
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+
+- /* Commit dev_replace state and reserve 1 item for it. */
++ /*
++ * Commit dev_replace state and reserve 1 item for it.
++ * This is crucial to ensure we won't miss copying extents for new block
++ * groups that are allocated after we started the device replace, and
++ * must be done after setting up the device replace state.
++ */
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -3699,6 +3699,31 @@ int scrub_enumerate_chunks(struct scrub_
+ if (!cache)
+ goto skip;
+
++ ASSERT(cache->start <= chunk_offset);
++ /*
++ * We are using the commit root to search for device extents, so
++ * that means we could have found a device extent item from a
++ * block group that was deleted in the current transaction. The
++ * logical start offset of the deleted block group, stored at
++ * @chunk_offset, might be part of the logical address range of
++ * a new block group (which uses different physical extents).
++ * In this case btrfs_lookup_block_group() has returned the new
++ * block group, and its start address is less than @chunk_offset.
++ *
++ * We skip such new block groups, because it's pointless to
++ * process them, as we won't find their extents because we search
++ * for them using the commit root of the extent tree. For a device
++ * replace it's also fine to skip it, we won't miss copying them
++ * to the target device because we have the write duplication
++ * setup through the regular write path (by btrfs_map_block()),
++ * and we have committed a transaction when we started the device
++ * replace, right after setting up the device replace state.
++ */
++ if (cache->start < chunk_offset) {
++ btrfs_put_block_group(cache);
++ goto skip;
++ }
++
+ if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
+ spin_lock(&cache->lock);
+ if (!cache->to_copy) {
+@@ -3822,7 +3847,6 @@ int scrub_enumerate_chunks(struct scrub_
+ dev_replace->item_needs_writeback = 1;
+ up_write(&dev_replace->rwsem);
+
+- ASSERT(cache->start == chunk_offset);
+ ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
+ dev_extent_len);
+
--- /dev/null
+From 00d825258bcc09c0e1b99aa7f9ad7d2c2fad41fa Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Thu, 24 Mar 2022 17:06:27 +0100
+Subject: btrfs: fix direct I/O read repair for split bios
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 00d825258bcc09c0e1b99aa7f9ad7d2c2fad41fa upstream.
+
+When a bio is split in btrfs_submit_direct, dip->file_offset contains
+the file offset for the first bio. But this means the start value used
+in btrfs_check_read_dio_bio is incorrect for subsequent bios. Add
+a file_offset field to struct btrfs_bio to pass along the correct offset.
+
+Given that check_data_csum only uses start of an error message this
+means problems with this miscalculation will only show up when I/O fails
+or checksums mismatch.
+
+The logic was removed in f4f39fc5dc30 ("btrfs: remove btrfs_bio::logical
+member") but we need it due to the bio splitting.
+
+CC: stable@vger.kernel.org # 5.16+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/extent_io.c | 1 +
+ fs/btrfs/inode.c | 13 +++++--------
+ fs/btrfs/volumes.h | 3 +++
+ 3 files changed, 9 insertions(+), 8 deletions(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2657,6 +2657,7 @@ int btrfs_repair_one_sector(struct inode
+
+ repair_bio = btrfs_bio_alloc(1);
+ repair_bbio = btrfs_bio(repair_bio);
++ repair_bbio->file_offset = start;
+ repair_bio->bi_opf = REQ_OP_READ;
+ repair_bio->bi_end_io = failed_bio->bi_end_io;
+ repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7789,8 +7789,6 @@ static blk_status_t btrfs_check_read_dio
+ const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+- const u64 orig_file_offset = dip->file_offset;
+- u64 start = orig_file_offset;
+ u32 bio_offset = 0;
+ blk_status_t err = BLK_STS_OK;
+
+@@ -7800,6 +7798,8 @@ static blk_status_t btrfs_check_read_dio
+ nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
+ pgoff = bvec.bv_offset;
+ for (i = 0; i < nr_sectors; i++) {
++ u64 start = bbio->file_offset + bio_offset;
++
+ ASSERT(pgoff < PAGE_SIZE);
+ if (uptodate &&
+ (!csum || !check_data_csum(inode, bbio,
+@@ -7812,17 +7812,13 @@ static blk_status_t btrfs_check_read_dio
+ } else {
+ int ret;
+
+- ASSERT((start - orig_file_offset) < UINT_MAX);
+- ret = btrfs_repair_one_sector(inode,
+- &bbio->bio,
+- start - orig_file_offset,
+- bvec.bv_page, pgoff,
++ ret = btrfs_repair_one_sector(inode, &bbio->bio,
++ bio_offset, bvec.bv_page, pgoff,
+ start, bbio->mirror_num,
+ submit_dio_repair_bio);
+ if (ret)
+ err = errno_to_blk_status(ret);
+ }
+- start += sectorsize;
+ ASSERT(bio_offset + sectorsize > bio_offset);
+ bio_offset += sectorsize;
+ pgoff += sectorsize;
+@@ -8025,6 +8021,7 @@ static void btrfs_submit_direct(const st
+ bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
+ bio->bi_private = dip;
+ bio->bi_end_io = btrfs_end_dio_bio;
++ btrfs_bio(bio)->file_offset = file_offset;
+
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ status = extract_ordered_extent(BTRFS_I(inode), bio,
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -323,6 +323,9 @@ struct btrfs_fs_devices {
+ struct btrfs_bio {
+ unsigned int mirror_num;
+
++ /* for direct I/O */
++ u64 file_offset;
++
+ /* @device is for stripe IO submission. */
+ struct btrfs_device *device;
+ u8 *csum;
--- /dev/null
+From 0fdf977d4576ee0decd612e22f6a837a239573cc Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Thu, 24 Mar 2022 17:06:28 +0100
+Subject: btrfs: fix direct I/O writes for split bios on zoned devices
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 0fdf977d4576ee0decd612e22f6a837a239573cc upstream.
+
+When a bio is split in btrfs_submit_direct, dip->file_offset contains
+the file offset for the first bio. But this means the start value used
+in btrfs_end_dio_bio to record the write location for zone devices is
+incorrect for subsequent bios.
+
+CC: stable@vger.kernel.org # 5.16+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7845,6 +7845,7 @@ static blk_status_t btrfs_submit_bio_sta
+ static void btrfs_end_dio_bio(struct bio *bio)
+ {
+ struct btrfs_dio_private *dip = bio->bi_private;
++ struct btrfs_bio *bbio = btrfs_bio(bio);
+ blk_status_t err = bio->bi_status;
+
+ if (err)
+@@ -7855,12 +7856,12 @@ static void btrfs_end_dio_bio(struct bio
+ bio->bi_iter.bi_size, err);
+
+ if (bio_op(bio) == REQ_OP_READ)
+- err = btrfs_check_read_dio_bio(dip, btrfs_bio(bio), !err);
++ err = btrfs_check_read_dio_bio(dip, bbio, !err);
+
+ if (err)
+ dip->dio_bio->bi_status = err;
+
+- btrfs_record_physical_zoned(dip->inode, dip->file_offset, bio);
++ btrfs_record_physical_zoned(dip->inode, bbio->file_offset, bio);
+
+ bio_put(bio);
+ btrfs_dio_private_put(dip);
--- /dev/null
+From 50ff57888d0b13440e7f4cde05dc339ee8d0f1f8 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Wed, 6 Apr 2022 17:07:54 +0100
+Subject: btrfs: fix leaked plug after failure syncing log on zoned filesystems
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 50ff57888d0b13440e7f4cde05dc339ee8d0f1f8 upstream.
+
+On a zoned filesystem, if we fail to allocate the root node for the log
+root tree while syncing the log, we end up returning without finishing
+the IO plug we started before, resulting in leaking resources as we
+have started writeback for extent buffers of a log tree before. That
+allocation failure, which typically is either -ENOMEM or -ENOSPC, is not
+fatal and the fsync can safely fallback to a full transaction commit.
+
+So release the IO plug if we fail to allocate the extent buffer for the
+root of the log root tree when syncing the log on a zoned filesystem.
+
+Fixes: 3ddebf27fcd3a9 ("btrfs: zoned: reorder log node allocation on zoned filesystem")
+CC: stable@vger.kernel.org # 5.15+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/tree-log.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3225,6 +3225,7 @@ int btrfs_sync_log(struct btrfs_trans_ha
+ ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
+ if (ret) {
+ mutex_unlock(&fs_info->tree_root->log_mutex);
++ blk_finish_plug(&plug);
+ goto out;
+ }
+ }
--- /dev/null
+From 5f0addf7b89085f8e0a2593faa419d6111612b9b Mon Sep 17 00:00:00 2001
+From: Naohiro Aota <naohiro.aota@wdc.com>
+Date: Mon, 18 Apr 2022 16:15:03 +0900
+Subject: btrfs: zoned: use dedicated lock for data relocation
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+commit 5f0addf7b89085f8e0a2593faa419d6111612b9b upstream.
+
+Currently, we use btrfs_inode_{lock,unlock}() to grant an exclusive
+writeback of the relocation data inode in
+btrfs_zoned_data_reloc_{lock,unlock}(). However, that can cause a deadlock
+in the following path.
+
+Thread A takes btrfs_inode_lock() and waits for metadata reservation by
+e.g, waiting for writeback:
+
+prealloc_file_extent_cluster()
+ - btrfs_inode_lock(&inode->vfs_inode, 0);
+ - btrfs_prealloc_file_range()
+ ...
+ - btrfs_replace_file_extents()
+ - btrfs_start_transaction
+ ...
+ - btrfs_reserve_metadata_bytes()
+
+Thread B (e.g, doing a writeback work) needs to wait for the inode lock to
+continue writeback process:
+
+do_writepages
+ - btrfs_writepages
+ - extent_writpages
+ - btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
+ - btrfs_inode_lock()
+
+The deadlock is caused by relying on the vfs_inode's lock. By using it, we
+introduced unnecessary exclusion of writeback and
+btrfs_prealloc_file_range(). Also, the lock at this point is useless as we
+don't have any dirty pages in the inode yet.
+
+Introduce fs_info->zoned_data_reloc_io_lock and use it for the exclusive
+writeback.
+
+Fixes: 35156d852762 ("btrfs: zoned: only allow one process to add pages to a relocation inode")
+CC: stable@vger.kernel.org # 5.16.x: 869f4cdc73f9: btrfs: zoned: encapsulate inode locking for zoned relocation
+CC: stable@vger.kernel.org # 5.16.x
+CC: stable@vger.kernel.org # 5.17
+Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/ctree.h | 1 +
+ fs/btrfs/disk-io.c | 1 +
+ fs/btrfs/zoned.h | 4 ++--
+ 3 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1029,6 +1029,7 @@ struct btrfs_fs_info {
+ */
+ spinlock_t relocation_bg_lock;
+ u64 data_reloc_bg;
++ struct mutex zoned_data_reloc_io_lock;
+
+ spinlock_t zone_active_bgs_lock;
+ struct list_head zone_active_bgs;
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3068,6 +3068,7 @@ void btrfs_init_fs_info(struct btrfs_fs_
+ mutex_init(&fs_info->reloc_mutex);
+ mutex_init(&fs_info->delalloc_root_mutex);
+ mutex_init(&fs_info->zoned_meta_io_lock);
++ mutex_init(&fs_info->zoned_data_reloc_io_lock);
+ seqlock_init(&fs_info->profiles_lock);
+
+ INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
+--- a/fs/btrfs/zoned.h
++++ b/fs/btrfs/zoned.h
+@@ -359,7 +359,7 @@ static inline void btrfs_zoned_data_relo
+ struct btrfs_root *root = inode->root;
+
+ if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
+- btrfs_inode_lock(&inode->vfs_inode, 0);
++ mutex_lock(&root->fs_info->zoned_data_reloc_io_lock);
+ }
+
+ static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode)
+@@ -367,7 +367,7 @@ static inline void btrfs_zoned_data_relo
+ struct btrfs_root *root = inode->root;
+
+ if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
+- btrfs_inode_unlock(&inode->vfs_inode, 0);
++ mutex_unlock(&root->fs_info->zoned_data_reloc_io_lock);
+ }
+
+ #endif
--- /dev/null
+From f95af4a9236695caed24fe6401256bb974e8f2a7 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 28 Dec 2021 17:26:24 -0500
+Subject: drm/amdgpu: don't runtime suspend if there are displays attached (v3)
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit f95af4a9236695caed24fe6401256bb974e8f2a7 upstream.
+
+We normally runtime suspend when there are displays attached if they
+are in the DPMS off state, however, if something wakes the GPU
+we send a hotplug event on resume (in case any displays were connected
+while the GPU was in suspend) which can cause userspace to light
+up the displays again soon after they were turned off.
+
+Prior to
+commit 087451f372bf76 ("drm/amdgpu: use generic fb helpers instead of setting up AMD own's."),
+the driver took a runtime pm reference when the fbdev emulation was
+enabled because we didn't implement proper shadowing support for
+vram access when the device was off so the device never runtime
+suspended when there was a console bound. Once that commit landed,
+we now utilize the core fb helper implementation which properly
+handles the emulation, so runtime pm now suspends in cases where it did
+not before. Ultimately, we need to sort out why runtime suspend is not
+working in this case for some users, but this should restore similar
+behavior to before.
+
+v2: move check into runtime_suspend
+v3: wake ups -> wakeups in comment, retain pm_runtime behavior in
+ runtime_idle callback
+
+Fixes: 087451f372bf76 ("drm/amdgpu: use generic fb helpers instead of setting up AMD own's.")
+Link: https://lore.kernel.org/r/20220403132322.51c90903@darkstar.example.org/
+Tested-by: Michele Ballabio <ballabio.m@gmail.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 105 +++++++++++++++++++++-----------
+ 1 file changed, 70 insertions(+), 35 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2348,6 +2348,71 @@ static int amdgpu_pmops_restore(struct d
+ return amdgpu_device_resume(drm_dev, true);
+ }
+
++static int amdgpu_runtime_idle_check_display(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ struct amdgpu_device *adev = drm_to_adev(drm_dev);
++
++ if (adev->mode_info.num_crtc) {
++ struct drm_connector *list_connector;
++ struct drm_connector_list_iter iter;
++ int ret = 0;
++
++ /* XXX: Return busy if any displays are connected to avoid
++ * possible display wakeups after runtime resume due to
++ * hotplug events in case any displays were connected while
++ * the GPU was in suspend. Remove this once that is fixed.
++ */
++ mutex_lock(&drm_dev->mode_config.mutex);
++ drm_connector_list_iter_begin(drm_dev, &iter);
++ drm_for_each_connector_iter(list_connector, &iter) {
++ if (list_connector->status == connector_status_connected) {
++ ret = -EBUSY;
++ break;
++ }
++ }
++ drm_connector_list_iter_end(&iter);
++ mutex_unlock(&drm_dev->mode_config.mutex);
++
++ if (ret)
++ return ret;
++
++ if (amdgpu_device_has_dc_support(adev)) {
++ struct drm_crtc *crtc;
++
++ drm_for_each_crtc(crtc, drm_dev) {
++ drm_modeset_lock(&crtc->mutex, NULL);
++ if (crtc->state->active)
++ ret = -EBUSY;
++ drm_modeset_unlock(&crtc->mutex);
++ if (ret < 0)
++ break;
++ }
++ } else {
++ mutex_lock(&drm_dev->mode_config.mutex);
++ drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
++
++ drm_connector_list_iter_begin(drm_dev, &iter);
++ drm_for_each_connector_iter(list_connector, &iter) {
++ if (list_connector->dpms == DRM_MODE_DPMS_ON) {
++ ret = -EBUSY;
++ break;
++ }
++ }
++
++ drm_connector_list_iter_end(&iter);
++
++ drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
++ mutex_unlock(&drm_dev->mode_config.mutex);
++ }
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
+ static int amdgpu_pmops_runtime_suspend(struct device *dev)
+ {
+ struct pci_dev *pdev = to_pci_dev(dev);
+@@ -2360,6 +2425,10 @@ static int amdgpu_pmops_runtime_suspend(
+ return -EBUSY;
+ }
+
++ ret = amdgpu_runtime_idle_check_display(dev);
++ if (ret)
++ return ret;
++
+ /* wait for all rings to drain before suspending */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+@@ -2469,41 +2538,7 @@ static int amdgpu_pmops_runtime_idle(str
+ return -EBUSY;
+ }
+
+- if (amdgpu_device_has_dc_support(adev)) {
+- struct drm_crtc *crtc;
+-
+- drm_for_each_crtc(crtc, drm_dev) {
+- drm_modeset_lock(&crtc->mutex, NULL);
+- if (crtc->state->active)
+- ret = -EBUSY;
+- drm_modeset_unlock(&crtc->mutex);
+- if (ret < 0)
+- break;
+- }
+-
+- } else {
+- struct drm_connector *list_connector;
+- struct drm_connector_list_iter iter;
+-
+- mutex_lock(&drm_dev->mode_config.mutex);
+- drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+-
+- drm_connector_list_iter_begin(drm_dev, &iter);
+- drm_for_each_connector_iter(list_connector, &iter) {
+- if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+- ret = -EBUSY;
+- break;
+- }
+- }
+-
+- drm_connector_list_iter_end(&iter);
+-
+- drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+- mutex_unlock(&drm_dev->mode_config.mutex);
+- }
+-
+- if (ret == -EBUSY)
+- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
++ ret = amdgpu_runtime_idle_check_display(dev);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
--- /dev/null
+From c05d8332f5d23fa3b521911cbe55a2b67fb21248 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jouni=20H=C3=B6gander?= <jouni.hogander@intel.com>
+Date: Wed, 13 Apr 2022 11:28:26 +0300
+Subject: drm/i915: Check EDID for HDR static metadata when choosing blc
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+commit c05d8332f5d23fa3b521911cbe55a2b67fb21248 upstream.
+
+We have now seen panel (XMG Core 15 e21 laptop) advertising support
+for Intel proprietary eDP backlight control via DPCD registers, but
+actually working only with legacy pwm control.
+
+This patch adds panel EDID check for possible HDR static metadata and
+Intel proprietary eDP backlight control is used only if that exists.
+Missing HDR static metadata is ignored if user specifically asks for
+Intel proprietary eDP backlight control via enable_dpcd_backlight
+parameter.
+
+v2 :
+- Ignore missing HDR static metadata if Intel proprietary eDP
+ backlight control is forced via i915.enable_dpcd_backlight
+- Printout info message if panel is missing HDR static metadata and
+ support for Intel proprietary eDP backlight control is detected
+
+Fixes: 4a8d79901d5b ("drm/i915/dp: Enable Intel's HDR backlight interface (only SDR for now)")
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5284
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: Mika Kahola <mika.kahola@intel.com>
+Cc: Jani Nikula <jani.nikula@intel.com>
+Cc: Filippo Falezza <filippo.falezza@outlook.it>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220413082826.120634-1-jouni.hogander@intel.com
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+(cherry picked from commit b4b157577cb1de13bee8bebc3576f1de6799a921)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 34 +++++++++++++-----
+ 1 file changed, 26 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+@@ -97,6 +97,14 @@
+
+ #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
+
++enum intel_dp_aux_backlight_modparam {
++ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
++ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
++ INTEL_DP_AUX_BACKLIGHT_ON = 1,
++ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
++ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
++};
++
+ /* Intel EDP backlight callbacks */
+ static bool
+ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
+@@ -126,6 +134,24 @@ intel_dp_aux_supports_hdr_backlight(stru
+ return false;
+ }
+
++ /*
++ * If we don't have HDR static metadata there is no way to
++ * runtime detect used range for nits based control. For now
++ * do not use Intel proprietary eDP backlight control if we
++ * don't have this data in panel EDID. In case we find panel
++ * which supports only nits based control, but doesn't provide
++ * HDR static metadata we need to start maintaining table of
++ * ranges for such panels.
++ */
++ if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
++ !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
++ BIT(HDMI_STATIC_METADATA_TYPE1))) {
++ drm_info(&i915->drm,
++ "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
++ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
++ return false;
++ }
++
+ panel->backlight.edp.intel.sdr_uses_aux =
+ tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
+
+@@ -413,14 +439,6 @@ static const struct intel_panel_bl_funcs
+ .get = intel_dp_aux_vesa_get_backlight,
+ };
+
+-enum intel_dp_aux_backlight_modparam {
+- INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+- INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+- INTEL_DP_AUX_BACKLIGHT_ON = 1,
+- INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+- INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+-};
+-
+ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
+ {
+ struct drm_device *dev = connector->base.dev;
--- /dev/null
+From 4ae4dd2e26fdfebf0b8c6af6c325383eadfefdb4 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Thu, 21 Apr 2022 19:22:21 +0300
+Subject: drm/i915: Fix SEL_FETCH_PLANE_*(PIPE_B+) register addresses
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit 4ae4dd2e26fdfebf0b8c6af6c325383eadfefdb4 upstream.
+
+Fix typo in the _SEL_FETCH_PLANE_BASE_1_B register base address.
+
+Fixes: a5523e2ff074a5 ("drm/i915: Add PSR2 selective fetch registers")
+References: https://gitlab.freedesktop.org/drm/intel/-/issues/5400
+Cc: José Roberto de Souza <jose.souza@intel.com>
+Cc: <stable@vger.kernel.org> # v5.9+
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220421162221.2261895-1-imre.deak@intel.com
+(cherry picked from commit af2cbc6ef967f61711a3c40fca5366ea0bc7fecc)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_reg.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -7578,7 +7578,7 @@ enum {
+ #define _SEL_FETCH_PLANE_BASE_6_A 0x70940
+ #define _SEL_FETCH_PLANE_BASE_7_A 0x70960
+ #define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880
+-#define _SEL_FETCH_PLANE_BASE_1_B 0x70990
++#define _SEL_FETCH_PLANE_BASE_1_B 0x71890
+
+ #define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \
+ _SEL_FETCH_PLANE_BASE_1_A, \
--- /dev/null
+From 31fa985b4196f8a66f027672e9bf2b81fea0417c Mon Sep 17 00:00:00 2001
+From: Zqiang <qiang1.zhang@intel.com>
+Date: Wed, 27 Apr 2022 12:41:56 -0700
+Subject: kasan: prevent cpu_quarantine corruption when CPU offline and cache shrink occur at same time
+
+From: Zqiang <qiang1.zhang@intel.com>
+
+commit 31fa985b4196f8a66f027672e9bf2b81fea0417c upstream.
+
+kasan_quarantine_remove_cache() is called in kmem_cache_shrink()/
+destroy(). The kasan_quarantine_remove_cache() call is protected by
+cpuslock in kmem_cache_destroy() to ensure serialization with
+kasan_cpu_offline().
+
+However the kasan_quarantine_remove_cache() call is not protected by
+cpuslock in kmem_cache_shrink(). When a CPU is going offline and cache
+shrink occurs at same time, the cpu_quarantine may be corrupted by
+interrupt (per_cpu_remove_cache operation).
+
+So add a cpu_quarantine offline flags check in per_cpu_remove_cache().
+
+[akpm@linux-foundation.org: add comment, per Zqiang]
+
+Link: https://lkml.kernel.org/r/20220414025925.2423818-1-qiang1.zhang@intel.com
+Signed-off-by: Zqiang <qiang1.zhang@intel.com>
+Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kasan/quarantine.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/mm/kasan/quarantine.c
++++ b/mm/kasan/quarantine.c
+@@ -315,6 +315,13 @@ static void per_cpu_remove_cache(void *a
+ struct qlist_head *q;
+
+ q = this_cpu_ptr(&cpu_quarantine);
++ /*
++ * Ensure the ordering between the writing to q->offline and
++ * per_cpu_remove_cache. Prevent cpu_quarantine from being corrupted
++ * by interrupt.
++ */
++ if (READ_ONCE(q->offline))
++ return;
+ qlist_move_cache(q, &to_free, cache);
+ qlist_free_all(&to_free, cache);
+ }
--- /dev/null
+From ba7542eb2dd5dfc75c457198b88986642e602065 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Mon, 18 Apr 2022 13:18:27 +0530
+Subject: mtd: rawnand: qcom: fix memory corruption that causes panic
+
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+
+commit ba7542eb2dd5dfc75c457198b88986642e602065 upstream.
+
+This patch fixes a memory corruption that occurred in the
+nand_scan() path for Hynix nand device.
+
+On boot, for Hynix nand device will panic at a weird place:
+| Unable to handle kernel NULL pointer dereference at virtual
+ address 00000070
+| [00000070] *pgd=00000000
+| Internal error: Oops: 5 [#1] PREEMPT SMP ARM
+| Modules linked in:
+| CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.17.0-01473-g13ae1769cfb0
+ #38
+| Hardware name: Generic DT based system
+| PC is at nandc_set_reg+0x8/0x1c
+| LR is at qcom_nandc_command+0x20c/0x5d0
+| pc : [<c088b74c>] lr : [<c088d9c8>] psr: 00000113
+| sp : c14adc50 ip : c14ee208 fp : c0cc970c
+| r10: 000000a3 r9 : 00000000 r8 : 00000040
+| r7 : c16f6a00 r6 : 00000090 r5 : 00000004 r4 :c14ee040
+| r3 : 00000000 r2 : 0000000b r1 : 00000000 r0 :c14ee040
+| Flags: nzcv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none
+| Control: 10c5387d Table: 8020406a DAC: 00000051
+| Register r0 information: slab kmalloc-2k start c14ee000 pointer offset
+ 64 size 2048
+| Process swapper/0 (pid: 1, stack limit = 0x(ptrval))
+| nandc_set_reg from qcom_nandc_command+0x20c/0x5d0
+| qcom_nandc_command from nand_readid_op+0x198/0x1e8
+| nand_readid_op from hynix_nand_has_valid_jedecid+0x30/0x78
+| hynix_nand_has_valid_jedecid from hynix_nand_init+0xb8/0x454
+| hynix_nand_init from nand_scan_with_ids+0xa30/0x14a8
+| nand_scan_with_ids from qcom_nandc_probe+0x648/0x7b0
+| qcom_nandc_probe from platform_probe+0x58/0xac
+
+The problem is that the nand_scan()'s qcom_nand_attach_chip callback
+is updating the nandc->max_cwperpage from 1 to 4 or 8 based on page size.
+This causes the sg_init_table of clear_bam_transaction() in the driver's
+qcom_nandc_command() to memset much more than what was initially
+allocated by alloc_bam_transaction().
+
+This patch will update nandc->max_cwperpage 1 to 4 or 8 based on page
+size in qcom_nand_attach_chip call back after freeing the previously
+allocated memory for bam txn as per nandc->max_cwperpage = 1 and then
+again allocating bam txn as per nandc->max_cwperpage = 4 or 8 based on
+page size in qcom_nand_attach_chip call back itself.
+
+Cc: stable@vger.kernel.org
+Fixes: 6a3cec64f18c ("mtd: rawnand: qcom: convert driver to nand_scan()")
+Reported-by: Konrad Dybcio <konrad.dybcio@somainline.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Co-developed-by: Sricharan R <quic_srichara@quicinc.com>
+Signed-off-by: Sricharan R <quic_srichara@quicinc.com>
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/1650268107-5363-1-git-send-email-quic_mdalam@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -2651,10 +2651,23 @@ static int qcom_nand_attach_chip(struct
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
++ /* Free the initially allocated BAM transaction for reading the ONFI params */
++ if (nandc->props->is_bam)
++ free_bam_transaction(nandc);
+
+ nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+ cwperpage);
+
++ /* Now allocate the BAM transaction based on updated max_cwperpage */
++ if (nandc->props->is_bam) {
++ nandc->bam_txn = alloc_bam_transaction(nandc);
++ if (!nandc->bam_txn) {
++ dev_err(nandc->dev,
++ "failed to allocate bam transaction\n");
++ return -ENOMEM;
++ }
++ }
++
+ /*
+ * DATA_UD_BYTES varies based on whether the read/write command protects
+ * spare data with ECC too. We protect spare data by default, so we set
+@@ -2955,17 +2968,6 @@ static int qcom_nand_host_init_and_regis
+ if (ret)
+ return ret;
+
+- if (nandc->props->is_bam) {
+- free_bam_transaction(nandc);
+- nandc->bam_txn = alloc_bam_transaction(nandc);
+- if (!nandc->bam_txn) {
+- dev_err(nandc->dev,
+- "failed to allocate bam transaction\n");
+- nand_cleanup(chip);
+- return -ENOMEM;
+- }
+- }
+-
+ ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
--- /dev/null
+From 5fd1fe4807f91ea0cca043114d929faa11bd4190 Mon Sep 17 00:00:00 2001
+From: Dinh Nguyen <dinguyen@kernel.org>
+Date: Wed, 20 Apr 2022 10:23:45 -0500
+Subject: net: ethernet: stmmac: fix write to sgmii_adapter_base
+
+From: Dinh Nguyen <dinguyen@kernel.org>
+
+commit 5fd1fe4807f91ea0cca043114d929faa11bd4190 upstream.
+
+I made a mistake with the commit a6aaa0032424 ("net: ethernet: stmmac:
+fix altr_tse_pcs function when using a fixed-link"). I should have
+tested against both scenario of having a SGMII interface and one
+without.
+
+Without the SGMII PCS TSE adapter, the sgmii_adapter_base address is
+NULL, thus a write to this address will fail.
+
+Cc: stable@vger.kernel.org
+Fixes: a6aaa0032424 ("net: ethernet: stmmac: fix altr_tse_pcs function when using a fixed-link")
+Signed-off-by: Dinh Nguyen <dinguyen@kernel.org>
+Link: https://lore.kernel.org/r/20220420152345.27415-1-dinguyen@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -65,8 +65,9 @@ static void socfpga_dwmac_fix_mac_speed(
+ struct phy_device *phy_dev = ndev->phydev;
+ u32 val;
+
+- writew(SGMII_ADAPTER_DISABLE,
+- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
++ if (sgmii_adapter_base)
++ writew(SGMII_ADAPTER_DISABLE,
++ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+
+ if (splitter_base) {
+ val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
+@@ -88,10 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(
+ writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
+ }
+
+- writew(SGMII_ADAPTER_ENABLE,
+- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+- if (phy_dev)
++ if (phy_dev && sgmii_adapter_base) {
++ writew(SGMII_ADAPTER_ENABLE,
++ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
++ }
+ }
+
+ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
--- /dev/null
+From 8ddffdb9442a9d60b4a6e679ac48d7d21403a674 Mon Sep 17 00:00:00 2001
+From: Martin Willi <martin@strongswan.org>
+Date: Tue, 19 Apr 2022 15:47:00 +0200
+Subject: netfilter: Update ip6_route_me_harder to consider L3 domain
+
+From: Martin Willi <martin@strongswan.org>
+
+commit 8ddffdb9442a9d60b4a6e679ac48d7d21403a674 upstream.
+
+The commit referenced below fixed packet re-routing if Netfilter mangles
+a routing key property of a packet and the packet is routed in a VRF L3
+domain. The fix, however, addressed IPv4 re-routing, only.
+
+This commit applies the same behavior for IPv6. While at it, untangle
+the nested ternary operator to make the code more readable.
+
+Fixes: 6d8b49c3a3a3 ("netfilter: Update ip_route_me_harder to consider L3 domain")
+Cc: stable@vger.kernel.org
+Signed-off-by: Martin Willi <martin@strongswan.org>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/netfilter.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -24,14 +24,13 @@ int ip6_route_me_harder(struct net *net,
+ {
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct sock *sk = sk_to_full_sk(sk_partial);
++ struct net_device *dev = skb_dst(skb)->dev;
+ struct flow_keys flkeys;
+ unsigned int hh_len;
+ struct dst_entry *dst;
+ int strict = (ipv6_addr_type(&iph->daddr) &
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
+ struct flowi6 fl6 = {
+- .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
+- strict ? skb_dst(skb)->dev->ifindex : 0,
+ .flowi6_mark = skb->mark,
+ .flowi6_uid = sock_net_uid(net, sk),
+ .daddr = iph->daddr,
+@@ -39,6 +38,13 @@ int ip6_route_me_harder(struct net *net,
+ };
+ int err;
+
++ if (sk && sk->sk_bound_dev_if)
++ fl6.flowi6_oif = sk->sk_bound_dev_if;
++ else if (strict)
++ fl6.flowi6_oif = dev->ifindex;
++ else
++ fl6.flowi6_oif = l3mdev_master_ifindex(dev);
++
+ fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
+ dst = ip6_route_output(net, sk, &fl6);
+ err = dst->error;
--- /dev/null
+From 20e582e16af24b074e583f9551fad557882a3c9d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 20 Apr 2022 16:44:17 +0300
+Subject: Revert "ACPI: processor: idle: fix lockup regression on 32-bit ThinkPad T40"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 20e582e16af24b074e583f9551fad557882a3c9d upstream.
+
+This reverts commit bfe55a1f7fd6bfede16078bf04c6250fbca11588.
+
+This was presumably misdiagnosed as an inability to use C3 at
+all when I suspect the real problem is just misconfiguration of
+C3 vs. ARB_DIS.
+
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: 5.16+ <stable@vger.kernel.org> # 5.16+
+Tested-by: Woody Suwalski <wsuwalski@gmail.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/processor_idle.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -96,11 +96,6 @@ static const struct dmi_system_id proces
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+ (void *)1},
+- /* T40 can not handle C3 idle state */
+- { set_max_cstate, "IBM ThinkPad T40", {
+- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
+- (void *)2},
+ {},
+ };
+
--- /dev/null
+From 4cddeacad6d4b23493a108d0705e7d2ab89ba5a3 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Wed, 27 Apr 2022 09:49:12 -1000
+Subject: Revert "block: inherit request start time from bio for BLK_CGROUP"
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 4cddeacad6d4b23493a108d0705e7d2ab89ba5a3 upstream.
+
+This reverts commit 0006707723233cb2a9a23ca19fc3d0864835704c. It has a
+couple problems:
+
+* bio_issue_time() is stored in bio->bi_issue truncated to 51 bits. This
+ overflows in slightly over 26 days. Setting rq->io_start_time_ns with it
+ means that io duration calculation would yield >26days after 26 days of
+ uptime. This, for example, confuses kyber making it cause high IO
+ latencies.
+
+* rq->io_start_time_ns should record the time that the IO is issued to the
+ device so that on-device latency can be measured. However,
+ bio_issue_time() is set before the bio goes through the rq-qos controllers
+ (wbt, iolatency, iocost), so when the bio gets throttled in any of the
+ mechanisms, the measured latencies make no sense - on-device latencies end
+ up higher than request-alloc-to-completion latencies.
+
+We'll need a smarter way to avoid calling ktime_get_ns() repeatedly
+back-to-back. For now, let's revert the commit.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: stable@vger.kernel.org # v5.16+
+Link: https://lore.kernel.org/r/YmmeOLfo5lzc+8yI@slm.duckdns.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1122,14 +1122,7 @@ void blk_mq_start_request(struct request
+ trace_block_rq_issue(rq);
+
+ if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
+- u64 start_time;
+-#ifdef CONFIG_BLK_CGROUP
+- if (rq->bio)
+- start_time = bio_issue_time(&rq->bio->bi_issue);
+- else
+-#endif
+- start_time = ktime_get_ns();
+- rq->io_start_time_ns = start_time;
++ rq->io_start_time_ns = ktime_get_ns();
+ rq->stats_sectors = blk_rq_sectors(rq);
+ rq->rq_flags |= RQF_STATS;
+ rq_qos_issue(q, rq);
alsa-hda-intel-dsp-config-add-raptorlake-pci-ids.patch
selftest-vm-verify-mmap-addr-in-mremap_test.patch
selftest-vm-verify-remap-destination-address-in-mrem.patch
+bfq-fix-warning-in-bfqq_request_over_limit.patch
+revert-acpi-processor-idle-fix-lockup-regression-on-32-bit-thinkpad-t40.patch
+revert-block-inherit-request-start-time-from-bio-for-blk_cgroup.patch
+zonefs-fix-management-of-open-zones.patch
+zonefs-clear-inode-information-flags-on-inode-creation.patch
+kasan-prevent-cpu_quarantine-corruption-when-cpu-offline-and-cache-shrink-occur-at-same-time.patch
+mtd-rawnand-qcom-fix-memory-corruption-that-causes-panic.patch
+netfilter-update-ip6_route_me_harder-to-consider-l3-domain.patch
+drm-amdgpu-don-t-runtime-suspend-if-there-are-displays-attached-v3.patch
+drm-i915-check-edid-for-hdr-static-metadata-when-choosing-blc.patch
+drm-i915-fix-sel_fetch_plane_-pipe_b-register-addresses.patch
+net-ethernet-stmmac-fix-write-to-sgmii_adapter_base.patch
+acpi-processor-idle-avoid-falling-back-to-c3-type-c-states.patch
+thermal-int340x-fix-attr.show-callback-prototype.patch
+btrfs-fix-direct-i-o-read-repair-for-split-bios.patch
+btrfs-fix-direct-i-o-writes-for-split-bios-on-zoned-devices.patch
+btrfs-fix-leaked-plug-after-failure-syncing-log-on-zoned-filesystems.patch
+btrfs-zoned-use-dedicated-lock-for-data-relocation.patch
+btrfs-fix-assertion-failure-during-scrub-due-to-block-group-reallocation.patch
+arm-dts-at91-sama7g5ek-enable-pull-up-on-flexcom3-console-lines.patch
+arm-dts-imx8mm-venice-gw-71xx-72xx-73xx-fix-otg-controller-oc-mode.patch
--- /dev/null
+From d0f6cfb2bd165b0aa307750e07e03420859bd554 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 21 Apr 2022 09:55:04 -0700
+Subject: thermal: int340x: Fix attr.show callback prototype
+
+From: Kees Cook <keescook@chromium.org>
+
+commit d0f6cfb2bd165b0aa307750e07e03420859bd554 upstream.
+
+Control Flow Integrity (CFI) instrumentation of the kernel noticed that
+the caller, dev_attr_show(), and the callback, odvp_show(), did not have
+matching function prototypes, which would cause a CFI exception to be
+raised. Correct the prototype by using struct device_attribute instead
+of struct kobj_attribute.
+
+Reported-and-tested-by: Joao Moreira <joao@overdrivepizza.com>
+Link: https://lore.kernel.org/lkml/067ce8bd4c3968054509831fa2347f4f@overdrivepizza.com/
+Fixes: 006f006f1e5c ("thermal/int340x_thermal: Export OEM vendor variables")
+Cc: 5.8+ <stable@vger.kernel.org> # 5.8+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
++++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+@@ -67,7 +67,7 @@ static int evaluate_odvp(struct int3400_
+ struct odvp_attr {
+ int odvp;
+ struct int3400_thermal_priv *priv;
+- struct kobj_attribute attr;
++ struct device_attribute attr;
+ };
+
+ static ssize_t data_vault_read(struct file *file, struct kobject *kobj,
+@@ -271,7 +271,7 @@ static int int3400_thermal_run_osc(acpi_
+ return result;
+ }
+
+-static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr,
++static ssize_t odvp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+ {
+ struct odvp_attr *odvp_attr;
--- /dev/null
+From 694852ead287a3433126e7ebda397b242dc99624 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Date: Tue, 12 Apr 2022 20:52:35 +0900
+Subject: zonefs: Clear inode information flags on inode creation
+
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+
+commit 694852ead287a3433126e7ebda397b242dc99624 upstream.
+
+Ensure that the i_flags field of struct zonefs_inode_info is cleared to
+0 when initializing a zone file inode, avoiding seeing the flag
+ZONEFS_ZONE_OPEN being incorrectly set.
+
+Fixes: b5c00e975779 ("zonefs: open/close zone on file open/close")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/zonefs/super.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -1155,6 +1155,7 @@ static struct inode *zonefs_alloc_inode(
+ inode_init_once(&zi->i_vnode);
+ mutex_init(&zi->i_truncate_mutex);
+ zi->i_wr_refcnt = 0;
++ zi->i_flags = 0;
+
+ return &zi->i_vnode;
+ }
--- /dev/null
+From 1da18a296f5ba4f99429e62a7cf4fdbefa598902 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Date: Tue, 12 Apr 2022 17:41:37 +0900
+Subject: zonefs: Fix management of open zones
+
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+
+commit 1da18a296f5ba4f99429e62a7cf4fdbefa598902 upstream.
+
+The mount option "explicit_open" manages the device open zone
+resources to ensure that if an application opens a sequential file for
+writing, the file zone can always be written by explicitly opening
+the zone and accounting for that state with the s_open_zones counter.
+
+However, if some zones are already open when mounting, the device open
+zone resource usage status will be larger than the initial s_open_zones
+value of 0. Ensure that this inconsistency does not happen by closing
+any sequential zone that is open when mounting.
+
+Furthermore, with ZNS drives, closing an explicitly open zone that has
+not been written will change the zone state to "closed", that is, the
+zone will remain in an active state. Since this can then cause failures
+of explicit open operations on other zones if the drive active zone
+resources are exceeded, we need to make sure that the zone is not
+active anymore by resetting it instead of closing it. To address this,
+zonefs_zone_mgmt() is modified to change a REQ_OP_ZONE_CLOSE request
+into a REQ_OP_ZONE_RESET for sequential zones that have not been
+written.
+
+Fixes: b5c00e975779 ("zonefs: open/close zone on file open/close")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/zonefs/super.c | 45 ++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 40 insertions(+), 5 deletions(-)
+
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -35,6 +35,17 @@ static inline int zonefs_zone_mgmt(struc
+
+ lockdep_assert_held(&zi->i_truncate_mutex);
+
++ /*
++ * With ZNS drives, closing an explicitly open zone that has not been
++ * written will change the zone state to "closed", that is, the zone
++ * will remain active. Since this can then cause failure of explicit
++ * open operation on other zones if the drive active zone resources
++ * are exceeded, make sure that the zone does not remain active by
++ * resetting it.
++ */
++ if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset)
++ op = REQ_OP_ZONE_RESET;
++
+ trace_zonefs_zone_mgmt(inode, op);
+ ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
+ zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
+@@ -1295,12 +1306,13 @@ static void zonefs_init_dir_inode(struct
+ inc_nlink(parent);
+ }
+
+-static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
+- enum zonefs_ztype type)
++static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
++ enum zonefs_ztype type)
+ {
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
++ int ret = 0;
+
+ inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
+ inode->i_mode = S_IFREG | sbi->s_perm;
+@@ -1325,6 +1337,22 @@ static void zonefs_init_file_inode(struc
+ sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
+ sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
+ sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
++
++ /*
++ * For sequential zones, make sure that any open zone is closed first
++ * to ensure that the initial number of open zones is 0, in sync with
++ * the open zone accounting done when the mount option
++ * ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
++ */
++ if (type == ZONEFS_ZTYPE_SEQ &&
++ (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
++ zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
++ mutex_lock(&zi->i_truncate_mutex);
++ ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
++ mutex_unlock(&zi->i_truncate_mutex);
++ }
++
++ return ret;
+ }
+
+ static struct dentry *zonefs_create_inode(struct dentry *parent,
+@@ -1334,6 +1362,7 @@ static struct dentry *zonefs_create_inod
+ struct inode *dir = d_inode(parent);
+ struct dentry *dentry;
+ struct inode *inode;
++ int ret;
+
+ dentry = d_alloc_name(parent, name);
+ if (!dentry)
+@@ -1344,10 +1373,16 @@ static struct dentry *zonefs_create_inod
+ goto dput;
+
+ inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
+- if (zone)
+- zonefs_init_file_inode(inode, zone, type);
+- else
++ if (zone) {
++ ret = zonefs_init_file_inode(inode, zone, type);
++ if (ret) {
++ iput(inode);
++ goto dput;
++ }
++ } else {
+ zonefs_init_dir_inode(dir, inode, type);
++ }
++
+ d_add(dentry, inode);
+ dir->i_size++;
+