]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.12-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Jun 2021 13:23:27 +0000 (15:23 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Jun 2021 13:23:27 +0000 (15:23 +0200)
added patches:
kfence-maximize-allocation-wait-timeout-duration.patch
kfence-use-task_idle-when-awaiting-allocation.patch
pid-take-a-reference-when-initializing-cad_pid.patch
revert-mips-make-userspace-mapping-young-by-default.patch
usb-dwc2-fix-build-in-periphal-only-mode.patch

queue-5.12/kfence-maximize-allocation-wait-timeout-duration.patch [new file with mode: 0644]
queue-5.12/kfence-use-task_idle-when-awaiting-allocation.patch [new file with mode: 0644]
queue-5.12/pid-take-a-reference-when-initializing-cad_pid.patch [new file with mode: 0644]
queue-5.12/revert-mips-make-userspace-mapping-young-by-default.patch [new file with mode: 0644]
queue-5.12/series
queue-5.12/usb-dwc2-fix-build-in-periphal-only-mode.patch [new file with mode: 0644]

diff --git a/queue-5.12/kfence-maximize-allocation-wait-timeout-duration.patch b/queue-5.12/kfence-maximize-allocation-wait-timeout-duration.patch
new file mode 100644 (file)
index 0000000..92b5e93
--- /dev/null
@@ -0,0 +1,61 @@
+From 37c9284f6932b915043717703d6496dfd59c85f5 Mon Sep 17 00:00:00 2001
+From: Marco Elver <elver@google.com>
+Date: Tue, 4 May 2021 18:40:24 -0700
+Subject: kfence: maximize allocation wait timeout duration
+
+From: Marco Elver <elver@google.com>
+
+commit 37c9284f6932b915043717703d6496dfd59c85f5 upstream.
+
+The allocation wait timeout was initially added because of warnings due to
+CONFIG_DETECT_HUNG_TASK=y [1].  While the 1 sec timeout is sufficient to
+resolve the warnings (given the hung task timeout must be 1 sec or larger)
+it may cause unnecessary wake-ups if the system is idle:
+
+  https://lkml.kernel.org/r/CADYN=9J0DQhizAGB0-jz4HOBBh+05kMBXb4c0cXMS7Qi5NAJiw@mail.gmail.com
+
+Fix it by computing the timeout duration in terms of the current
+sysctl_hung_task_timeout_secs value.
+
+Link: https://lkml.kernel.org/r/20210421105132.3965998-3-elver@google.com
+Signed-off-by: Marco Elver <elver@google.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Hillf Danton <hdanton@sina.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kfence/core.c |   12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -20,6 +20,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/random.h>
+ #include <linux/rcupdate.h>
++#include <linux/sched/sysctl.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+@@ -620,7 +621,16 @@ static void toggle_allocation_gate(struc
+       /* Enable static key, and await allocation to happen. */
+       static_branch_enable(&kfence_allocation_key);
+-      wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ);
++      if (sysctl_hung_task_timeout_secs) {
++              /*
++               * During low activity with no allocations we might wait a
++               * while; let's avoid the hung task warning.
++               */
++              wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
++                                 sysctl_hung_task_timeout_secs * HZ / 2);
++      } else {
++              wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
++      }
+       /* Disable static key and reset timer. */
+       static_branch_disable(&kfence_allocation_key);
diff --git a/queue-5.12/kfence-use-task_idle-when-awaiting-allocation.patch b/queue-5.12/kfence-use-task_idle-when-awaiting-allocation.patch
new file mode 100644 (file)
index 0000000..4464fb2
--- /dev/null
@@ -0,0 +1,48 @@
+From 8fd0e995cc7b6a7a8a40bc03d52a2cd445beeff4 Mon Sep 17 00:00:00 2001
+From: Marco Elver <elver@google.com>
+Date: Fri, 4 Jun 2021 20:01:11 -0700
+Subject: kfence: use TASK_IDLE when awaiting allocation
+
+From: Marco Elver <elver@google.com>
+
+commit 8fd0e995cc7b6a7a8a40bc03d52a2cd445beeff4 upstream.
+
+Since wait_event() uses TASK_UNINTERRUPTIBLE by default, waiting for an
+allocation counts towards load.  However, for KFENCE, this does not make
+any sense, since there is no busy work we're awaiting.
+
+Instead, use TASK_IDLE via wait_event_idle() to not count towards load.
+
+BugLink: https://bugzilla.suse.com/show_bug.cgi?id=1185565
+Link: https://lkml.kernel.org/r/20210521083209.3740269-1-elver@google.com
+Fixes: 407f1d8c1b5f ("kfence: await for allocation using wait_event")
+Signed-off-by: Marco Elver <elver@google.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: David Laight <David.Laight@ACULAB.COM>
+Cc: Hillf Danton <hdanton@sina.com>
+Cc: <stable@vger.kernel.org>   [5.12+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kfence/core.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -626,10 +626,10 @@ static void toggle_allocation_gate(struc
+                * During low activity with no allocations we might wait a
+                * while; let's avoid the hung task warning.
+                */
+-              wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
+-                                 sysctl_hung_task_timeout_secs * HZ / 2);
++              wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
++                                      sysctl_hung_task_timeout_secs * HZ / 2);
+       } else {
+-              wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
++              wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
+       }
+       /* Disable static key and reset timer. */
diff --git a/queue-5.12/pid-take-a-reference-when-initializing-cad_pid.patch b/queue-5.12/pid-take-a-reference-when-initializing-cad_pid.patch
new file mode 100644 (file)
index 0000000..c80ac26
--- /dev/null
@@ -0,0 +1,138 @@
+From 0711f0d7050b9e07c44bc159bbc64ac0a1022c7f Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 4 Jun 2021 20:01:14 -0700
+Subject: pid: take a reference when initializing `cad_pid`
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 0711f0d7050b9e07c44bc159bbc64ac0a1022c7f upstream.
+
+During boot, kernel_init_freeable() initializes `cad_pid` to the init
+task's struct pid.  Later on, we may change `cad_pid` via a sysctl, and
+when this happens proc_do_cad_pid() will increment the refcount on the
+new pid via get_pid(), and will decrement the refcount on the old pid
+via put_pid().  As we never called get_pid() when we initialized
+`cad_pid`, we decrement a reference we never incremented, can therefore
+free the init task's struct pid early.  As there can be dangling
+references to the struct pid, we can later encounter a use-after-free
+(e.g.  when delivering signals).
+
+This was spotted when fuzzing v5.13-rc3 with Syzkaller, but seems to
+have been around since the conversion of `cad_pid` to struct pid in
+commit 9ec52099e4b8 ("[PATCH] replace cad_pid by a struct pid") from the
+pre-KASAN stone age of v2.6.19.
+
+Fix this by getting a reference to the init task's struct pid when we
+assign it to `cad_pid`.
+
+Full KASAN splat below.
+
+   ==================================================================
+   BUG: KASAN: use-after-free in ns_of_pid include/linux/pid.h:153 [inline]
+   BUG: KASAN: use-after-free in task_active_pid_ns+0xc0/0xc8 kernel/pid.c:509
+   Read of size 4 at addr ffff23794dda0004 by task syz-executor.0/273
+
+   CPU: 1 PID: 273 Comm: syz-executor.0 Not tainted 5.12.0-00001-g9aef892b2d15 #1
+   Hardware name: linux,dummy-virt (DT)
+   Call trace:
+    ns_of_pid include/linux/pid.h:153 [inline]
+    task_active_pid_ns+0xc0/0xc8 kernel/pid.c:509
+    do_notify_parent+0x308/0xe60 kernel/signal.c:1950
+    exit_notify kernel/exit.c:682 [inline]
+    do_exit+0x2334/0x2bd0 kernel/exit.c:845
+    do_group_exit+0x108/0x2c8 kernel/exit.c:922
+    get_signal+0x4e4/0x2a88 kernel/signal.c:2781
+    do_signal arch/arm64/kernel/signal.c:882 [inline]
+    do_notify_resume+0x300/0x970 arch/arm64/kernel/signal.c:936
+    work_pending+0xc/0x2dc
+
+   Allocated by task 0:
+    slab_post_alloc_hook+0x50/0x5c0 mm/slab.h:516
+    slab_alloc_node mm/slub.c:2907 [inline]
+    slab_alloc mm/slub.c:2915 [inline]
+    kmem_cache_alloc+0x1f4/0x4c0 mm/slub.c:2920
+    alloc_pid+0xdc/0xc00 kernel/pid.c:180
+    copy_process+0x2794/0x5e18 kernel/fork.c:2129
+    kernel_clone+0x194/0x13c8 kernel/fork.c:2500
+    kernel_thread+0xd4/0x110 kernel/fork.c:2552
+    rest_init+0x44/0x4a0 init/main.c:687
+    arch_call_rest_init+0x1c/0x28
+    start_kernel+0x520/0x554 init/main.c:1064
+    0x0
+
+   Freed by task 270:
+    slab_free_hook mm/slub.c:1562 [inline]
+    slab_free_freelist_hook+0x98/0x260 mm/slub.c:1600
+    slab_free mm/slub.c:3161 [inline]
+    kmem_cache_free+0x224/0x8e0 mm/slub.c:3177
+    put_pid.part.4+0xe0/0x1a8 kernel/pid.c:114
+    put_pid+0x30/0x48 kernel/pid.c:109
+    proc_do_cad_pid+0x190/0x1b0 kernel/sysctl.c:1401
+    proc_sys_call_handler+0x338/0x4b0 fs/proc/proc_sysctl.c:591
+    proc_sys_write+0x34/0x48 fs/proc/proc_sysctl.c:617
+    call_write_iter include/linux/fs.h:1977 [inline]
+    new_sync_write+0x3ac/0x510 fs/read_write.c:518
+    vfs_write fs/read_write.c:605 [inline]
+    vfs_write+0x9c4/0x1018 fs/read_write.c:585
+    ksys_write+0x124/0x240 fs/read_write.c:658
+    __do_sys_write fs/read_write.c:670 [inline]
+    __se_sys_write fs/read_write.c:667 [inline]
+    __arm64_sys_write+0x78/0xb0 fs/read_write.c:667
+    __invoke_syscall arch/arm64/kernel/syscall.c:37 [inline]
+    invoke_syscall arch/arm64/kernel/syscall.c:49 [inline]
+    el0_svc_common.constprop.1+0x16c/0x388 arch/arm64/kernel/syscall.c:129
+    do_el0_svc+0xf8/0x150 arch/arm64/kernel/syscall.c:168
+    el0_svc+0x28/0x38 arch/arm64/kernel/entry-common.c:416
+    el0_sync_handler+0x134/0x180 arch/arm64/kernel/entry-common.c:432
+    el0_sync+0x154/0x180 arch/arm64/kernel/entry.S:701
+
+   The buggy address belongs to the object at ffff23794dda0000
+    which belongs to the cache pid of size 224
+   The buggy address is located 4 bytes inside of
+    224-byte region [ffff23794dda0000, ffff23794dda00e0)
+   The buggy address belongs to the page:
+   page:(____ptrval____) refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x4dda0
+   head:(____ptrval____) order:1 compound_mapcount:0
+   flags: 0x3fffc0000010200(slab|head)
+   raw: 03fffc0000010200 dead000000000100 dead000000000122 ffff23794d40d080
+   raw: 0000000000000000 0000000000190019 00000001ffffffff 0000000000000000
+   page dumped because: kasan: bad access detected
+
+   Memory state around the buggy address:
+    ffff23794dd9ff00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+    ffff23794dd9ff80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+   >ffff23794dda0000: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+                      ^
+    ffff23794dda0080: fb fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
+    ffff23794dda0100: fc fc fc fc fc fc fc fc 00 00 00 00 00 00 00 00
+   ==================================================================
+
+Link: https://lkml.kernel.org/r/20210524172230.38715-1-mark.rutland@arm.com
+Fixes: 9ec52099e4b8678a ("[PATCH] replace cad_pid by a struct pid")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Christian Brauner <christian.brauner@ubuntu.com>
+Cc: Cedric Le Goater <clg@fr.ibm.com>
+Cc: Christian Brauner <christian@brauner.io>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/main.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -1514,7 +1514,7 @@ static noinline void __init kernel_init_
+        */
+       set_mems_allowed(node_states[N_MEMORY]);
+-      cad_pid = task_pid(current);
++      cad_pid = get_pid(task_pid(current));
+       smp_prepare_cpus(setup_max_cpus);
diff --git a/queue-5.12/revert-mips-make-userspace-mapping-young-by-default.patch b/queue-5.12/revert-mips-make-userspace-mapping-young-by-default.patch
new file mode 100644 (file)
index 0000000..526c153
--- /dev/null
@@ -0,0 +1,124 @@
+From 50c25ee97cf6ab011542167ab590c17012cea4ed Mon Sep 17 00:00:00 2001
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Date: Fri, 4 Jun 2021 20:01:08 -0700
+Subject: Revert "MIPS: make userspace mapping young by default"
+
+From: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+commit 50c25ee97cf6ab011542167ab590c17012cea4ed upstream.
+
+This reverts commit f685a533a7fab35c5d069dcd663f59c8e4171a75.
+
+The MIPS cache flush logic needs to know whether the mapping was already
+established to decide how to flush caches.  This is done by checking the
+valid bit in the PTE.  The commit above breaks this logic by setting the
+valid in the PTE in new mappings, which causes kernel crashes.
+
+Link: https://lkml.kernel.org/r/20210526094335.92948-1-tsbogend@alpha.franken.de
+Fixes: f685a533a7f ("MIPS: make userspace mapping young by default")
+Reported-by: Zhou Yanjie <zhouyanjie@wanyeetech.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Cc: Huang Pei <huangpei@loongson.cn>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/mm/cache.c    |   30 ++++++++++++++----------------
+ include/linux/pgtable.h |    8 ++++++++
+ mm/memory.c             |    4 ++++
+ 3 files changed, 26 insertions(+), 16 deletions(-)
+
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -157,31 +157,29 @@ unsigned long _page_cachable_default;
+ EXPORT_SYMBOL(_page_cachable_default);
+ #define PM(p) __pgprot(_page_cachable_default | (p))
+-#define PVA(p)        PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
+ static inline void setup_protection_map(void)
+ {
+       protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+-      protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+-      protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+-      protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+-      protection_map[4]  = PVA(_PAGE_PRESENT);
+-      protection_map[5]  = PVA(_PAGE_PRESENT);
+-      protection_map[6]  = PVA(_PAGE_PRESENT);
+-      protection_map[7]  = PVA(_PAGE_PRESENT);
++      protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
++      protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
++      protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
++      protection_map[4]  = PM(_PAGE_PRESENT);
++      protection_map[5]  = PM(_PAGE_PRESENT);
++      protection_map[6]  = PM(_PAGE_PRESENT);
++      protection_map[7]  = PM(_PAGE_PRESENT);
+       protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+-      protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+-      protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
++      protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
++      protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+                               _PAGE_NO_READ);
+-      protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+-      protection_map[12] = PVA(_PAGE_PRESENT);
+-      protection_map[13] = PVA(_PAGE_PRESENT);
+-      protection_map[14] = PVA(_PAGE_PRESENT);
+-      protection_map[15] = PVA(_PAGE_PRESENT);
++      protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
++      protection_map[12] = PM(_PAGE_PRESENT);
++      protection_map[13] = PM(_PAGE_PRESENT);
++      protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
++      protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ }
+-#undef _PVA
+ #undef PM
+ void cpu_cache_init(void)
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(st
+  * To be differentiate with macro pte_mkyoung, this macro is used on platforms
+  * where software maintains page access bit.
+  */
++#ifndef pte_sw_mkyoung
++static inline pte_t pte_sw_mkyoung(pte_t pte)
++{
++      return pte;
++}
++#define pte_sw_mkyoung        pte_sw_mkyoung
++#endif
++
+ #ifndef pte_savedwrite
+ #define pte_savedwrite pte_write
+ #endif
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2896,6 +2896,7 @@ static vm_fault_t wp_page_copy(struct vm
+               }
+               flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
+               entry = mk_pte(new_page, vma->vm_page_prot);
++              entry = pte_sw_mkyoung(entry);
+               entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+               /*
+@@ -3561,6 +3562,7 @@ static vm_fault_t do_anonymous_page(stru
+       __SetPageUptodate(page);
+       entry = mk_pte(page, vma->vm_page_prot);
++      entry = pte_sw_mkyoung(entry);
+       if (vma->vm_flags & VM_WRITE)
+               entry = pte_mkwrite(pte_mkdirty(entry));
+@@ -3745,6 +3747,8 @@ void do_set_pte(struct vm_fault *vmf, st
+       if (prefault && arch_wants_old_prefaulted_pte())
+               entry = pte_mkold(entry);
++      else
++              entry = pte_sw_mkyoung(entry);
+       if (write)
+               entry = maybe_mkwrite(pte_mkdirty(entry), vma);
index 267fe1f7b91d6d28fce5a56e488056b2cbcdc395..3dccea738b4af35029176941eb5c21bbeee3f756 100644 (file)
@@ -118,3 +118,8 @@ ext4-fix-bug-on-in-ext4_es_cache_extent-as-ext4_split_extent_at-failed.patch
 ext4-fix-fast-commit-alignment-issues.patch
 ext4-fix-memory-leak-in-ext4_mb_init_backend-on-error-path.patch
 ext4-fix-accessing-uninit-percpu-counter-variable-with-fast_commit.patch
+usb-dwc2-fix-build-in-periphal-only-mode.patch
+revert-mips-make-userspace-mapping-young-by-default.patch
+kfence-maximize-allocation-wait-timeout-duration.patch
+kfence-use-task_idle-when-awaiting-allocation.patch
+pid-take-a-reference-when-initializing-cad_pid.patch
diff --git a/queue-5.12/usb-dwc2-fix-build-in-periphal-only-mode.patch b/queue-5.12/usb-dwc2-fix-build-in-periphal-only-mode.patch
new file mode 100644 (file)
index 0000000..d0adaa3
--- /dev/null
@@ -0,0 +1,38 @@
+From phil@raspberrypi.com  Tue Jun  8 15:06:41 2021
+From: Phil Elwell <phil@raspberrypi.com>
+Date: Tue,  8 Jun 2021 13:00:49 +0100
+Subject: usb: dwc2: Fix build in periphal-only mode
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>, stable@vger.kernel.org
+Cc: Phil Elwell <phil@raspberrypi.com>
+Message-ID: <20210608120049.1393123-1-phil@raspberrypi.com>
+
+From: Phil Elwell <phil@raspberrypi.com>
+
+In branches to which 24d209dba5a3 ("usb: dwc2: Fix hibernation between
+host and device modes.") has been back-ported, the bus_suspended member
+of struct dwc2_hsotg is only present in builds that support host-mode.
+To avoid having to pull in several more non-Fix commits in order to
+get it to compile, wrap the usage of the member in a macro conditional.
+
+Fixes: 24d209dba5a3 ("usb: dwc2: Fix hibernation between host and device modes.")
+Signed-off-by: Phil Elwell <phil@raspberrypi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/dwc2/core_intr.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -707,7 +707,11 @@ static inline void dwc_handle_gpwrdn_dis
+       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+       hsotg->hibernated = 0;
++
++#if IS_ENABLED(CONFIG_USB_DWC2_HOST) ||       \
++      IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+       hsotg->bus_suspended = 0;
++#endif
+       if (gpwrdn & GPWRDN_IDSTS) {
+               hsotg->op_state = OTG_STATE_B_PERIPHERAL;