git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.0-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 31 Oct 2022 06:44:25 +0000 (07:44 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 31 Oct 2022 06:44:25 +0000 (07:44 +0100)
added patches:
coresight-cti-fix-hang-in-cti_disable_hw.patch
counter-104-quad-8-fix-race-getting-function-mode-and-direction.patch
counter-microchip-tcb-capture-handle-signal1-read-and-synapse.patch
mm-huge_memory-do-not-clobber-swp_entry_t-during-thp-split.patch
mm-kmemleak-prevent-soft-lockup-in-kmemleak_scan-s-object-iteration-loops.patch
mm-madvise-hugetlb-fix-unexpected-data-loss-with-madv_dontneed-on-hugetlbfs.patch
mm-migrate-fix-return-value-if-all-subpages-of-thps-are-migrated-successfully.patch
mm-prep_compound_tail-clear-page-private.patch
mm-uffd-fix-vma-check-on-userfault-for-wp.patch
mmc-block-remove-error-check-of-hw_reset-on-reset.patch
mmc-core-fix-kernel-panic-when-remove-non-standard-sdio-card.patch
mmc-core-fix-write_zeroes-cqe-handling.patch
mmc-queue-cancel-recovery-work-on-cleanup.patch
mmc-sdhci-esdhc-imx-propagate-esdhc_flag_hs400-only-on-8bit-bus.patch
mmc-sdhci-pci-core-disable-es-for-asus-bios-on-jasper-lake.patch
mmc-sdhci_am654-select-not-depends-regmap_mmio.patch
random-use-arch_get_random-_early-in-random_init.patch

18 files changed:
queue-6.0/coresight-cti-fix-hang-in-cti_disable_hw.patch [new file with mode: 0644]
queue-6.0/counter-104-quad-8-fix-race-getting-function-mode-and-direction.patch [new file with mode: 0644]
queue-6.0/counter-microchip-tcb-capture-handle-signal1-read-and-synapse.patch [new file with mode: 0644]
queue-6.0/mm-huge_memory-do-not-clobber-swp_entry_t-during-thp-split.patch [new file with mode: 0644]
queue-6.0/mm-kmemleak-prevent-soft-lockup-in-kmemleak_scan-s-object-iteration-loops.patch [new file with mode: 0644]
queue-6.0/mm-madvise-hugetlb-fix-unexpected-data-loss-with-madv_dontneed-on-hugetlbfs.patch [new file with mode: 0644]
queue-6.0/mm-migrate-fix-return-value-if-all-subpages-of-thps-are-migrated-successfully.patch [new file with mode: 0644]
queue-6.0/mm-prep_compound_tail-clear-page-private.patch [new file with mode: 0644]
queue-6.0/mm-uffd-fix-vma-check-on-userfault-for-wp.patch [new file with mode: 0644]
queue-6.0/mmc-block-remove-error-check-of-hw_reset-on-reset.patch [new file with mode: 0644]
queue-6.0/mmc-core-fix-kernel-panic-when-remove-non-standard-sdio-card.patch [new file with mode: 0644]
queue-6.0/mmc-core-fix-write_zeroes-cqe-handling.patch [new file with mode: 0644]
queue-6.0/mmc-queue-cancel-recovery-work-on-cleanup.patch [new file with mode: 0644]
queue-6.0/mmc-sdhci-esdhc-imx-propagate-esdhc_flag_hs400-only-on-8bit-bus.patch [new file with mode: 0644]
queue-6.0/mmc-sdhci-pci-core-disable-es-for-asus-bios-on-jasper-lake.patch [new file with mode: 0644]
queue-6.0/mmc-sdhci_am654-select-not-depends-regmap_mmio.patch [new file with mode: 0644]
queue-6.0/random-use-arch_get_random-_early-in-random_init.patch [new file with mode: 0644]
queue-6.0/series

diff --git a/queue-6.0/coresight-cti-fix-hang-in-cti_disable_hw.patch b/queue-6.0/coresight-cti-fix-hang-in-cti_disable_hw.patch
new file mode 100644 (file)
index 0000000..5dbe49a
--- /dev/null
@@ -0,0 +1,129 @@
+From 6746eae4bbaddcc16b40efb33dab79210828b3ce Mon Sep 17 00:00:00 2001
+From: James Clark <james.clark@arm.com>
+Date: Tue, 25 Oct 2022 14:10:32 +0100
+Subject: coresight: cti: Fix hang in cti_disable_hw()
+
+From: James Clark <james.clark@arm.com>
+
+commit 6746eae4bbaddcc16b40efb33dab79210828b3ce upstream.
+
+cti_enable_hw() and cti_disable_hw() are called from an atomic context
+so shouldn't use runtime PM because it can result in a sleep when
+communicating with firmware.
+
+Since commit 3c6656337852 ("Revert "firmware: arm_scmi: Add clock
+management to the SCMI power domain""), this causes a hang on Juno when
+running the Perf Coresight tests or running this command:
+
+  perf record -e cs_etm//u -- ls
+
+This was also missed until the revert commit because pm_runtime_put()
+was called with the wrong device until commit 692c9a499b28 ("coresight:
+cti: Correct the parameter for pm_runtime_put").
+
+With lock and scheduler debugging enabled the following is output:
+
+   coresight cti_sys0: cti_enable_hw -- dev:cti_sys0  parent: 20020000.cti
+   BUG: sleeping function called from invalid context at drivers/base/power/runtime.c:1151
+   in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 330, name: perf-exec
+   preempt_count: 2, expected: 0
+   RCU nest depth: 0, expected: 0
+   INFO: lockdep is turned off.
+   irq event stamp: 0
+   hardirqs last  enabled at (0): [<0000000000000000>] 0x0
+   hardirqs last disabled at (0): [<ffff80000822b394>] copy_process+0xa0c/0x1948
+   softirqs last  enabled at (0): [<ffff80000822b394>] copy_process+0xa0c/0x1948
+   softirqs last disabled at (0): [<0000000000000000>] 0x0
+   CPU: 3 PID: 330 Comm: perf-exec Not tainted 6.0.0-00053-g042116d99298 #7
+   Hardware name: ARM LTD ARM Juno Development Platform/ARM Juno Development Platform, BIOS EDK II Sep 13 2022
+   Call trace:
+    dump_backtrace+0x134/0x140
+    show_stack+0x20/0x58
+    dump_stack_lvl+0x8c/0xb8
+    dump_stack+0x18/0x34
+    __might_resched+0x180/0x228
+    __might_sleep+0x50/0x88
+    __pm_runtime_resume+0xac/0xb0
+    cti_enable+0x44/0x120
+    coresight_control_assoc_ectdev+0xc0/0x150
+    coresight_enable_path+0xb4/0x288
+    etm_event_start+0x138/0x170
+    etm_event_add+0x48/0x70
+    event_sched_in.isra.122+0xb4/0x280
+    merge_sched_in+0x1fc/0x3d0
+    visit_groups_merge.constprop.137+0x16c/0x4b0
+    ctx_sched_in+0x114/0x1f0
+    perf_event_sched_in+0x60/0x90
+    ctx_resched+0x68/0xb0
+    perf_event_exec+0x138/0x508
+    begin_new_exec+0x52c/0xd40
+    load_elf_binary+0x6b8/0x17d0
+    bprm_execve+0x360/0x7f8
+    do_execveat_common.isra.47+0x218/0x238
+    __arm64_sys_execve+0x48/0x60
+    invoke_syscall+0x4c/0x110
+    el0_svc_common.constprop.4+0xfc/0x120
+    do_el0_svc+0x34/0xc0
+    el0_svc+0x40/0x98
+    el0t_64_sync_handler+0x98/0xc0
+    el0t_64_sync+0x170/0x174
+
+Fix the issue by removing the runtime PM calls completely. They are not
+needed here because runtime PM must have already been done when building
+the path for a trace.
+
+Fixes: 835d722ba10a ("coresight: cti: Initial CoreSight CTI Driver")
+Cc: stable <stable@kernel.org>
+Reported-by: Aishwarya TCV <Aishwarya.TCV@arm.com>
+Reported-by: Cristian Marussi <Cristian.Marussi@arm.com>
+Suggested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: James Clark <james.clark@arm.com>
+Reviewed-by: Mike Leach <mike.leach@linaro.org>
+Tested-by: Mike Leach <mike.leach@linaro.org>
+[ Fix build warnings ]
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Link: https://lore.kernel.org/r/20221025131032.1149459-1-suzuki.poulose@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwtracing/coresight/coresight-cti-core.c |    5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/drivers/hwtracing/coresight/coresight-cti-core.c
++++ b/drivers/hwtracing/coresight/coresight-cti-core.c
+@@ -90,11 +90,9 @@ void cti_write_all_hw_regs(struct cti_dr
+ static int cti_enable_hw(struct cti_drvdata *drvdata)
+ {
+       struct cti_config *config = &drvdata->config;
+-      struct device *dev = &drvdata->csdev->dev;
+       unsigned long flags;
+       int rc = 0;
+-      pm_runtime_get_sync(dev->parent);
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       /* no need to do anything if enabled or unpowered*/
+@@ -119,7 +117,6 @@ cti_state_unchanged:
+       /* cannot enable due to error */
+ cti_err_not_enabled:
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+-      pm_runtime_put(dev->parent);
+       return rc;
+ }
+@@ -153,7 +150,6 @@ cti_hp_not_enabled:
+ static int cti_disable_hw(struct cti_drvdata *drvdata)
+ {
+       struct cti_config *config = &drvdata->config;
+-      struct device *dev = &drvdata->csdev->dev;
+       struct coresight_device *csdev = drvdata->csdev;
+       spin_lock(&drvdata->spinlock);
+@@ -175,7 +171,6 @@ static int cti_disable_hw(struct cti_drv
+       coresight_disclaim_device_unlocked(csdev);
+       CS_LOCK(drvdata->base);
+       spin_unlock(&drvdata->spinlock);
+-      pm_runtime_put(dev->parent);
+       return 0;
+       /* not disabled this call */
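
The fix above relies on a general rule: code running under spin_lock_irqsave(), i.e. in atomic context, must not call runtime-PM helpers that can sleep. A minimal sketch of that split, with purely illustrative demo_* names and assuming a kernel build context (not taken from the coresight driver):

  /* Illustrative only: the runtime-PM reference is taken on a sleepable
   * path, so the atomic path below never has to call a sleeping function.
   */
  #include <linux/device.h>
  #include <linux/pm_runtime.h>
  #include <linux/spinlock.h>

  struct demo_drvdata {
          struct device *dev;     /* assumed parent device */
          spinlock_t lock;
          bool enabled;
  };

  /* Sleepable path, e.g. while the trace path is being built. */
  static int demo_prepare(struct demo_drvdata *d)
  {
          return pm_runtime_resume_and_get(d->dev);   /* may sleep: fine here */
  }

  /* Atomic path: only register programming under the spinlock. */
  static void demo_enable_hw(struct demo_drvdata *d)
  {
          unsigned long flags;

          spin_lock_irqsave(&d->lock, flags);
          d->enabled = true;                      /* program the hardware here */
          spin_unlock_irqrestore(&d->lock, flags);
  }
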
diff --git a/queue-6.0/counter-104-quad-8-fix-race-getting-function-mode-and-direction.patch b/queue-6.0/counter-104-quad-8-fix-race-getting-function-mode-and-direction.patch
new file mode 100644 (file)
index 0000000..c9eb491
--- /dev/null
@@ -0,0 +1,138 @@
+From d501d37841d3b7f18402d71a9ef057eb9dde127e Mon Sep 17 00:00:00 2001
+From: William Breathitt Gray <william.gray@linaro.org>
+Date: Thu, 20 Oct 2022 10:11:21 -0400
+Subject: counter: 104-quad-8: Fix race getting function mode and direction
+
+From: William Breathitt Gray <william.gray@linaro.org>
+
+commit d501d37841d3b7f18402d71a9ef057eb9dde127e upstream.
+
+The quad8_action_read() function checks the Count function mode and
+Count direction without first acquiring a lock. This is a race condition
+because the function mode could change by the time the direction is
+checked.
+
+Because quad8_function_read() already acquires a lock internally, it is
+refactored to split the lock-free code out into a new
+quad8_function_get() function.
+
+To resolve the race condition in quad8_action_read(), a lock is acquired
+before calling quad8_function_get() and quad8_direction_read() in order
+to get both function mode and direction atomically.
+
+Fixes: f1d8a071d45b ("counter: 104-quad-8: Add Generic Counter interface support")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221020141121.15434-1-william.gray@linaro.org/
+Signed-off-by: William Breathitt Gray <william.gray@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/counter/104-quad-8.c |   64 ++++++++++++++++++++++++++++---------------
+ 1 file changed, 42 insertions(+), 22 deletions(-)
+
+--- a/drivers/counter/104-quad-8.c
++++ b/drivers/counter/104-quad-8.c
+@@ -231,34 +231,45 @@ static const enum counter_function quad8
+       COUNTER_FUNCTION_QUADRATURE_X4,
+ };
++static int quad8_function_get(const struct quad8 *const priv, const size_t id,
++                            enum counter_function *const function)
++{
++      if (!priv->quadrature_mode[id]) {
++              *function = COUNTER_FUNCTION_PULSE_DIRECTION;
++              return 0;
++      }
++
++      switch (priv->quadrature_scale[id]) {
++      case 0:
++              *function = COUNTER_FUNCTION_QUADRATURE_X1_A;
++              return 0;
++      case 1:
++              *function = COUNTER_FUNCTION_QUADRATURE_X2_A;
++              return 0;
++      case 2:
++              *function = COUNTER_FUNCTION_QUADRATURE_X4;
++              return 0;
++      default:
++              /* should never reach this path */
++              return -EINVAL;
++      }
++}
++
+ static int quad8_function_read(struct counter_device *counter,
+                              struct counter_count *count,
+                              enum counter_function *function)
+ {
+       struct quad8 *const priv = counter_priv(counter);
+-      const int id = count->id;
+       unsigned long irqflags;
++      int retval;
+       spin_lock_irqsave(&priv->lock, irqflags);
+-      if (priv->quadrature_mode[id])
+-              switch (priv->quadrature_scale[id]) {
+-              case 0:
+-                      *function = COUNTER_FUNCTION_QUADRATURE_X1_A;
+-                      break;
+-              case 1:
+-                      *function = COUNTER_FUNCTION_QUADRATURE_X2_A;
+-                      break;
+-              case 2:
+-                      *function = COUNTER_FUNCTION_QUADRATURE_X4;
+-                      break;
+-              }
+-      else
+-              *function = COUNTER_FUNCTION_PULSE_DIRECTION;
++      retval = quad8_function_get(priv, count->id, function);
+       spin_unlock_irqrestore(&priv->lock, irqflags);
+-      return 0;
++      return retval;
+ }
+ static int quad8_function_write(struct counter_device *counter,
+@@ -358,6 +369,7 @@ static int quad8_action_read(struct coun
+                            enum counter_synapse_action *action)
+ {
+       struct quad8 *const priv = counter_priv(counter);
++      unsigned long irqflags;
+       int err;
+       enum counter_function function;
+       const size_t signal_a_id = count->synapses[0].signal->id;
+@@ -373,9 +385,21 @@ static int quad8_action_read(struct coun
+               return 0;
+       }
+-      err = quad8_function_read(counter, count, &function);
+-      if (err)
++      spin_lock_irqsave(&priv->lock, irqflags);
++
++      /* Get Count function and direction atomically */
++      err = quad8_function_get(priv, count->id, &function);
++      if (err) {
++              spin_unlock_irqrestore(&priv->lock, irqflags);
+               return err;
++      }
++      err = quad8_direction_read(counter, count, &direction);
++      if (err) {
++              spin_unlock_irqrestore(&priv->lock, irqflags);
++              return err;
++      }
++
++      spin_unlock_irqrestore(&priv->lock, irqflags);
+       /* Default action mode */
+       *action = COUNTER_SYNAPSE_ACTION_NONE;
+@@ -388,10 +412,6 @@ static int quad8_action_read(struct coun
+               return 0;
+       case COUNTER_FUNCTION_QUADRATURE_X1_A:
+               if (synapse->signal->id == signal_a_id) {
+-                      err = quad8_direction_read(counter, count, &direction);
+-                      if (err)
+-                              return err;
+-
+                       if (direction == COUNTER_COUNT_DIRECTION_FORWARD)
+                               *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+                       else
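
The structure of the fix, shown with illustrative demo_* names rather than the driver's real ones: the lock-free computation lives in a helper that expects the lock to be held, and the caller takes the lock once so both pieces of state are read as a consistent pair.

  #include <linux/spinlock.h>

  struct demo_counter {
          spinlock_t lock;
          unsigned int mode;
          unsigned int direction;
  };

  /* Lock-free helper: the caller must hold demo_counter::lock. */
  static unsigned int demo_mode_get(const struct demo_counter *c)
  {
          return c->mode;
  }

  static void demo_state_read(struct demo_counter *c,
                              unsigned int *mode, unsigned int *dir)
  {
          unsigned long flags;

          /* One critical section, so mode and direction cannot tear apart. */
          spin_lock_irqsave(&c->lock, flags);
          *mode = demo_mode_get(c);
          *dir = c->direction;
          spin_unlock_irqrestore(&c->lock, flags);
  }
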
diff --git a/queue-6.0/counter-microchip-tcb-capture-handle-signal1-read-and-synapse.patch b/queue-6.0/counter-microchip-tcb-capture-handle-signal1-read-and-synapse.patch
new file mode 100644 (file)
index 0000000..de9734e
--- /dev/null
@@ -0,0 +1,75 @@
+From d917a62af81b133f35f627e7936e193c842a7947 Mon Sep 17 00:00:00 2001
+From: William Breathitt Gray <william.gray@linaro.org>
+Date: Tue, 18 Oct 2022 08:10:14 -0400
+Subject: counter: microchip-tcb-capture: Handle Signal1 read and Synapse
+
+From: William Breathitt Gray <william.gray@linaro.org>
+
+commit d917a62af81b133f35f627e7936e193c842a7947 upstream.
+
+The signal_read(), action_read(), and action_write() callbacks have been
+assuming Signal0 is requested without checking. This results in requests
+for Signal1 returning data for Signal0. This patch fixes these
+oversights by properly checking for the Signal's id in the respective
+callbacks and handling accordingly based on the particular Signal
+requested. The trig_inverted member of the mchp_tc_data is removed as
+superfluous.
+
+Fixes: 106b104137fd ("counter: Add microchip TCB capture counter")
+Cc: stable@vger.kernel.org
+Reviewed-by: Kamel Bouhara <kamel.bouhara@bootlin.com>
+Link: https://lore.kernel.org/r/20221018121014.7368-1-william.gray@linaro.org/
+Signed-off-by: William Breathitt Gray <william.gray@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/counter/microchip-tcb-capture.c |   18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/drivers/counter/microchip-tcb-capture.c
++++ b/drivers/counter/microchip-tcb-capture.c
+@@ -28,7 +28,6 @@ struct mchp_tc_data {
+       int qdec_mode;
+       int num_channels;
+       int channel[2];
+-      bool trig_inverted;
+ };
+ static const enum counter_function mchp_tc_count_functions[] = {
+@@ -153,7 +152,7 @@ static int mchp_tc_count_signal_read(str
+       regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr);
+-      if (priv->trig_inverted)
++      if (signal->id == 1)
+               sigstatus = (sr & ATMEL_TC_MTIOB);
+       else
+               sigstatus = (sr & ATMEL_TC_MTIOA);
+@@ -171,6 +170,17 @@ static int mchp_tc_count_action_read(str
+       struct mchp_tc_data *const priv = counter_priv(counter);
+       u32 cmr;
++      if (priv->qdec_mode) {
++              *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
++              return 0;
++      }
++
++      /* Only TIOA signal is evaluated in non-QDEC mode */
++      if (synapse->signal->id != 0) {
++              *action = COUNTER_SYNAPSE_ACTION_NONE;
++              return 0;
++      }
++
+       regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr);
+       switch (cmr & ATMEL_TC_ETRGEDG) {
+@@ -199,8 +209,8 @@ static int mchp_tc_count_action_write(st
+       struct mchp_tc_data *const priv = counter_priv(counter);
+       u32 edge = ATMEL_TC_ETRGEDG_NONE;
+-      /* QDEC mode is rising edge only */
+-      if (priv->qdec_mode)
++      /* QDEC mode is rising edge only; only TIOA handled in non-QDEC mode */
++      if (priv->qdec_mode || synapse->signal->id != 0)
+               return -EINVAL;
+       switch (action) {
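
The shape of the fix, with hypothetical demo_* names: a callback shared by several Signals first handles the quadrature case, then returns a harmless default for any Signal the hardware does not evaluate, and only then reads the hardware for Signal0.

  enum demo_action {
          DEMO_ACTION_NONE,
          DEMO_ACTION_RISING_EDGE,
          DEMO_ACTION_BOTH_EDGES,
  };

  static int demo_action_read(unsigned int signal_id, bool qdec_mode,
                              enum demo_action *action)
  {
          if (qdec_mode) {
                  *action = DEMO_ACTION_BOTH_EDGES;
                  return 0;
          }

          /* Only signal 0 is evaluated by the hardware in non-QDEC mode. */
          if (signal_id != 0) {
                  *action = DEMO_ACTION_NONE;
                  return 0;
          }

          *action = DEMO_ACTION_RISING_EDGE;  /* would come from a register read */
          return 0;
  }
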
diff --git a/queue-6.0/mm-huge_memory-do-not-clobber-swp_entry_t-during-thp-split.patch b/queue-6.0/mm-huge_memory-do-not-clobber-swp_entry_t-during-thp-split.patch
new file mode 100644 (file)
index 0000000..d6381a5
--- /dev/null
@@ -0,0 +1,115 @@
+From 71e2d666ef85d51834d658830f823560c402b8b6 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@techsingularity.net>
+Date: Wed, 19 Oct 2022 14:41:56 +0100
+Subject: mm/huge_memory: do not clobber swp_entry_t during THP split
+
+From: Mel Gorman <mgorman@techsingularity.net>
+
+commit 71e2d666ef85d51834d658830f823560c402b8b6 upstream.
+
+The following has been observed when running stressng mmap since commit
+b653db77350c ("mm: Clear page->private when splitting or migrating a page")
+
+   watchdog: BUG: soft lockup - CPU#75 stuck for 26s! [stress-ng:9546]
+   CPU: 75 PID: 9546 Comm: stress-ng Tainted: G            E      6.0.0-revert-b653db77-fix+ #29 0357d79b60fb09775f678e4f3f64ef0579ad1374
+   Hardware name: SGI.COM C2112-4GP3/X10DRT-P-Series, BIOS 2.0a 05/09/2016
+   RIP: 0010:xas_descend+0x28/0x80
+   Code: cc cc 0f b6 0e 48 8b 57 08 48 d3 ea 83 e2 3f 89 d0 48 83 c0 04 48 8b 44 c6 08 48 89 77 18 48 89 c1 83 e1 03 48 83 f9 02 75 08 <48> 3d fd 00 00 00 76 08 88 57 12 c3 cc cc cc cc 48 c1 e8 02 89 c2
+   RSP: 0018:ffffbbf02a2236a8 EFLAGS: 00000246
+   RAX: ffff9cab7d6a0002 RBX: ffffe04b0af88040 RCX: 0000000000000002
+   RDX: 0000000000000030 RSI: ffff9cab60509b60 RDI: ffffbbf02a2236c0
+   RBP: 0000000000000000 R08: ffff9cab60509b60 R09: ffffbbf02a2236c0
+   R10: 0000000000000001 R11: ffffbbf02a223698 R12: 0000000000000000
+   R13: ffff9cab4e28da80 R14: 0000000000039c01 R15: ffff9cab4e28da88
+   FS:  00007fab89b85e40(0000) GS:ffff9cea3fcc0000(0000) knlGS:0000000000000000
+   CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+   CR2: 00007fab84e00000 CR3: 00000040b73a4003 CR4: 00000000003706e0
+   DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+   DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+   Call Trace:
+    <TASK>
+    xas_load+0x3a/0x50
+    __filemap_get_folio+0x80/0x370
+    ? put_swap_page+0x163/0x360
+    pagecache_get_page+0x13/0x90
+    __try_to_reclaim_swap+0x50/0x190
+    scan_swap_map_slots+0x31e/0x670
+    get_swap_pages+0x226/0x3c0
+    folio_alloc_swap+0x1cc/0x240
+    add_to_swap+0x14/0x70
+    shrink_page_list+0x968/0xbc0
+    reclaim_page_list+0x70/0xf0
+    reclaim_pages+0xdd/0x120
+    madvise_cold_or_pageout_pte_range+0x814/0xf30
+    walk_pgd_range+0x637/0xa30
+    __walk_page_range+0x142/0x170
+    walk_page_range+0x146/0x170
+    madvise_pageout+0xb7/0x280
+    ? asm_common_interrupt+0x22/0x40
+    madvise_vma_behavior+0x3b7/0xac0
+    ? find_vma+0x4a/0x70
+    ? find_vma+0x64/0x70
+    ? madvise_vma_anon_name+0x40/0x40
+    madvise_walk_vmas+0xa6/0x130
+    do_madvise+0x2f4/0x360
+    __x64_sys_madvise+0x26/0x30
+    do_syscall_64+0x5b/0x80
+    ? do_syscall_64+0x67/0x80
+    ? syscall_exit_to_user_mode+0x17/0x40
+    ? do_syscall_64+0x67/0x80
+    ? syscall_exit_to_user_mode+0x17/0x40
+    ? do_syscall_64+0x67/0x80
+    ? do_syscall_64+0x67/0x80
+    ? common_interrupt+0x8b/0xa0
+    entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+The problem can be reproduced with the mmtests config
+config-workload-stressng-mmap.  It does not always happen and when it
+triggers is variable but it has happened on multiple machines.
+
+The intent of commit b653db77350c was to avoid the case where
+PG_private is clear but folio->private is not NULL.  However, THP tail
+pages use page->private for "swp_entry_t if folio_test_swapcache()", as
+stated in the documentation for struct folio.  This patch only clobbers
+page->private for tail pages if the head page was not in swapcache and
+warns once if page->private had an unexpected value.
+
+Link: https://lkml.kernel.org/r/20221019134156.zjyyn5aownakvztf@techsingularity.net
+Fixes: b653db77350c ("mm: Clear page->private when splitting or migrating a page")
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Brian Foster <bfoster@redhat.com>
+Cc: Dan Streetman <ddstreet@ieee.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Oleksandr Natalenko <oleksandr@natalenko.name>
+Cc: Seth Jennings <sjenning@redhat.com>
+Cc: Vitaly Wool <vitaly.wool@konsulko.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2445,7 +2445,16 @@ static void __split_huge_page_tail(struc
+                       page_tail);
+       page_tail->mapping = head->mapping;
+       page_tail->index = head->index + tail;
+-      page_tail->private = 0;
++
++      /*
++       * page->private should not be set in tail pages with the exception
++       * of swap cache pages that store the swp_entry_t in tail pages.
++       * Fix up and warn once if private is unexpectedly set.
++       */
++      if (!folio_test_swapcache(page_folio(head))) {
++              VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, head);
++              page_tail->private = 0;
++      }
+       /* Page flags must be visible before we make the page non-compound. */
+       smp_wmb();
diff --git a/queue-6.0/mm-kmemleak-prevent-soft-lockup-in-kmemleak_scan-s-object-iteration-loops.patch b/queue-6.0/mm-kmemleak-prevent-soft-lockup-in-kmemleak_scan-s-object-iteration-loops.patch
new file mode 100644 (file)
index 0000000..904a3ac
--- /dev/null
@@ -0,0 +1,138 @@
+From 984a608377cb623351b8a3670b285f32ebeb2d32 Mon Sep 17 00:00:00 2001
+From: Waiman Long <longman@redhat.com>
+Date: Thu, 20 Oct 2022 13:56:19 -0400
+Subject: mm/kmemleak: prevent soft lockup in kmemleak_scan()'s object iteration loops
+
+From: Waiman Long <longman@redhat.com>
+
+commit 984a608377cb623351b8a3670b285f32ebeb2d32 upstream.
+
+Commit 6edda04ccc7c ("mm/kmemleak: prevent soft lockup in first object
+iteration loop of kmemleak_scan()") adds cond_resched() in the first
+object iteration loop of kmemleak_scan().  However, it turns out that the 2nd
+object iteration loop can still cause a soft lockup to happen in some
+cases.  So add a cond_resched() call in the 2nd and 3rd loops as well to
+prevent that and for completeness.
+
+Link: https://lkml.kernel.org/r/20221020175619.366317-1-longman@redhat.com
+Fixes: 6edda04ccc7c ("mm/kmemleak: prevent soft lockup in first object iteration loop of kmemleak_scan()")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kmemleak.c |   61 +++++++++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 42 insertions(+), 19 deletions(-)
+
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1464,6 +1464,27 @@ static void scan_gray_list(void)
+ }
+ /*
++ * Conditionally call resched() in a object iteration loop while making sure
++ * that the given object won't go away without RCU read lock by performing a
++ * get_object() if !pinned.
++ *
++ * Return: false if can't do a cond_resched() due to get_object() failure
++ *       true otherwise
++ */
++static bool kmemleak_cond_resched(struct kmemleak_object *object, bool pinned)
++{
++      if (!pinned && !get_object(object))
++              return false;
++
++      rcu_read_unlock();
++      cond_resched();
++      rcu_read_lock();
++      if (!pinned)
++              put_object(object);
++      return true;
++}
++
++/*
+  * Scan data sections and all the referenced memory blocks allocated via the
+  * kernel's standard allocators. This function must be called with the
+  * scan_mutex held.
+@@ -1474,7 +1495,7 @@ static void kmemleak_scan(void)
+       struct zone *zone;
+       int __maybe_unused i;
+       int new_leaks = 0;
+-      int loop1_cnt = 0;
++      int loop_cnt = 0;
+       jiffies_last_scan = jiffies;
+@@ -1483,7 +1504,6 @@ static void kmemleak_scan(void)
+       list_for_each_entry_rcu(object, &object_list, object_list) {
+               bool obj_pinned = false;
+-              loop1_cnt++;
+               raw_spin_lock_irq(&object->lock);
+ #ifdef DEBUG
+               /*
+@@ -1517,24 +1537,11 @@ static void kmemleak_scan(void)
+               raw_spin_unlock_irq(&object->lock);
+               /*
+-               * Do a cond_resched() to avoid soft lockup every 64k objects.
+-               * Make sure a reference has been taken so that the object
+-               * won't go away without RCU read lock.
++               * Do a cond_resched() every 64k objects to avoid soft lockup.
+                */
+-              if (!(loop1_cnt & 0xffff)) {
+-                      if (!obj_pinned && !get_object(object)) {
+-                              /* Try the next object instead */
+-                              loop1_cnt--;
+-                              continue;
+-                      }
+-
+-                      rcu_read_unlock();
+-                      cond_resched();
+-                      rcu_read_lock();
+-
+-                      if (!obj_pinned)
+-                              put_object(object);
+-              }
++              if (!(++loop_cnt & 0xffff) &&
++                  !kmemleak_cond_resched(object, obj_pinned))
++                      loop_cnt--; /* Try again on next object */
+       }
+       rcu_read_unlock();
+@@ -1601,8 +1608,16 @@ static void kmemleak_scan(void)
+        * scan and color them gray until the next scan.
+        */
+       rcu_read_lock();
++      loop_cnt = 0;
+       list_for_each_entry_rcu(object, &object_list, object_list) {
+               /*
++               * Do a cond_resched() every 64k objects to avoid soft lockup.
++               */
++              if (!(++loop_cnt & 0xffff) &&
++                  !kmemleak_cond_resched(object, false))
++                      loop_cnt--;     /* Try again on next object */
++
++              /*
+                * This is racy but we can save the overhead of lock/unlock
+                * calls. The missed objects, if any, should be caught in
+                * the next scan.
+@@ -1635,8 +1650,16 @@ static void kmemleak_scan(void)
+        * Scanning result reporting.
+        */
+       rcu_read_lock();
++      loop_cnt = 0;
+       list_for_each_entry_rcu(object, &object_list, object_list) {
+               /*
++               * Do a cond_resched() every 64k objects to avoid soft lockup.
++               */
++              if (!(++loop_cnt & 0xffff) &&
++                  !kmemleak_cond_resched(object, false))
++                      loop_cnt--;     /* Try again on next object */
++
++              /*
+                * This is racy but we can save the overhead of lock/unlock
+                * calls. The missed objects, if any, should be caught in
+                * the next scan.
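
The helper factored out above is an instance of a common idiom for calling cond_resched() in the middle of an RCU-protected list walk: pin the current element with a reference, drop the RCU read lock, reschedule, reacquire, then drop the reference. A minimal sketch with assumed demo_obj/get_ref/put_ref names:

  #include <linux/rcupdate.h>
  #include <linux/sched.h>

  struct demo_obj;                        /* hypothetical object type */
  bool get_ref(struct demo_obj *obj);     /* assumed refcount helpers */
  void put_ref(struct demo_obj *obj);

  /* Called with rcu_read_lock() held; returns false if obj cannot be pinned. */
  static bool demo_cond_resched(struct demo_obj *obj)
  {
          if (!get_ref(obj))          /* pin obj so it survives the unlock */
                  return false;

          rcu_read_unlock();
          cond_resched();             /* legal: no RCU read-side section held */
          rcu_read_lock();

          put_ref(obj);
          return true;
  }
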
diff --git a/queue-6.0/mm-madvise-hugetlb-fix-unexpected-data-loss-with-madv_dontneed-on-hugetlbfs.patch b/queue-6.0/mm-madvise-hugetlb-fix-unexpected-data-loss-with-madv_dontneed-on-hugetlbfs.patch
new file mode 100644 (file)
index 0000000..095767a
--- /dev/null
@@ -0,0 +1,72 @@
+From 8ebe0a5eaaeb099de03d09ad20f54ed962e2261e Mon Sep 17 00:00:00 2001
+From: Rik van Riel <riel@surriel.com>
+Date: Fri, 21 Oct 2022 19:28:05 -0400
+Subject: mm,madvise,hugetlb: fix unexpected data loss with MADV_DONTNEED on hugetlbfs
+
+From: Rik van Riel <riel@surriel.com>
+
+commit 8ebe0a5eaaeb099de03d09ad20f54ed962e2261e upstream.
+
+A common use case for hugetlbfs is for the application to create
+memory pools backed by huge pages, which then get handed over to
+some malloc library (e.g. jemalloc) for further management.
+
+That malloc library may be doing MADV_DONTNEED calls on memory
+that is no longer needed, expecting those calls to happen on
+PAGE_SIZE boundaries.
+
+However, currently the MADV_DONTNEED code rounds up any such
+requests to HPAGE_PMD_SIZE boundaries. This leads to undesired
+outcomes when jemalloc expects a 4kB MADV_DONTNEED, but 2MB of
+memory gets zeroed out instead.
+
+Use of pre-built shared libraries means that user code does not
+always know the page size of every memory arena in use.
+
+Avoid unexpected data loss with MADV_DONTNEED by rounding up
+only to PAGE_SIZE (in do_madvise), and rounding down to huge
+page granularity.
+
+That way programs will only get as much memory zeroed out as
+they requested.
+
+Link: https://lkml.kernel.org/r/20221021192805.366ad573@imladris.surriel.com
+Fixes: 90e7e7f5ef3f ("mm: enable MADV_DONTNEED for hugetlb mappings")
+Signed-off-by: Rik van Riel <riel@surriel.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/madvise.c |   12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -811,7 +811,14 @@ static bool madvise_dontneed_free_valid_
+       if (start & ~huge_page_mask(hstate_vma(vma)))
+               return false;
+-      *end = ALIGN(*end, huge_page_size(hstate_vma(vma)));
++      /*
++       * Madvise callers expect the length to be rounded up to PAGE_SIZE
++       * boundaries, and may be unaware that this VMA uses huge pages.
++       * Avoid unexpected data loss by rounding down the number of
++       * huge pages freed.
++       */
++      *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));
++
+       return true;
+ }
+@@ -826,6 +833,9 @@ static long madvise_dontneed_free(struct
+       if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
+               return -EINVAL;
++      if (start == end)
++              return 0;
++
+       if (!userfaultfd_remove(vma, start, end)) {
+               *prev = NULL; /* mmap_lock has been dropped, prev is stale */
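
A worked example of the new rounding, using assumed numbers: with 2 MiB huge pages, a 4 kB MADV_DONTNEED is rounded up only to PAGE_SIZE by do_madvise(), and rounding the end down to the huge-page boundary then yields start == end, so nothing is freed instead of a whole 2 MiB. A hedged arithmetic sketch (DEMO_HPAGE_SIZE is illustrative):

  #include <linux/align.h>

  #define DEMO_HPAGE_SIZE (2UL << 20)     /* assumed 2 MiB huge page size */

  /* end has already been rounded up to PAGE_SIZE by do_madvise(). */
  static unsigned long demo_dontneed_end(unsigned long end)
  {
          /*
           * Old behaviour: ALIGN(end, DEMO_HPAGE_SIZE) could turn a 4 kB
           * request into a 2 MiB one. New behaviour: round down, so a
           * sub-hugepage request frees nothing rather than too much.
           */
          return ALIGN_DOWN(end, DEMO_HPAGE_SIZE);
  }
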
diff --git a/queue-6.0/mm-migrate-fix-return-value-if-all-subpages-of-thps-are-migrated-successfully.patch b/queue-6.0/mm-migrate-fix-return-value-if-all-subpages-of-thps-are-migrated-successfully.patch
new file mode 100644 (file)
index 0000000..8fc6580
--- /dev/null
@@ -0,0 +1,49 @@
+From 03e5f82ea632af329e32ec03d952b2d99497eeaa Mon Sep 17 00:00:00 2001
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+Date: Mon, 24 Oct 2022 16:34:21 +0800
+Subject: mm: migrate: fix return value if all subpages of THPs are migrated successfully
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+commit 03e5f82ea632af329e32ec03d952b2d99497eeaa upstream.
+
+During THP migration, if THPs are not migrated but they are split and all
+subpages are migrated successfully, migrate_pages() will still return the
+number of THP pages that were not migrated.  This will confuse the callers
+of migrate_pages().  For example, longterm pinning will fail even though
+all pages were migrated successfully.
+
+Thus we should return 0 to indicate that all pages are migrated in this
+case.
+
+Link: https://lkml.kernel.org/r/de386aa864be9158d2f3b344091419ea7c38b2f7.1666599848.git.baolin.wang@linux.alibaba.com
+Fixes: b5bade978e9b ("mm: migrate: fix the return value of migrate_pages()")
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: Alistair Popple <apopple@nvidia.com>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: "Huang, Ying" <ying.huang@intel.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1558,6 +1558,13 @@ out:
+        */
+       list_splice(&ret_pages, from);
++      /*
++       * Return 0 in case all subpages of fail-to-migrate THPs are
++       * migrated successfully.
++       */
++      if (list_empty(from))
++              rc = 0;
++
+       count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
+       count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
+       count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
diff --git a/queue-6.0/mm-prep_compound_tail-clear-page-private.patch b/queue-6.0/mm-prep_compound_tail-clear-page-private.patch
new file mode 100644 (file)
index 0000000..fe5a6c0
--- /dev/null
@@ -0,0 +1,60 @@
+From 5aae9265ee1a30cf716d6caf6b29fe99b9d55130 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Sat, 22 Oct 2022 00:51:06 -0700
+Subject: mm: prep_compound_tail() clear page->private
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 5aae9265ee1a30cf716d6caf6b29fe99b9d55130 upstream.
+
+Although page allocation always clears page->private in the first page or
+head page of an allocation, it has never made a point of clearing
+page->private in the tails (though 0 is often what is already there).
+
+But now commit 71e2d666ef85 ("mm/huge_memory: do not clobber swp_entry_t
+during THP split") issues a warning when page_tail->private is found to be
+non-0 (unless it's swapcache).
+
+Change that warning to dump page_tail (which also dumps head), instead of
+just the head: so far we have seen dead000000000122, dead000000000003,
+dead000000000001 or 0000000000000002 in the raw output for tail private.
+
+We could just delete the warning, but today's consensus appears to want
+page->private to be 0, unless there's a good reason for it to be set: so
+now clear it in prep_compound_tail() (more general than just for THP; but
+not for high order allocation, which makes no pass down the tails).
+
+Link: https://lkml.kernel.org/r/1c4233bb-4e4d-5969-fbd4-96604268a285@google.com
+Fixes: 71e2d666ef85 ("mm/huge_memory: do not clobber swp_entry_t during THP split")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Mel Gorman <mgorman@techsingularity.net>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c |    2 +-
+ mm/page_alloc.c  |    1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2452,7 +2452,7 @@ static void __split_huge_page_tail(struc
+        * Fix up and warn once if private is unexpectedly set.
+        */
+       if (!folio_test_swapcache(page_folio(head))) {
+-              VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, head);
++              VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail);
+               page_tail->private = 0;
+       }
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -804,6 +804,7 @@ static void prep_compound_tail(struct pa
+       p->mapping = TAIL_MAPPING;
+       set_compound_head(p, head);
++      set_page_private(p, 0);
+ }
+ void prep_compound_page(struct page *page, unsigned int order)
diff --git a/queue-6.0/mm-uffd-fix-vma-check-on-userfault-for-wp.patch b/queue-6.0/mm-uffd-fix-vma-check-on-userfault-for-wp.patch
new file mode 100644 (file)
index 0000000..20ebdb3
--- /dev/null
@@ -0,0 +1,62 @@
+From 67eae54bc227b30dedcce9db68b063ba1adb7838 Mon Sep 17 00:00:00 2001
+From: Peter Xu <peterx@redhat.com>
+Date: Mon, 24 Oct 2022 15:33:35 -0400
+Subject: mm/uffd: fix vma check on userfault for wp
+
+From: Peter Xu <peterx@redhat.com>
+
+commit 67eae54bc227b30dedcce9db68b063ba1adb7838 upstream.
+
+We used to have a report that pte-marker code can be reached even when
+uffd-wp is not compiled in for file memories, here:
+
+https://lore.kernel.org/all/YzeR+R6b4bwBlBHh@x1n/T/#u
+
+I just got time to revisit this and found that the root cause is we simply
+messed up with the vma check, so that on a !PTE_MARKER_UFFD_WP system, we
+will allow UFFDIO_REGISTER of MINOR & WP upon shmem as the check was
+wrong:
+
+    if (vm_flags & VM_UFFD_MINOR)
+        return is_vm_hugetlb_page(vma) || vma_is_shmem(vma);
+
+Where we'll allow anything to pass on shmem as long as minor mode is
+requested.
+
+Axel did it right when introducing minor mode but I messed it up in
+b1f9e876862d when moving code around.  Fix it.
+
+Link: https://lkml.kernel.org/r/20221024193336.1233616-1-peterx@redhat.com
+Link: https://lkml.kernel.org/r/20221024193336.1233616-2-peterx@redhat.com
+Fixes: b1f9e876862d ("mm/uffd: enable write protection for shmem & hugetlbfs")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Nadav Amit <nadav.amit@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/userfaultfd_k.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
+index f07e6998bb68..9df0b9a762cc 100644
+--- a/include/linux/userfaultfd_k.h
++++ b/include/linux/userfaultfd_k.h
+@@ -146,9 +146,9 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+ static inline bool vma_can_userfault(struct vm_area_struct *vma,
+                                    unsigned long vm_flags)
+ {
+-      if (vm_flags & VM_UFFD_MINOR)
+-              return is_vm_hugetlb_page(vma) || vma_is_shmem(vma);
+-
++      if ((vm_flags & VM_UFFD_MINOR) &&
++          (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
++              return false;
+ #ifndef CONFIG_PTE_MARKER_UFFD_WP
+       /*
+        * If user requested uffd-wp but not enabled pte markers for
+-- 
+2.38.1
+
diff --git a/queue-6.0/mmc-block-remove-error-check-of-hw_reset-on-reset.patch b/queue-6.0/mmc-block-remove-error-check-of-hw_reset-on-reset.patch
new file mode 100644 (file)
index 0000000..357f48e
--- /dev/null
@@ -0,0 +1,108 @@
+From 406e14808ee695cbae1eafa5fd3ac563c29470ab Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20L=C3=B6hle?= <CLoehle@hyperstone.com>
+Date: Thu, 13 Oct 2022 11:16:37 +0000
+Subject: mmc: block: Remove error check of hw_reset on reset
+
+From: Christian Löhle <CLoehle@hyperstone.com>
+
+commit 406e14808ee695cbae1eafa5fd3ac563c29470ab upstream.
+
+Before switching back to the right partition in mmc_blk_reset there used
+to be a check if hw_reset was even supported. This return value
+was removed, so there is no reason to check. Furthermore ensure
+part_curr is not falsely set to a valid value on reset or
+partition switch error.
+
+As part of this change the code paths of mmc_blk_reset calls were checked
+to ensure no commands are issued after a failed mmc_blk_reset directly
+without going through the block layer.
+
+Fixes: fefdd3c91e0a ("mmc: core: Drop superfluous validations in mmc_hw|sw_reset()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christian Loehle <cloehle@hyperstone.com>
+Reviewed-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/e91be6199d04414a91e20611c81bfe1d@hyperstone.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/block.c |   44 ++++++++++++++++++++++++++------------------
+ 1 file changed, 26 insertions(+), 18 deletions(-)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -134,6 +134,7 @@ struct mmc_blk_data {
+        * track of the current selected device partition.
+        */
+       unsigned int    part_curr;
++#define MMC_BLK_PART_INVALID  UINT_MAX        /* Unknown partition active */
+       int     area_type;
+       /* debugfs files (only in main mmc_blk_data) */
+@@ -987,33 +988,39 @@ static unsigned int mmc_blk_data_timeout
+       return ms;
+ }
++/*
++ * Attempts to reset the card and get back to the requested partition.
++ * Therefore any error here must result in cancelling the block layer
++ * request, it must not be reattempted without going through the mmc_blk
++ * partition sanity checks.
++ */
+ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+                        int type)
+ {
+       int err;
++      struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);
+       if (md->reset_done & type)
+               return -EEXIST;
+       md->reset_done |= type;
+       err = mmc_hw_reset(host->card);
++      /*
++       * A successful reset will leave the card in the main partition, but
++       * upon failure it might not be, so set it to MMC_BLK_PART_INVALID
++       * in that case.
++       */
++      main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
++      if (err)
++              return err;
+       /* Ensure we switch back to the correct partition */
+-      if (err) {
+-              struct mmc_blk_data *main_md =
+-                      dev_get_drvdata(&host->card->dev);
+-              int part_err;
+-
+-              main_md->part_curr = main_md->part_type;
+-              part_err = mmc_blk_part_switch(host->card, md->part_type);
+-              if (part_err) {
+-                      /*
+-                       * We have failed to get back into the correct
+-                       * partition, so we need to abort the whole request.
+-                       */
+-                      return -ENODEV;
+-              }
+-      }
+-      return err;
++      if (mmc_blk_part_switch(host->card, md->part_type))
++              /*
++               * We have failed to get back into the correct
++               * partition, so we need to abort the whole request.
++               */
++              return -ENODEV;
++      return 0;
+ }
+ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+@@ -1871,8 +1878,9 @@ static void mmc_blk_mq_rw_recovery(struc
+               return;
+       /* Reset before last retry */
+-      if (mqrq->retries + 1 == MMC_MAX_RETRIES)
+-              mmc_blk_reset(md, card->host, type);
++      if (mqrq->retries + 1 == MMC_MAX_RETRIES &&
++          mmc_blk_reset(md, card->host, type))
++              return;
+       /* Command errors fail fast, so use all MMC_MAX_RETRIES */
+       if (brq->sbc.error || brq->cmd.error)
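
MMC_BLK_PART_INVALID is an example of the "poison the cached state on failure" idiom: after a failed reset the driver no longer knows which partition is active, so part_curr is set to a value no real partition can have, forcing a partition switch before any further command. A hedged sketch with made-up demo_* names, not the mmc code itself:

  #include <linux/limits.h>

  #define DEMO_PART_INVALID UINT_MAX      /* no real partition uses this value */

  struct demo_dev {
          unsigned int part_curr;     /* cached "currently selected" partition */
          unsigned int part_type;     /* partition a successful reset leaves active */
  };

  static int demo_reset(struct demo_dev *dev, int (*hw_reset)(struct demo_dev *))
  {
          int err = hw_reset(dev);

          /* On failure the active partition is unknown: poison the cache. */
          dev->part_curr = err ? DEMO_PART_INVALID : dev->part_type;
          return err;
  }
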
diff --git a/queue-6.0/mmc-core-fix-kernel-panic-when-remove-non-standard-sdio-card.patch b/queue-6.0/mmc-core-fix-kernel-panic-when-remove-non-standard-sdio-card.patch
new file mode 100644 (file)
index 0000000..313bfd7
--- /dev/null
@@ -0,0 +1,39 @@
+From 9972e6b404884adae9eec7463e30d9b3c9a70b18 Mon Sep 17 00:00:00 2001
+From: Matthew Ma <mahongwei@zeku.com>
+Date: Fri, 14 Oct 2022 11:49:51 +0800
+Subject: mmc: core: Fix kernel panic when remove non-standard SDIO card
+
+From: Matthew Ma <mahongwei@zeku.com>
+
+commit 9972e6b404884adae9eec7463e30d9b3c9a70b18 upstream.
+
+The SDIO tuple is only allocated for standard SDIO cards. In particular, this
+causes memory corruption issues when a non-standard SDIO card is removed,
+because the card device's reference counter is not increased for it in
+sdio_init_func(), yet every SDIO card device's reference counter gets
+decreased in sdio_release_func().
+
+Fixes: 6f51be3d37df ("sdio: allow non-standard SDIO cards")
+Signed-off-by: Matthew Ma <mahongwei@zeku.com>
+Reviewed-by: Weizhao Ouyang <ouyangweizhao@zeku.com>
+Reviewed-by: John Wang <wangdayu@zeku.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221014034951.2300386-1-ouyangweizhao@zeku.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/sdio_bus.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/core/sdio_bus.c
++++ b/drivers/mmc/core/sdio_bus.c
+@@ -291,7 +291,8 @@ static void sdio_release_func(struct dev
+ {
+       struct sdio_func *func = dev_to_sdio_func(dev);
+-      sdio_free_func_cis(func);
++      if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
++              sdio_free_func_cis(func);
+       kfree(func->info);
+       kfree(func->tmpbuf);
diff --git a/queue-6.0/mmc-core-fix-write_zeroes-cqe-handling.patch b/queue-6.0/mmc-core-fix-write_zeroes-cqe-handling.patch
new file mode 100644 (file)
index 0000000..09be089
--- /dev/null
@@ -0,0 +1,42 @@
+From 028822b714bd3a159d65416c53f1549345b53d9e Mon Sep 17 00:00:00 2001
+From: Vincent Whitchurch <vincent.whitchurch@axis.com>
+Date: Thu, 20 Oct 2022 15:01:23 +0200
+Subject: mmc: core: Fix WRITE_ZEROES CQE handling
+
+From: Vincent Whitchurch <vincent.whitchurch@axis.com>
+
+commit 028822b714bd3a159d65416c53f1549345b53d9e upstream.
+
+WRITE_ZEROES requests use TRIM, so mark them as needing to be issued
+synchronously even when a CQE is being used.  Without this,
+mmc_blk_mq_issue_rq() triggers a WARN_ON_ONCE() and fails the request
+since we don't have any handling for issuing this asynchronously.
+
+Fixes: f7b6fc327327 ("mmc: core: Support zeroout using TRIM for eMMC")
+Reported-by: Jon Hunter <jonathanh@nvidia.com>
+Tested-by: Jon Hunter <jonathanh@nvidia.com>
+Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
+Reviewed-by: Avri Altman <avri.altman@wdc.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221020130123.4033218-1-vincent.whitchurch@axis.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/queue.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
+index 86be55d7cf55..b396e3900717 100644
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -48,6 +48,7 @@ static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
+       case REQ_OP_DRV_OUT:
+       case REQ_OP_DISCARD:
+       case REQ_OP_SECURE_ERASE:
++      case REQ_OP_WRITE_ZEROES:
+               return MMC_ISSUE_SYNC;
+       case REQ_OP_FLUSH:
+               return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
+-- 
+2.38.1
+
diff --git a/queue-6.0/mmc-queue-cancel-recovery-work-on-cleanup.patch b/queue-6.0/mmc-queue-cancel-recovery-work-on-cleanup.patch
new file mode 100644 (file)
index 0000000..6265fea
--- /dev/null
@@ -0,0 +1,39 @@
+From 339e3eb1facd18a98ceb1171d70674780e5014a7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20L=C3=B6hle?= <CLoehle@hyperstone.com>
+Date: Fri, 7 Oct 2022 15:43:52 +0000
+Subject: mmc: queue: Cancel recovery work on cleanup
+
+From: Christian Löhle <CLoehle@hyperstone.com>
+
+commit 339e3eb1facd18a98ceb1171d70674780e5014a7 upstream.
+
+To prevent any recovery work running after the queue cleanup cancel it.
+Any recovery running post-cleanup dereferenced mq->card as NULL
+and was not meaningful to begin with.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Christian Loehle <cloehle@hyperstone.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/c865c0c9789d428494b67b820a78923e@hyperstone.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/queue.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -493,6 +493,13 @@ void mmc_cleanup_queue(struct mmc_queue
+       if (blk_queue_quiesced(q))
+               blk_mq_unquiesce_queue(q);
++      /*
++       * If the recovery completes the last (and only remaining) request in
++       * the queue, and the card has been removed, we could end up here with
++       * the recovery not quite finished yet, so cancel it.
++       */
++      cancel_work_sync(&mq->recovery_work);
++
+       blk_mq_free_tag_set(&mq->tag_set);
+       /*
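
The underlying rule is one of teardown ordering: any work item whose handler dereferences a structure must be cancelled (or waited for) with cancel_work_sync() before that structure is invalidated. A small illustrative sketch with assumed demo_* names, not the mmc code itself:

  #include <linux/workqueue.h>

  struct demo_queue {
          struct work_struct recovery_work;
          void *card;                 /* what the work handler dereferences */
  };

  static void demo_cleanup(struct demo_queue *q)
  {
          /* Wait for any in-flight recovery work first... */
          cancel_work_sync(&q->recovery_work);

          /* ...then it is safe to tear down what the handler would have used. */
          q->card = NULL;
  }
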
diff --git a/queue-6.0/mmc-sdhci-esdhc-imx-propagate-esdhc_flag_hs400-only-on-8bit-bus.patch b/queue-6.0/mmc-sdhci-esdhc-imx-propagate-esdhc_flag_hs400-only-on-8bit-bus.patch
new file mode 100644 (file)
index 0000000..bbf6782
--- /dev/null
@@ -0,0 +1,67 @@
+From 1ed5c3b22fc78735c539e4767832aea58db6761c Mon Sep 17 00:00:00 2001
+From: Sascha Hauer <s.hauer@pengutronix.de>
+Date: Thu, 13 Oct 2022 11:32:48 +0200
+Subject: mmc: sdhci-esdhc-imx: Propagate ESDHC_FLAG_HS400* only on 8bit bus
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+commit 1ed5c3b22fc78735c539e4767832aea58db6761c upstream.
+
+The core issues the warning "drop HS400 support since no 8-bit bus" when
+one of the ESDHC_FLAG_HS400* flags is set on a host that is not 8-bit capable.
+To avoid this warning, set these flags only on hosts that can actually do
+8-bit, i.e. have bus-width = <8> set in the device tree.
+
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Reviewed-by: Haibo Chen <haibo.chen@nxp.com>
+Fixes: 029e2476f9e6 ("mmc: sdhci-esdhc-imx: add HS400_ES support for i.MX8QXP")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221013093248.2220802-1-s.hauer@pengutronix.de
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-esdhc-imx.c |   14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1660,6 +1660,10 @@ static int sdhci_esdhc_imx_probe(struct
+               host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
+       }
++      err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
++      if (err)
++              goto disable_ahb_clk;
++
+       if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+               sdhci_esdhc_ops.platform_execute_tuning =
+                                       esdhc_executing_tuning;
+@@ -1667,13 +1671,15 @@ static int sdhci_esdhc_imx_probe(struct
+       if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
+               host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+-      if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
++      if (host->caps & MMC_CAP_8_BIT_DATA &&
++          imx_data->socdata->flags & ESDHC_FLAG_HS400)
+               host->mmc->caps2 |= MMC_CAP2_HS400;
+       if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
+               host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
+-      if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
++      if (host->caps & MMC_CAP_8_BIT_DATA &&
++          imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
+               host->mmc->caps2 |= MMC_CAP2_HS400_ES;
+               host->mmc_host_ops.hs400_enhanced_strobe =
+                                       esdhc_hs400_enhanced_strobe;
+@@ -1695,10 +1701,6 @@ static int sdhci_esdhc_imx_probe(struct
+                       goto disable_ahb_clk;
+       }
+-      err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+-      if (err)
+-              goto disable_ahb_clk;
+-
+       sdhci_esdhc_imx_hwinit(host);
+       err = sdhci_add_host(host);
diff --git a/queue-6.0/mmc-sdhci-pci-core-disable-es-for-asus-bios-on-jasper-lake.patch b/queue-6.0/mmc-sdhci-pci-core-disable-es-for-asus-bios-on-jasper-lake.patch
new file mode 100644 (file)
index 0000000..5a0d754
--- /dev/null
@@ -0,0 +1,58 @@
+From 9dc0033e4658d6f9d9952c3c0c6be3ec25bc2985 Mon Sep 17 00:00:00 2001
+From: Patrick Thompson <ptf@google.com>
+Date: Thu, 13 Oct 2022 17:00:17 -0400
+Subject: mmc: sdhci-pci-core: Disable ES for ASUS BIOS on Jasper Lake
+
+From: Patrick Thompson <ptf@google.com>
+
+commit 9dc0033e4658d6f9d9952c3c0c6be3ec25bc2985 upstream.
+
+Enhanced Strobe (ES) does not work correctly on the ASUS 1100 series of
+devices. Jasper Lake eMMCs (pci_id 8086:4dc4) are supposed to support
+ES. There are also two system families under the series, thus this is
+being scoped to the ASUS BIOS.
+
+The failing ES prevents the installer from writing to disk. Falling back
+to HS400 without ES fixes the issue.
+
+Signed-off-by: Patrick Thompson <ptf@google.com>
+Fixes: 315e3bd7ac19 ("mmc: sdhci-pci: Add support for Intel JSL")
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221013210017.3751025-1-ptf@google.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-pci-core.c |   14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -893,6 +893,12 @@ static bool glk_broken_cqhci(struct sdhc
+               dmi_match(DMI_SYS_VENDOR, "IRBIS"));
+ }
++static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
++{
++      return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
++                      dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
++}
++
+ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ {
+       int ret = byt_emmc_probe_slot(slot);
+@@ -901,9 +907,11 @@ static int glk_emmc_probe_slot(struct sd
+               slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+       if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
+-              slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
+-              slot->host->mmc_host_ops.hs400_enhanced_strobe =
+-                                              intel_hs400_enhanced_strobe;
++              if (!jsl_broken_hs400es(slot)) {
++                      slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
++                      slot->host->mmc_host_ops.hs400_enhanced_strobe =
++                                                      intel_hs400_enhanced_strobe;
++              }
+               slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
+       }
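
The quirk uses the usual DMI-scoping pattern: a capability is withheld only when both the device ID and a firmware identity string match, so other systems with the same controller keep Enhanced Strobe. A rough sketch with assumed demo_* names (the device ID below is the Jasper Lake eMMC one mentioned in the commit message):

  #include <linux/dmi.h>

  #define DEMO_JSL_EMMC_DEVICE_ID 0x4dc4  /* pci_id 8086:4dc4 from the commit message */

  static bool demo_hs400es_broken(unsigned int pci_device_id)
  {
          return pci_device_id == DEMO_JSL_EMMC_DEVICE_ID &&
                 dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
  }
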
diff --git a/queue-6.0/mmc-sdhci_am654-select-not-depends-regmap_mmio.patch b/queue-6.0/mmc-sdhci_am654-select-not-depends-regmap_mmio.patch
new file mode 100644 (file)
index 0000000..a85e323
--- /dev/null
@@ -0,0 +1,39 @@
+From 8d280b1df87e0b3d1355aeac7e62b62214b93f1c Mon Sep 17 00:00:00 2001
+From: Brian Norris <briannorris@chromium.org>
+Date: Mon, 24 Oct 2022 11:02:59 -0700
+Subject: mmc: sdhci_am654: 'select', not 'depends' REGMAP_MMIO
+
+From: Brian Norris <briannorris@chromium.org>
+
+commit 8d280b1df87e0b3d1355aeac7e62b62214b93f1c upstream.
+
+REGMAP_MMIO is not user-configurable, so we can only satisfy this
+dependency by enabling some other Kconfig symbol that properly 'select's
+it. Use select like everybody else.
+
+Noticed when trying to enable this driver for compile testing.
+
+Fixes: 59592cc1f593 ("mmc: sdhci_am654: Add dependency on MMC_SDHCI_AM654")
+Signed-off-by: Brian Norris <briannorris@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221024180300.2292208-1-briannorris@chromium.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/Kconfig |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -1074,9 +1074,10 @@ config MMC_SDHCI_OMAP
+ config MMC_SDHCI_AM654
+       tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
+-      depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO
++      depends on MMC_SDHCI_PLTFM && OF
+       select MMC_SDHCI_IO_ACCESSORS
+       select MMC_CQHCI
++      select REGMAP_MMIO
+       help
+         This selects the Secure Digital Host Controller Interface (SDHCI)
+         support present in TI's AM654 SOCs. The controller supports
diff --git a/queue-6.0/random-use-arch_get_random-_early-in-random_init.patch b/queue-6.0/random-use-arch_get_random-_early-in-random_init.patch
new file mode 100644 (file)
index 0000000..561b45e
--- /dev/null
@@ -0,0 +1,47 @@
+From f5e4ec155d145002fd9840868453d785fab86d42 Mon Sep 17 00:00:00 2001
+From: Jean-Philippe Brucker <jean-philippe@linaro.org>
+Date: Fri, 28 Oct 2022 17:00:42 +0100
+Subject: random: use arch_get_random*_early() in random_init()
+
+From: Jean-Philippe Brucker <jean-philippe@linaro.org>
+
+commit f5e4ec155d145002fd9840868453d785fab86d42 upstream.
+
+While reworking the archrandom handling, commit d349ab99eec7 ("random:
+handle archrandom with multiple longs") switched to the non-early
+archrandom helpers in random_init(), which broke initialization of the
+entropy pool from the arm64 random generator.
+
+Indeed, at that point the arm64 CPU features, which verify that all CPUs
+have compatible capabilities, are not yet finalized, so arch_get_random_seed_longs()
+is unsuccessful. Instead, random_init() should use the _early functions,
+which check only the boot CPU on arm64. On other architectures the
+_early functions directly call the normal ones.
+
+Fixes: d349ab99eec7 ("random: handle archrandom with multiple longs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/random.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -793,13 +793,13 @@ int __init random_init(const char *comma
+ #endif
+       for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
+-              longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
++              longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+               if (longs) {
+                       _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+                       i += longs;
+                       continue;
+               }
+-              longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
++              longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+               if (longs) {
+                       _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+                       i += longs;
diff --git a/queue-6.0/series b/queue-6.0/series
index 14546a0a6b2384f4a66bc2d9e680d8dbe3f7f677..ee8f2460d6466c5d2d802e2e0210756e019fad2f 100644 (file)
--- a/queue-6.0/series
@@ -73,3 +73,20 @@ drm-msm-dp-fix-aux-bus-ep-lifetime.patch
 drm-msm-dp-fix-irq-lifetime.patch
 drm-msm-dp-fix-bridge-lifetime.patch
 crypto-x86-polyval-fix-crashes-when-keys-are-not-16-byte-aligned.patch
+random-use-arch_get_random-_early-in-random_init.patch
+coresight-cti-fix-hang-in-cti_disable_hw.patch
+mmc-sdhci_am654-select-not-depends-regmap_mmio.patch
+mmc-block-remove-error-check-of-hw_reset-on-reset.patch
+mmc-queue-cancel-recovery-work-on-cleanup.patch
+mmc-core-fix-kernel-panic-when-remove-non-standard-sdio-card.patch
+mmc-core-fix-write_zeroes-cqe-handling.patch
+mmc-sdhci-pci-core-disable-es-for-asus-bios-on-jasper-lake.patch
+mmc-sdhci-esdhc-imx-propagate-esdhc_flag_hs400-only-on-8bit-bus.patch
+counter-microchip-tcb-capture-handle-signal1-read-and-synapse.patch
+counter-104-quad-8-fix-race-getting-function-mode-and-direction.patch
+mm-uffd-fix-vma-check-on-userfault-for-wp.patch
+mm-migrate-fix-return-value-if-all-subpages-of-thps-are-migrated-successfully.patch
+mm-madvise-hugetlb-fix-unexpected-data-loss-with-madv_dontneed-on-hugetlbfs.patch
+mm-kmemleak-prevent-soft-lockup-in-kmemleak_scan-s-object-iteration-loops.patch
+mm-huge_memory-do-not-clobber-swp_entry_t-during-thp-split.patch
+mm-prep_compound_tail-clear-page-private.patch