From: Greg Kroah-Hartman Date: Wed, 24 Jul 2019 14:30:47 +0000 (+0200) Subject: 5.2-stable patches X-Git-Tag: v5.2.3~6 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=48a5a0441a94ea7297bc96165850893768f5317a;p=thirdparty%2Fkernel%2Fstable-queue.git 5.2-stable patches added patches: blk-iolatency-clear-use_delay-when-io.latency-is-set-to-zero.patch blk-throttle-fix-zero-wait-time-for-iops-throttled-group.patch blkcg-update-blkcg_print_stat-to-handle-larger-outputs.patch clk-imx-imx8mm-correct-audio_pll2_clk-to-audio_pll2_out.patch dax-fix-missed-wakeup-with-pmd-faults.patch dm-bufio-fix-deadlock-with-loop-device.patch dm-thin-metadata-check-if-in-fail_io-mode-when-setting-needs_check.patch dt-bindings-allow-up-to-four-clocks-for-orion-mdio.patch net-mvmdio-allow-up-to-four-clocks-to-be-specified-for-orion-mdio.patch phy-qcom-qmp-correct-ready_status-poll-break-condition.patch pstore-fix-double-free-in-pstore_mkfile-failure-path.patch usb-handle-usb3-remote-wakeup-for-lpm-enabled-devices-correctly.patch --- diff --git a/queue-5.2/blk-iolatency-clear-use_delay-when-io.latency-is-set-to-zero.patch b/queue-5.2/blk-iolatency-clear-use_delay-when-io.latency-is-set-to-zero.patch new file mode 100644 index 00000000000..656331eb3fd --- /dev/null +++ b/queue-5.2/blk-iolatency-clear-use_delay-when-io.latency-is-set-to-zero.patch @@ -0,0 +1,39 @@ +From 5de0073fcd50cc1f150895a7bb04d3cf8067b1d7 Mon Sep 17 00:00:00 2001 +From: Tejun Heo +Date: Thu, 13 Jun 2019 15:30:37 -0700 +Subject: blk-iolatency: clear use_delay when io.latency is set to zero + +From: Tejun Heo + +commit 5de0073fcd50cc1f150895a7bb04d3cf8067b1d7 upstream. + +If use_delay was non-zero when the latency target of a cgroup was set +to zero, it will stay stuck until io.latency is enabled on the cgroup +again. This keeps readahead disabled for the cgroup impacting +performance negatively. 
+ +Signed-off-by: Tejun Heo +Cc: Josef Bacik +Fixes: d70675121546 ("block: introduce blk-iolatency io controller") +Cc: stable@vger.kernel.org # v4.19+ +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman + +--- + block/blk-iolatency.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/block/blk-iolatency.c ++++ b/block/blk-iolatency.c +@@ -759,8 +759,10 @@ static int iolatency_set_min_lat_nsec(st + + if (!oldval && val) + return 1; +- if (oldval && !val) ++ if (oldval && !val) { ++ blkcg_clear_delay(blkg); + return -1; ++ } + return 0; + } + diff --git a/queue-5.2/blk-throttle-fix-zero-wait-time-for-iops-throttled-group.patch b/queue-5.2/blk-throttle-fix-zero-wait-time-for-iops-throttled-group.patch new file mode 100644 index 00000000000..0de85bbe8be --- /dev/null +++ b/queue-5.2/blk-throttle-fix-zero-wait-time-for-iops-throttled-group.patch @@ -0,0 +1,46 @@ +From 3a10f999ffd464d01c5a05592a15470a3c4bbc36 Mon Sep 17 00:00:00 2001 +From: Konstantin Khlebnikov +Date: Mon, 8 Jul 2019 18:29:57 +0300 +Subject: blk-throttle: fix zero wait time for iops throttled group + +From: Konstantin Khlebnikov + +commit 3a10f999ffd464d01c5a05592a15470a3c4bbc36 upstream. + +After commit 991f61fe7e1d ("Blk-throttle: reduce tail io latency when +iops limit is enforced") wait time could be zero even if group is +throttled and cannot issue requests right now. As a result +throtl_select_dispatch() turns into busy-loop under irq-safe queue +spinlock. + +Fix is simple: always round up target time to the next throttle slice. 
+ +Fixes: 991f61fe7e1d ("Blk-throttle: reduce tail io latency when iops limit is enforced") +Signed-off-by: Konstantin Khlebnikov +Cc: stable@vger.kernel.org # v4.19+ +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman + +--- + block/blk-throttle.c | 9 +++------ + 1 file changed, 3 insertions(+), 6 deletions(-) + +--- a/block/blk-throttle.c ++++ b/block/blk-throttle.c +@@ -881,13 +881,10 @@ static bool tg_with_in_iops_limit(struct + unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; + u64 tmp; + +- jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; ++ jiffy_elapsed = jiffies - tg->slice_start[rw]; + +- /* Slice has just started. Consider one slice interval */ +- if (!jiffy_elapsed) +- jiffy_elapsed_rnd = tg->td->throtl_slice; +- +- jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); ++ /* Round up to the next throttle slice, wait time must be nonzero */ ++ jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); + + /* + * jiffy_elapsed_rnd should not be a big value as minimum iops can be diff --git a/queue-5.2/blkcg-update-blkcg_print_stat-to-handle-larger-outputs.patch b/queue-5.2/blkcg-update-blkcg_print_stat-to-handle-larger-outputs.patch new file mode 100644 index 00000000000..8e67c97962e --- /dev/null +++ b/queue-5.2/blkcg-update-blkcg_print_stat-to-handle-larger-outputs.patch @@ -0,0 +1,44 @@ +From f539da82f2158916e154d206054e0efd5df7ab61 Mon Sep 17 00:00:00 2001 +From: Tejun Heo +Date: Thu, 13 Jun 2019 15:30:38 -0700 +Subject: blkcg: update blkcg_print_stat() to handle larger outputs + +From: Tejun Heo + +commit f539da82f2158916e154d206054e0efd5df7ab61 upstream. + +Depending on the number of devices, blkcg stats can go over the +default seqfile buf size. seqfile normally retries with a larger +buffer but since the ->pd_stat() addition, blkcg_print_stat() doesn't +tell seqfile that overflow has happened and the output gets printed +truncated. 
Fix it by calling seq_commit() w/ -1 on possible +overflows. + +Signed-off-by: Tejun Heo +Fixes: 903d23f0a354 ("blk-cgroup: allow controllers to output their own stats") +Cc: stable@vger.kernel.org # v4.19+ +Cc: Josef Bacik +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman + +--- + block/blk-cgroup.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +--- a/block/blk-cgroup.c ++++ b/block/blk-cgroup.c +@@ -1006,8 +1006,12 @@ static int blkcg_print_stat(struct seq_f + } + next: + if (has_stats) { +- off += scnprintf(buf+off, size-off, "\n"); +- seq_commit(sf, off); ++ if (off < size - 1) { ++ off += scnprintf(buf+off, size-off, "\n"); ++ seq_commit(sf, off); ++ } else { ++ seq_commit(sf, -1); ++ } + } + } + diff --git a/queue-5.2/clk-imx-imx8mm-correct-audio_pll2_clk-to-audio_pll2_out.patch b/queue-5.2/clk-imx-imx8mm-correct-audio_pll2_clk-to-audio_pll2_out.patch new file mode 100644 index 00000000000..74f8f774552 --- /dev/null +++ b/queue-5.2/clk-imx-imx8mm-correct-audio_pll2_clk-to-audio_pll2_out.patch @@ -0,0 +1,46 @@ +From 5b933e28d8b1fbdc7fbac4bfc569f3b152c3dd59 Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Fri, 31 May 2019 15:56:38 +0800 +Subject: clk: imx: imx8mm: correct audio_pll2_clk to audio_pll2_out + +From: Peng Fan + +commit 5b933e28d8b1fbdc7fbac4bfc569f3b152c3dd59 upstream. + +There is no audio_pll2_clk registered, it should be audio_pll2_out. 
+ +Cc: +Fixes: ba5625c3e272 ("clk: imx: Add clock driver support for imx8mm") +Signed-off-by: Peng Fan +Signed-off-by: Shawn Guo +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/clk/imx/clk-imx8mm.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/drivers/clk/imx/clk-imx8mm.c ++++ b/drivers/clk/imx/clk-imx8mm.c +@@ -325,7 +325,7 @@ static const char *imx8mm_dsi_dbi_sels[] + "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", }; + + static const char *imx8mm_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", +- "sys_pll3_out", "sys_pll1_266m", "audio_pll2_clk", "sys_pll1_100m", }; ++ "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", }; + + static const char *imx8mm_csi1_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m", + "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", }; +@@ -361,11 +361,11 @@ static const char *imx8mm_pdm_sels[] = { + "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", }; + + static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m", +- "audio_pll2_clk", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", }; ++ "audio_pll2_out", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", }; + + static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; + +-static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_clk", ++static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out", + "vpu_pll", "sys_pll1_80m", }; + + static struct clk *clks[IMX8MM_CLK_END]; diff --git a/queue-5.2/dax-fix-missed-wakeup-with-pmd-faults.patch b/queue-5.2/dax-fix-missed-wakeup-with-pmd-faults.patch new file mode 100644 index 00000000000..7889291def8 --- /dev/null +++ b/queue-5.2/dax-fix-missed-wakeup-with-pmd-faults.patch @@ -0,0 +1,187 @@ +From 
23c84eb7837514e16d79ed6d849b13745e0ce688 Mon Sep 17 00:00:00 2001 +From: "Matthew Wilcox (Oracle)" +Date: Wed, 3 Jul 2019 23:21:25 -0400 +Subject: dax: Fix missed wakeup with PMD faults + +From: Matthew Wilcox (Oracle) + +commit 23c84eb7837514e16d79ed6d849b13745e0ce688 upstream. + +RocksDB can hang indefinitely when using a DAX file. This is due to +a bug in the XArray conversion when handling a PMD fault and finding a +PTE entry. We use the wrong index in the hash and end up waiting on +the wrong waitqueue. + +There's actually no need to wait; if we find a PTE entry while looking +for a PMD entry, we can return immediately as we know we should fall +back to a PTE fault (which may not conflict with the lock held). + +We reuse the XA_RETRY_ENTRY to signal a conflicting entry was found. +This value can never be found in an XArray while holding its lock, so +it does not create an ambiguity. + +Cc: +Link: http://lkml.kernel.org/r/CAPcyv4hwHpX-MkUEqxwdTj7wCCZCN4RV-L4jsnuwLGyL_UEG4A@mail.gmail.com +Fixes: b15cd800682f ("dax: Convert page fault handlers to XArray") +Signed-off-by: Matthew Wilcox (Oracle) +Tested-by: Dan Williams +Reported-by: Robert Barror +Reported-by: Seema Pandit +Reviewed-by: Jan Kara +Signed-off-by: Dan Williams +Signed-off-by: Greg Kroah-Hartman + +--- + fs/dax.c | 53 +++++++++++++++++++++++++++++++++-------------------- + 1 file changed, 33 insertions(+), 20 deletions(-) + +--- a/fs/dax.c ++++ b/fs/dax.c +@@ -124,6 +124,15 @@ static int dax_is_empty_entry(void *entr + } + + /* ++ * true if the entry that was found is of a smaller order than the entry ++ * we were looking for ++ */ ++static bool dax_is_conflict(void *entry) ++{ ++ return entry == XA_RETRY_ENTRY; ++} ++ ++/* + * DAX page cache entry locking + */ + struct exceptional_entry_key { +@@ -195,11 +204,13 @@ static void dax_wake_entry(struct xa_sta + * Look up entry in page cache, wait for it to become unlocked if it + * is a DAX entry and return it. 
The caller must subsequently call + * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry() +- * if it did. ++ * if it did. The entry returned may have a larger order than @order. ++ * If @order is larger than the order of the entry found in i_pages, this ++ * function returns a dax_is_conflict entry. + * + * Must be called with the i_pages lock held. + */ +-static void *get_unlocked_entry(struct xa_state *xas) ++static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) + { + void *entry; + struct wait_exceptional_entry_queue ewait; +@@ -210,6 +221,8 @@ static void *get_unlocked_entry(struct x + + for (;;) { + entry = xas_find_conflict(xas); ++ if (dax_entry_order(entry) < order) ++ return XA_RETRY_ENTRY; + if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) || + !dax_is_locked(entry)) + return entry; +@@ -254,7 +267,7 @@ static void wait_entry_unlocked(struct x + static void put_unlocked_entry(struct xa_state *xas, void *entry) + { + /* If we were the only waiter woken, wake the next one */ +- if (entry) ++ if (entry && !dax_is_conflict(entry)) + dax_wake_entry(xas, entry, false); + } + +@@ -461,7 +474,7 @@ void dax_unlock_page(struct page *page, + * overlap with xarray value entries. + */ + static void *grab_mapping_entry(struct xa_state *xas, +- struct address_space *mapping, unsigned long size_flag) ++ struct address_space *mapping, unsigned int order) + { + unsigned long index = xas->xa_index; + bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? 
*/ +@@ -469,20 +482,17 @@ static void *grab_mapping_entry(struct x + + retry: + xas_lock_irq(xas); +- entry = get_unlocked_entry(xas); ++ entry = get_unlocked_entry(xas, order); + + if (entry) { ++ if (dax_is_conflict(entry)) ++ goto fallback; + if (!xa_is_value(entry)) { + xas_set_err(xas, EIO); + goto out_unlock; + } + +- if (size_flag & DAX_PMD) { +- if (dax_is_pte_entry(entry)) { +- put_unlocked_entry(xas, entry); +- goto fallback; +- } +- } else { /* trying to grab a PTE entry */ ++ if (order == 0) { + if (dax_is_pmd_entry(entry) && + (dax_is_zero_entry(entry) || + dax_is_empty_entry(entry))) { +@@ -523,7 +533,11 @@ retry: + if (entry) { + dax_lock_entry(xas, entry); + } else { +- entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY); ++ unsigned long flags = DAX_EMPTY; ++ ++ if (order > 0) ++ flags |= DAX_PMD; ++ entry = dax_make_entry(pfn_to_pfn_t(0), flags); + dax_lock_entry(xas, entry); + if (xas_error(xas)) + goto out_unlock; +@@ -594,7 +608,7 @@ struct page *dax_layout_busy_page(struct + if (WARN_ON_ONCE(!xa_is_value(entry))) + continue; + if (unlikely(dax_is_locked(entry))) +- entry = get_unlocked_entry(&xas); ++ entry = get_unlocked_entry(&xas, 0); + if (entry) + page = dax_busy_page(entry); + put_unlocked_entry(&xas, entry); +@@ -621,7 +635,7 @@ static int __dax_invalidate_entry(struct + void *entry; + + xas_lock_irq(&xas); +- entry = get_unlocked_entry(&xas); ++ entry = get_unlocked_entry(&xas, 0); + if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) + goto out; + if (!trunc && +@@ -848,7 +862,7 @@ static int dax_writeback_one(struct xa_s + if (unlikely(dax_is_locked(entry))) { + void *old_entry = entry; + +- entry = get_unlocked_entry(xas); ++ entry = get_unlocked_entry(xas, 0); + + /* Entry got punched out / reallocated? */ + if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) +@@ -1509,7 +1523,7 @@ static vm_fault_t dax_iomap_pmd_fault(st + * entry is already in the array, for instance), it will return + * VM_FAULT_FALLBACK. 
+ */ +- entry = grab_mapping_entry(&xas, mapping, DAX_PMD); ++ entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); + if (xa_is_internal(entry)) { + result = xa_to_internal(entry); + goto fallback; +@@ -1658,11 +1672,10 @@ dax_insert_pfn_mkwrite(struct vm_fault * + vm_fault_t ret; + + xas_lock_irq(&xas); +- entry = get_unlocked_entry(&xas); ++ entry = get_unlocked_entry(&xas, order); + /* Did we race with someone splitting entry or so? */ +- if (!entry || +- (order == 0 && !dax_is_pte_entry(entry)) || +- (order == PMD_ORDER && !dax_is_pmd_entry(entry))) { ++ if (!entry || dax_is_conflict(entry) || ++ (order == 0 && !dax_is_pte_entry(entry))) { + put_unlocked_entry(&xas, entry); + xas_unlock_irq(&xas); + trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, diff --git a/queue-5.2/dm-bufio-fix-deadlock-with-loop-device.patch b/queue-5.2/dm-bufio-fix-deadlock-with-loop-device.patch new file mode 100644 index 00000000000..cd696e5d71c --- /dev/null +++ b/queue-5.2/dm-bufio-fix-deadlock-with-loop-device.patch @@ -0,0 +1,47 @@ +From bd293d071ffe65e645b4d8104f9d8fe15ea13862 Mon Sep 17 00:00:00 2001 +From: Junxiao Bi +Date: Tue, 9 Jul 2019 17:17:19 -0700 +Subject: dm bufio: fix deadlock with loop device + +From: Junxiao Bi + +commit bd293d071ffe65e645b4d8104f9d8fe15ea13862 upstream. + +When thin-volume is built on loop device, if available memory is low, +the following deadlock can be triggered: + +One process P1 allocates memory with GFP_FS flag, direct alloc fails, +memory reclaim invokes memory shrinker in dm_bufio, dm_bufio_shrink_scan() +runs, mutex dm_bufio_client->lock is acquired, then P1 waits for dm_buffer +IO to complete in __try_evict_buffer(). + +But this IO may never complete if issued to an underlying loop device +that forwards it using direct-IO, which allocates memory using +GFP_KERNEL (see: do_blockdev_direct_IO()). 
If allocation fails, memory +reclaim will invoke memory shrinker in dm_bufio, dm_bufio_shrink_scan() +will be invoked, and since the mutex is already held by P1 the loop +thread will hang, and IO will never complete. Resulting in ABBA +deadlock. + +Cc: stable@vger.kernel.org +Signed-off-by: Junxiao Bi +Signed-off-by: Mike Snitzer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-bufio.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +--- a/drivers/md/dm-bufio.c ++++ b/drivers/md/dm-bufio.c +@@ -1599,9 +1599,7 @@ dm_bufio_shrink_scan(struct shrinker *sh + unsigned long freed; + + c = container_of(shrink, struct dm_bufio_client, shrinker); +- if (sc->gfp_mask & __GFP_FS) +- dm_bufio_lock(c); +- else if (!dm_bufio_trylock(c)) ++ if (!dm_bufio_trylock(c)) + return SHRINK_STOP; + + freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); diff --git a/queue-5.2/dm-thin-metadata-check-if-in-fail_io-mode-when-setting-needs_check.patch b/queue-5.2/dm-thin-metadata-check-if-in-fail_io-mode-when-setting-needs_check.patch new file mode 100644 index 00000000000..56249518edd --- /dev/null +++ b/queue-5.2/dm-thin-metadata-check-if-in-fail_io-mode-when-setting-needs_check.patch @@ -0,0 +1,51 @@ +From 54fa16ee532705985e6c946da455856f18f63ee1 Mon Sep 17 00:00:00 2001 +From: Mike Snitzer +Date: Tue, 2 Jul 2019 15:50:08 -0400 +Subject: dm thin metadata: check if in fail_io mode when setting needs_check + +From: Mike Snitzer + +commit 54fa16ee532705985e6c946da455856f18f63ee1 upstream. + +Check if in fail_io mode at start of dm_pool_metadata_set_needs_check(). +Otherwise dm_pool_metadata_set_needs_check()'s superblock_lock() can +crash in dm_bm_write_lock() while accessing the block manager object +that was previously destroyed as part of a failed +dm_pool_abort_metadata() that ultimately set fail_io to begin with. + +Also, update DMERR() message to more accurately describe +superblock_lock() failure. 
+ +Cc: stable@vger.kernel.org +Reported-by: Zdenek Kabelac +Signed-off-by: Mike Snitzer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/dm-thin-metadata.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -2046,16 +2046,19 @@ int dm_pool_register_metadata_threshold( + + int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) + { +- int r; ++ int r = -EINVAL; + struct dm_block *sblock; + struct thin_disk_superblock *disk_super; + + pmd_write_lock(pmd); ++ if (pmd->fail_io) ++ goto out; ++ + pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG; + + r = superblock_lock(pmd, &sblock); + if (r) { +- DMERR("couldn't read superblock"); ++ DMERR("couldn't lock superblock"); + goto out; + } + diff --git a/queue-5.2/dt-bindings-allow-up-to-four-clocks-for-orion-mdio.patch b/queue-5.2/dt-bindings-allow-up-to-four-clocks-for-orion-mdio.patch new file mode 100644 index 00000000000..52a5b47e0a8 --- /dev/null +++ b/queue-5.2/dt-bindings-allow-up-to-four-clocks-for-orion-mdio.patch @@ -0,0 +1,34 @@ +From 80785f5a22e9073e2ded5958feb7f220e066d17b Mon Sep 17 00:00:00 2001 +From: Josua Mayer +Date: Tue, 9 Jul 2019 15:00:58 +0200 +Subject: dt-bindings: allow up to four clocks for orion-mdio + +From: Josua Mayer + +commit 80785f5a22e9073e2ded5958feb7f220e066d17b upstream. + +Armada 8040 needs four clocks to be enabled for MDIO accesses to work. +Update the binding to allow the extra clock to be specified. + +Cc: stable@vger.kernel.org +Fixes: 6d6a331f44a1 ("dt-bindings: allow up to three clocks for orion-mdio") +Reviewed-by: Andrew Lunn +Signed-off-by: Josua Mayer +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman + +--- + Documentation/devicetree/bindings/net/marvell-orion-mdio.txt | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt ++++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt +@@ -16,7 +16,7 @@ Required properties: + + Optional properties: + - interrupts: interrupt line number for the SMI error/done interrupt +-- clocks: phandle for up to three required clocks for the MDIO instance ++- clocks: phandle for up to four required clocks for the MDIO instance + + The child nodes of the MDIO driver are the individual PHY devices + connected to this MDIO bus. They must have a "reg" property given the diff --git a/queue-5.2/net-mvmdio-allow-up-to-four-clocks-to-be-specified-for-orion-mdio.patch b/queue-5.2/net-mvmdio-allow-up-to-four-clocks-to-be-specified-for-orion-mdio.patch new file mode 100644 index 00000000000..babc9d57451 --- /dev/null +++ b/queue-5.2/net-mvmdio-allow-up-to-four-clocks-to-be-specified-for-orion-mdio.patch @@ -0,0 +1,39 @@ +From 4aabed699c400810981d3dda170f05fa4d782905 Mon Sep 17 00:00:00 2001 +From: Josua Mayer +Date: Tue, 9 Jul 2019 15:00:59 +0200 +Subject: net: mvmdio: allow up to four clocks to be specified for orion-mdio + +From: Josua Mayer + +commit 4aabed699c400810981d3dda170f05fa4d782905 upstream. + +Allow up to four clocks to be specified and enabled for the orion-mdio +interface, which are required by the Armada 8k and defined in +armada-cp110.dtsi. + +Fixes a hang in probing the mvmdio driver that was encountered on the +Clearfog GT 8K with all drivers built as modules, but also affects other +boards such as the MacchiatoBIN. + +Cc: stable@vger.kernel.org +Fixes: 96cb43423822 ("net: mvmdio: allow up to three clocks to be specified for orion-mdio") +Reviewed-by: Andrew Lunn +Signed-off-by: Josua Mayer +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/net/ethernet/marvell/mvmdio.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/ethernet/marvell/mvmdio.c ++++ b/drivers/net/ethernet/marvell/mvmdio.c +@@ -64,7 +64,7 @@ + + struct orion_mdio_dev { + void __iomem *regs; +- struct clk *clk[3]; ++ struct clk *clk[4]; + /* + * If we have access to the error interrupt pin (which is + * somewhat misnamed as it not only reflects internal errors diff --git a/queue-5.2/phy-qcom-qmp-correct-ready_status-poll-break-condition.patch b/queue-5.2/phy-qcom-qmp-correct-ready_status-poll-break-condition.patch new file mode 100644 index 00000000000..b4d85cfce3b --- /dev/null +++ b/queue-5.2/phy-qcom-qmp-correct-ready_status-poll-break-condition.patch @@ -0,0 +1,75 @@ +From 885bd765963b42c380db442db7f1c0f2a26076fa Mon Sep 17 00:00:00 2001 +From: Bjorn Andersson +Date: Tue, 4 Jun 2019 16:24:43 -0700 +Subject: phy: qcom-qmp: Correct READY_STATUS poll break condition + +From: Bjorn Andersson + +commit 885bd765963b42c380db442db7f1c0f2a26076fa upstream. + +After issuing a PHY_START request to the QMP, the hardware documentation +states that the software should wait for the PCS_READY_STATUS to become +1. + +With the introduction of commit c9b589791fc1 ("phy: qcom: Utilize UFS +reset controller") an additional 1ms delay was introduced between the +start request and the check of the status bit. This greatly increases +the chances for the hardware to actually becoming ready before the +status bit is read. + +The result can be seen in that UFS PHY enabling is now reported as a +failure in 10% of the boots on SDM845, which is a clear regression from +the previous rare/occasional failure. + +This patch fixes the "break condition" of the poll to check for the +correct state of the status bit. + +Unfortunately PCIe on 8996 and 8998 does not specify the mask_pcs_ready +register, which means that the code checks a bit that's always 0. 
So the +patch also fixes these, in order to not regress these targets. + +Fixes: 73d7ec899bd8 ("phy: qcom-qmp: Add msm8998 PCIe QMP PHY support") +Fixes: e78f3d15e115 ("phy: qcom-qmp: new qmp phy driver for qcom-chipsets") +Cc: stable@vger.kernel.org +Cc: Evan Green +Cc: Marc Gonzalez +Cc: Vivek Gautam +Reviewed-by: Evan Green +Reviewed-by: Niklas Cassel +Reviewed-by: Marc Gonzalez +Tested-by: Marc Gonzalez +Signed-off-by: Bjorn Andersson +Signed-off-by: Kishon Vijay Abraham I +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/phy/qualcomm/phy-qcom-qmp.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/phy/qualcomm/phy-qcom-qmp.c ++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c +@@ -1074,6 +1074,7 @@ static const struct qmp_phy_cfg msm8996_ + + .start_ctrl = PCS_START | PLL_READY_GATE_EN, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, ++ .mask_pcs_ready = PHYSTATUS, + .mask_com_pcs_ready = PCS_READY, + + .has_phy_com_ctrl = true, +@@ -1253,6 +1254,7 @@ static const struct qmp_phy_cfg msm8998_ + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, ++ .mask_pcs_ready = PHYSTATUS, + .mask_com_pcs_ready = PCS_READY, + }; + +@@ -1547,7 +1549,7 @@ static int qcom_qmp_phy_enable(struct ph + status = pcs + cfg->regs[QPHY_PCS_READY_STATUS]; + mask = cfg->mask_pcs_ready; + +- ret = readl_poll_timeout(status, val, !(val & mask), 1, ++ ret = readl_poll_timeout(status, val, val & mask, 1, + PHY_INIT_COMPLETE_TIMEOUT); + if (ret) { + dev_err(qmp->dev, "phy initialization timed-out\n"); diff --git a/queue-5.2/pstore-fix-double-free-in-pstore_mkfile-failure-path.patch b/queue-5.2/pstore-fix-double-free-in-pstore_mkfile-failure-path.patch new file mode 100644 index 00000000000..7da6f2a421f --- /dev/null +++ b/queue-5.2/pstore-fix-double-free-in-pstore_mkfile-failure-path.patch @@ -0,0 +1,73 @@ +From 4c6d80e1144bdf48cae6b602ae30d41f3e5c76a9 Mon Sep 17 00:00:00 2001 +From: Norbert Manthey +Date: Fri, 5 Jul 2019 15:06:00 +0200 
+Subject: pstore: Fix double-free in pstore_mkfile() failure path + +From: Norbert Manthey + +commit 4c6d80e1144bdf48cae6b602ae30d41f3e5c76a9 upstream. + +The pstore_mkfile() function is passed a pointer to a struct +pstore_record. On success it consumes this 'record' pointer and +references it from the created inode. + +On failure, however, it may or may not free the record. There are even +two different code paths which return -ENOMEM -- one of which does and +the other doesn't free the record. + +Make the behaviour deterministic by never consuming and freeing the +record when returning failure, allowing the caller to do the cleanup +consistently. + +Signed-off-by: Norbert Manthey +Link: https://lore.kernel.org/r/1562331960-26198-1-git-send-email-nmanthey@amazon.de +Fixes: 83f70f0769ddd ("pstore: Do not duplicate record metadata") +Fixes: 1dfff7dd67d1a ("pstore: Pass record contents instead of copying") +Cc: stable@vger.kernel.org +[kees: also move "private" allocation location, rename inode cleanup label] +Signed-off-by: Kees Cook +Signed-off-by: Greg Kroah-Hartman + +--- + fs/pstore/inode.c | 13 ++++++------- + 1 file changed, 6 insertions(+), 7 deletions(-) + +--- a/fs/pstore/inode.c ++++ b/fs/pstore/inode.c +@@ -318,22 +318,21 @@ int pstore_mkfile(struct dentry *root, s + goto fail; + inode->i_mode = S_IFREG | 0444; + inode->i_fop = &pstore_file_operations; +- private = kzalloc(sizeof(*private), GFP_KERNEL); +- if (!private) +- goto fail_alloc; +- private->record = record; +- + scnprintf(name, sizeof(name), "%s-%s-%llu%s", + pstore_type_to_name(record->type), + record->psi->name, record->id, + record->compressed ? 
".enc.z" : ""); + ++ private = kzalloc(sizeof(*private), GFP_KERNEL); ++ if (!private) ++ goto fail_inode; ++ + dentry = d_alloc_name(root, name); + if (!dentry) + goto fail_private; + ++ private->record = record; + inode->i_size = private->total_size = size; +- + inode->i_private = private; + + if (record->time.tv_sec) +@@ -349,7 +348,7 @@ int pstore_mkfile(struct dentry *root, s + + fail_private: + free_pstore_private(private); +-fail_alloc: ++fail_inode: + iput(inode); + + fail: diff --git a/queue-5.2/series b/queue-5.2/series index ba60f6e0b41..0429a1f4ff7 100644 --- a/queue-5.2/series +++ b/queue-5.2/series @@ -399,3 +399,15 @@ mtd-spinand-read-returns-badly-if-the-last-page-has-bitflips.patch intel_th-msu-remove-set-but-not-used-variable-last.patch intel_th-msu-fix-single-mode-with-disabled-iommu.patch bluetooth-add-smp-workaround-microsoft-surface-precision-mouse-bug.patch +dax-fix-missed-wakeup-with-pmd-faults.patch +usb-handle-usb3-remote-wakeup-for-lpm-enabled-devices-correctly.patch +blk-throttle-fix-zero-wait-time-for-iops-throttled-group.patch +clk-imx-imx8mm-correct-audio_pll2_clk-to-audio_pll2_out.patch +blk-iolatency-clear-use_delay-when-io.latency-is-set-to-zero.patch +blkcg-update-blkcg_print_stat-to-handle-larger-outputs.patch +net-mvmdio-allow-up-to-four-clocks-to-be-specified-for-orion-mdio.patch +dt-bindings-allow-up-to-four-clocks-for-orion-mdio.patch +pstore-fix-double-free-in-pstore_mkfile-failure-path.patch +phy-qcom-qmp-correct-ready_status-poll-break-condition.patch +dm-thin-metadata-check-if-in-fail_io-mode-when-setting-needs_check.patch +dm-bufio-fix-deadlock-with-loop-device.patch diff --git a/queue-5.2/usb-handle-usb3-remote-wakeup-for-lpm-enabled-devices-correctly.patch b/queue-5.2/usb-handle-usb3-remote-wakeup-for-lpm-enabled-devices-correctly.patch new file mode 100644 index 00000000000..c465a74775d --- /dev/null +++ b/queue-5.2/usb-handle-usb3-remote-wakeup-for-lpm-enabled-devices-correctly.patch @@ -0,0 +1,59 @@ +From 
e244c4699f859cf7149b0781b1894c7996a8a1df Mon Sep 17 00:00:00 2001 +From: "Lee, Chiasheng" +Date: Thu, 20 Jun 2019 10:56:04 +0300 +Subject: usb: Handle USB3 remote wakeup for LPM enabled devices correctly + +From: Lee, Chiasheng + +commit e244c4699f859cf7149b0781b1894c7996a8a1df upstream. + +With Link Power Management (LPM) enabled USB3 links transition to low +power U1/U2 link states from U0 state automatically. + +Current hub code detects USB3 remote wakeups by checking if the software +state still shows suspended, but the link has transitioned from suspended +U3 to enabled U0 state. + +As it takes some time before the hub thread reads the port link state +after a USB3 wake notification, the link may have transitioned from U0 +to U1/U2, and wake is not detected by hub code. + +Fix this by handling U1/U2 states in the same way as U0 in USB3 wakeup +handling + +This patch should be added to stable kernels since 4.13 where LPM was +kept enabled during suspend/resume + +Cc: # v4.13+ +Signed-off-by: Lee, Chiasheng +Signed-off-by: Mathias Nyman +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/usb/core/hub.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -3617,6 +3617,7 @@ static int hub_handle_remote_wakeup(stru + struct usb_device *hdev; + struct usb_device *udev; + int connect_change = 0; ++ u16 link_state; + int ret; + + hdev = hub->hdev; +@@ -3626,9 +3627,11 @@ static int hub_handle_remote_wakeup(stru + return 0; + usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND); + } else { ++ link_state = portstatus & USB_PORT_STAT_LINK_STATE; + if (!udev || udev->state != USB_STATE_SUSPENDED || +- (portstatus & USB_PORT_STAT_LINK_STATE) != +- USB_SS_PORT_LS_U0) ++ (link_state != USB_SS_PORT_LS_U0 && ++ link_state != USB_SS_PORT_LS_U1 && ++ link_state != USB_SS_PORT_LS_U2)) + return 0; + } +