git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
fixes for 4.9
author	Sasha Levin <sashal@kernel.org>
	Fri, 7 Feb 2020 02:08:05 +0000 (21:08 -0500)
committer	Sasha Levin <sashal@kernel.org>
	Fri, 7 Feb 2020 02:08:05 +0000 (21:08 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.9/pci-keystone-fix-link-training-retries-initiation.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/ubifs-change-gfp-flags-in-page-allocation-for-bulk-r.patch [new file with mode: 0644]
queue-4.9/ubifs-fix-deadlock-in-concurrent-bulk-read-and-write.patch [new file with mode: 0644]

diff --git a/queue-4.9/pci-keystone-fix-link-training-retries-initiation.patch b/queue-4.9/pci-keystone-fix-link-training-retries-initiation.patch
new file mode 100644
index 0000000..04ea2eb
--- /dev/null
+++ b/queue-4.9/pci-keystone-fix-link-training-retries-initiation.patch
@@ -0,0 +1,40 @@
+From c77751824ba6a2d52a9684cb1b09cebdb6856375 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Dec 2019 14:38:36 +0300
+Subject: PCI: keystone: Fix link training retries initiation
+
+From: Yurii Monakov <monakov.y@gmail.com>
+
+[ Upstream commit 6df19872d881641e6394f93ef2938cffcbdae5bb ]
+
+The ks_pcie_stop_link() function does not clear the LTSSM_EN_VAL bit,
+so link training was not triggered more than once after startup.
+In configurations where the link can be unstable during early boot,
+for example at low temperature, it will never be established.
+
+Fixes: 0c4ffcfe1fbc ("PCI: keystone: Add TI Keystone PCIe driver")
+Signed-off-by: Yurii Monakov <monakov.y@gmail.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Acked-by: Andrew Murray <andrew.murray@arm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/host/pci-keystone-dw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
+index 9397c46671062..f011a8780ff53 100644
+--- a/drivers/pci/host/pci-keystone-dw.c
++++ b/drivers/pci/host/pci-keystone-dw.c
+@@ -502,7 +502,7 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+       /* Disable Link training */
+       val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+       val &= ~LTSSM_EN_VAL;
+-      ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
++      ks_dw_app_writel(ks_pcie, CMD_STATUS, val);
+
+      /* Initiate Link Training */
+       val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+-- 
+2.20.1
+
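
For context, the one-line fix above restores a true read-modify-write
toggle of LTSSM_EN_VAL: the bit is actually cleared before link training
is re-initiated, so the LTSSM sees a fresh 0 -> 1 edge and retries
training. A minimal sketch of that sequence, reusing the helpers visible
in the hunk; the wrapper name is hypothetical, and the re-enable half is
assumed from the "/* Initiate Link Training */" comment rather than
shown in the diff:

	/* Hypothetical sketch; mirrors ks_dw_pcie_initiate_link_train(). */
	static void restart_link_training(struct keystone_pcie *ks_pcie)
	{
		u32 val;

		/* Disable link training: clear (not re-set) LTSSM_EN_VAL. */
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
		ks_dw_app_writel(ks_pcie, CMD_STATUS, val & ~LTSSM_EN_VAL);

		/* Initiate link training: set the bit again, giving the
		 * controller the 0 -> 1 transition it keys on.
		 */
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
		ks_dw_app_writel(ks_pcie, CMD_STATUS, val | LTSSM_EN_VAL);
	}
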
diff --git a/queue-4.9/series b/queue-4.9/series
index 0dfbb67f8335d75b2777447b16238a90d17cb777..69a6e44292a22af40d61221fa8fc198da98a6d0f 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -25,3 +25,6 @@ lib-test_kasan.c-fix-memory-leak-in-kmalloc_oob_krealloc_more.patch
 s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch
 powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch
 mmc-spi-toggle-spi-polarity-do-not-hardcode-it.patch
+pci-keystone-fix-link-training-retries-initiation.patch
+ubifs-change-gfp-flags-in-page-allocation-for-bulk-r.patch
+ubifs-fix-deadlock-in-concurrent-bulk-read-and-write.patch
diff --git a/queue-4.9/ubifs-change-gfp-flags-in-page-allocation-for-bulk-r.patch b/queue-4.9/ubifs-change-gfp-flags-in-page-allocation-for-bulk-r.patch
new file mode 100644
index 0000000..ccada43
--- /dev/null
+++ b/queue-4.9/ubifs-change-gfp-flags-in-page-allocation-for-bulk-r.patch
@@ -0,0 +1,55 @@
+From 7c95626170aec4766069348040979c0cd76da20b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2017 09:31:49 +0900
+Subject: ubifs: Change gfp flags in page allocation for bulk read
+
+From: Hyunchul Lee <cheol.lee@lge.com>
+
+[ Upstream commit 480a1a6a3ef6fb6be4cd2f37b34314fbf64867dd ]
+
+In low-memory situations, page allocations for bulk read
+can kill applications while reclaiming memory, and a
+failure message is printed when an allocation fails.
+Because bulk read is just an optimization, we don't have
+to do either and can simply stop allocating pages.
+
+Though this situation happens rarely, add __GFP_NORETRY
+to prevent excessive memory reclaim and the killing of
+applications, and __GFP_NOWARN to suppress the failure
+message.
+
+For this, use readahead_gfp_mask() for the gfp flags when
+allocating pages.
+
+Signed-off-by: Hyunchul Lee <cheol.lee@lge.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ubifs/file.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index b4fbeefba246a..f2e6162f8e656 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -721,6 +721,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+       int err, page_idx, page_cnt, ret = 0, n = 0;
+       int allocate = bu->buf ? 0 : 1;
+       loff_t isize;
++      gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;
+
+       err = ubifs_tnc_get_bu_keys(c, bu);
+       if (err)
+@@ -782,8 +783,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+
+               if (page_offset > end_index)
+                       break;
+-              page = find_or_create_page(mapping, page_offset,
+-                                         GFP_NOFS | __GFP_COLD);
++              page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
+               if (!page)
+                       break;
+               if (!PageUptodate(page))
+-- 
+2.20.1
+
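
For context, the patch above swaps the hard-coded GFP_NOFS | __GFP_COLD
for readahead_gfp_mask(), which marks the allocation as best-effort. A
rough expansion of the resulting mask, assuming the 4.9-era definition
of readahead_gfp_mask() in include/linux/pagemap.h:

	/* Best-effort readahead allocation: give up early, stay quiet. */
	gfp_t ra_gfp_mask = mapping_gfp_mask(mapping)
			    | __GFP_COLD	/* cache-cold page is fine */
			    | __GFP_NORETRY	/* no reclaim retries, no OOM kills */
			    | __GFP_NOWARN;	/* no allocation-failure warning */

	/* UBIFS also masks out __GFP_FS (see the hunk above) so the
	 * allocation cannot recurse into the filesystem under reclaim.
	 */
	ra_gfp_mask &= ~__GFP_FS;
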
diff --git a/queue-4.9/ubifs-fix-deadlock-in-concurrent-bulk-read-and-write.patch b/queue-4.9/ubifs-fix-deadlock-in-concurrent-bulk-read-and-write.patch
new file mode 100644
index 0000000..274aced
--- /dev/null
+++ b/queue-4.9/ubifs-fix-deadlock-in-concurrent-bulk-read-and-write.patch
@@ -0,0 +1,63 @@
+From e17aeb9264cdadcaebc9f4db3728d93d95fa0ab2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 11 Jan 2020 17:50:36 +0800
+Subject: ubifs: Fix deadlock in concurrent bulk-read and writepage
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+[ Upstream commit f5de5b83303e61b1f3fb09bd77ce3ac2d7a475f2 ]
+
+In ubifs, concurrent execution of writepage and bulk read on the same file
+may cause an ABBA deadlock, for example (see the Link below for a reproducer):
+
+Process A(Bulk-read starts from page4)         Process B(write page4 back)
+  vfs_read                                       wb_workfn or fsync
+  ...                                            ...
+  generic_file_buffered_read                     write_cache_pages
+    ubifs_readpage                                 LOCK(page4)
+
+      ubifs_bulk_read                              ubifs_writepage
+        LOCK(ui->ui_mutex)                           ubifs_write_inode
+
+         ubifs_do_bulk_read                           LOCK(ui->ui_mutex)
+           find_or_create_page(alloc page4)                  ↑
+             LOCK(page4)                   <--     ABBA deadlock occurs!
+
+In order to ensure serialized execution of bulk read, we can't
+remove the big lock 'ui->ui_mutex' in ubifs_bulk_read(). Instead,
+we allow ubifs_do_bulk_read() to fail to lock the page, by replacing
+find_or_create_page() (FGP_LOCK) with
+pagecache_get_page() (FGP_LOCK | FGP_NOWAIT).
+
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Suggested-by: zhangyi (F) <yi.zhang@huawei.com>
+Cc: <Stable@vger.kernel.org>
+Fixes: 4793e7c5e1c ("UBIFS: add bulk-read facility")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=206153
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ubifs/file.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index f2e6162f8e656..5ef0d1d607431 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -783,7 +783,9 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+
+               if (page_offset > end_index)
+                       break;
+-              page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
++              page = pagecache_get_page(mapping, page_offset,
++                               FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
++                               ra_gfp_mask);
+               if (!page)
+                       break;
+               if (!PageUptodate(page))
+-- 
+2.20.1
+
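
For context, FGP_NOWAIT is what breaks the ABBA cycle: it turns the
blocking lock_page() inside the page-cache lookup into a trylock, so
bulk read backs off instead of sleeping on a page that writepage
already holds. A rough equivalent of the new call, assuming 4.9's
pagecache_get_page() semantics and simplifying away the lookup/create
race the real helper handles:

	/* Try to get the page locked without sleeping on its lock. */
	page = find_get_page(mapping, page_offset);
	if (page) {
		if (!trylock_page(page)) {
			/* Held elsewhere (e.g. writepage): back off. */
			put_page(page);
			page = NULL;
		}
	} else {
		/* Not cached: allocate and insert a new, locked page. */
		page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
	}
	if (!page)
		break;	/* skip bulk read for this page; not fatal */
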