From af692e117cb8cd9d3d844d413095775abc1217f9 Mon Sep 17 00:00:00 2001
From: Gao Xiang <gaoxiang25@huawei.com>
Date: Wed, 27 Feb 2019 13:33:30 +0800
Subject: staging: erofs: compressed_pages should not be accessed again after freed

From: Gao Xiang <gaoxiang25@huawei.com>

commit af692e117cb8cd9d3d844d413095775abc1217f9 upstream.

This patch resolves the following page use-after-free issue in
z_erofs_vle_unzip():

    ...
    for (i = 0; i < nr_pages; ++i) {
            ...
            z_erofs_onlinepage_endio(page);                          (1)
    }

    for (i = 0; i < clusterpages; ++i) {
            page = compressed_pages[i];

            if (page->mapping == mngda)                              (2)
                    continue;
            /* recycle all individual staging pages */
            (void)z_erofs_gather_if_stagingpage(page_pool, page);    (3)
            WRITE_ONCE(compressed_pages[i], NULL);
    }
    ...

After (1) is executed, the page is freed and can be reused at any
time. If compressed_pages is scanned after that, a stale page can
mistakenly fall into case (2) or (3), and everything finally ends up
in a mess.
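
Once (1) drops the last reference, the page can be returned to the
allocator and handed to an unrelated owner, so a later page->mapping
load observes someone else's state. A minimal sketch of the hazard,
reusing the names from the excerpt above (the real reference counting
is more involved):

    z_erofs_onlinepage_endio(page);  /* last user gone, page may be freed */
    /* the allocator is now free to reuse the page elsewhere */
    if (page->mapping == mngda)      /* use-after-free read of a stale page */
            continue;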

This patch aims to solve the above issue with as few changes as
possible in order to make the fix easier to backport.
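
Concretely, the hunks below move the compressed_pages cleanup ahead of
the endio loop (and lift the endio callback out of
z_erofs_vle_unzip_fast_percpu() into its caller), so every compressed
page is recycled while it is still guaranteed to be alive. A condensed
view of the reordered tail of z_erofs_vle_unzip() after this patch:

    out:
            /* recycle compressed/staging pages while they are still valid */
            for (i = 0; i < clusterpages; ++i) {
                    ...
                    WRITE_ONCE(compressed_pages[i], NULL);
            }

            /* only now end the online pages; each may be freed and reused */
            for (i = 0; i < nr_pages; ++i) {
                    ...
                    z_erofs_onlinepage_endio(page);
            }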
35
36 Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
37 Cc: <stable@vger.kernel.org> # 4.19+
38 Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
39 Reviewed-by: Chao Yu <yuchao0@huawei.com>
40 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
41
42
43 ---
 drivers/staging/erofs/unzip_vle.c     |   38 +++++++++++++++++-----------------
 drivers/staging/erofs/unzip_vle.h     |    3 --
 drivers/staging/erofs/unzip_vle_lz4.c |   19 +++++++----------
 3 files changed, 29 insertions(+), 31 deletions(-)

--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1017,11 +1017,10 @@ repeat:
         if (llen > grp->llen)
                 llen = grp->llen;
 
-        err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
-                clusterpages, pages, llen, work->pageofs,
-                z_erofs_onlinepage_endio);
+        err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
+                                            pages, llen, work->pageofs);
         if (err != -ENOTSUPP)
-                goto out_percpu;
+                goto out;
 
         if (sparsemem_pages >= nr_pages)
                 goto skip_allocpage;
@@ -1042,8 +1041,25 @@ skip_allocpage:
         erofs_vunmap(vout, nr_pages);
 
 out:
+        /* must handle all compressed pages before ending pages */
+        for (i = 0; i < clusterpages; ++i) {
+                page = compressed_pages[i];
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+                if (page->mapping == MNGD_MAPPING(sbi))
+                        continue;
+#endif
+                /* recycle all individual staging pages */
+                (void)z_erofs_gather_if_stagingpage(page_pool, page);
+
+                WRITE_ONCE(compressed_pages[i], NULL);
+        }
+
         for (i = 0; i < nr_pages; ++i) {
                 page = pages[i];
+                if (!page)
+                        continue;
+
                 DBG_BUGON(!page->mapping);
 
                 /* recycle all individual staging pages */
@@ -1056,20 +1072,6 @@ out:
                 z_erofs_onlinepage_endio(page);
         }
 
-out_percpu:
-        for (i = 0; i < clusterpages; ++i) {
-                page = compressed_pages[i];
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-                if (page->mapping == MNGD_MAPPING(sbi))
-                        continue;
-#endif
-                /* recycle all individual staging pages */
-                (void)z_erofs_gather_if_stagingpage(page_pool, page);
-
-                WRITE_ONCE(compressed_pages[i], NULL);
-        }
-
         if (pages == z_pagemap_global)
                 mutex_unlock(&z_pagemap_global_lock);
         else if (unlikely(pages != pages_onstack))
--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -218,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct
 
 extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
         unsigned clusterpages, struct page **pages,
-        unsigned outlen, unsigned short pageofs,
-        void (*endio)(struct page *));
+        unsigned int outlen, unsigned short pageofs);
 
 extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
         unsigned clusterpages, void *vaddr, unsigned llen,
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -125,8 +125,7 @@ int z_erofs_vle_unzip_fast_percpu(struct
                                   unsigned int clusterpages,
                                   struct page **pages,
                                   unsigned int outlen,
-                                  unsigned short pageofs,
-                                  void (*endio)(struct page *))
+                                  unsigned short pageofs)
 {
         void *vin, *vout;
         unsigned int nr_pages, i, j;
@@ -148,19 +147,16 @@ int z_erofs_vle_unzip_fast_percpu(struct
         ret = z_erofs_unzip_lz4(vin, vout + pageofs,
                                 clusterpages * PAGE_SIZE, outlen);
 
-        if (ret >= 0) {
-                outlen = ret;
-                ret = 0;
-        }
+        if (ret < 0)
+                goto out;
+        ret = 0;
 
         for (i = 0; i < nr_pages; ++i) {
                 j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
 
                 if (pages[i]) {
-                        if (ret < 0) {
-                                SetPageError(pages[i]);
-                        } else if (clusterpages == 1 &&
-                                   pages[i] == compressed_pages[0]) {
+                        if (clusterpages == 1 &&
+                            pages[i] == compressed_pages[0]) {
                                 memcpy(vin + pageofs, vout + pageofs, j);
                         } else {
                                 void *dst = kmap_atomic(pages[i]);
@@ -168,12 +164,13 @@ int z_erofs_vle_unzip_fast_percpu(struct
                                 memcpy(dst + pageofs, vout + pageofs, j);
                                 kunmap_atomic(dst);
                         }
-                        endio(pages[i]);
                 }
                 vout += PAGE_SIZE;
                 outlen -= j;
                 pageofs = 0;
         }
+
+out:
         preempt_enable();
 
         if (clusterpages == 1)