1 From af692e117cb8cd9d3d844d413095775abc1217f9 Mon Sep 17 00:00:00 2001
2 From: Gao Xiang <gaoxiang25@huawei.com>
3 Date: Wed, 27 Feb 2019 13:33:30 +0800
4 Subject: staging: erofs: compressed_pages should not be accessed again after freed
6 From: Gao Xiang <gaoxiang25@huawei.com>
8 commit af692e117cb8cd9d3d844d413095775abc1217f9 upstream.
10 This patch resolves the following page use-after-free issue,
13 for (i = 0; i < nr_pages; ++i) {
15 z_erofs_onlinepage_endio(page); (1)
18 for (i = 0; i < clusterpages; ++i) {
19 page = compressed_pages[i];
21 if (page->mapping == mngda) (2)
23 /* recycle all individual staging pages */
24 (void)z_erofs_gather_if_stagingpage(page_pool, page); (3)
25 WRITE_ONCE(compressed_pages[i], NULL);
29 After (1) is executed, page is freed and could then be reused; if
30 compressed_pages is scanned after that, it could fall into (2) or
31 (3) by mistake and that could finally be in a mess.
33 This patch aims to solve the above issue with as few changes
34 as possible in order to make the fix easier to backport.
36 Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
37 Cc: <stable@vger.kernel.org> # 4.19+
38 Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
39 Reviewed-by: Chao Yu <yuchao0@huawei.com>
40 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
44 drivers/staging/erofs/unzip_vle.c | 38 +++++++++++++++++-----------------
45 drivers/staging/erofs/unzip_vle.h | 3 --
46 drivers/staging/erofs/unzip_vle_lz4.c | 19 +++++++----------
47 3 files changed, 29 insertions(+), 31 deletions(-)
49 --- a/drivers/staging/erofs/unzip_vle.c
50 +++ b/drivers/staging/erofs/unzip_vle.c
51 @@ -1017,11 +1017,10 @@ repeat:
55 - err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
56 - clusterpages, pages, llen, work->pageofs,
57 - z_erofs_onlinepage_endio);
58 + err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
59 + pages, llen, work->pageofs);
64 if (sparsemem_pages >= nr_pages)
66 @@ -1042,8 +1041,25 @@ skip_allocpage:
67 erofs_vunmap(vout, nr_pages);
70 +	/* must handle all compressed pages before ending pages */
71 + for (i = 0; i < clusterpages; ++i) {
72 + page = compressed_pages[i];
74 +#ifdef EROFS_FS_HAS_MANAGED_CACHE
75 + if (page->mapping == MNGD_MAPPING(sbi))
78 + /* recycle all individual staging pages */
79 + (void)z_erofs_gather_if_stagingpage(page_pool, page);
81 + WRITE_ONCE(compressed_pages[i], NULL);
84 for (i = 0; i < nr_pages; ++i) {
89 DBG_BUGON(!page->mapping);
91 /* recycle all individual staging pages */
92 @@ -1056,20 +1072,6 @@ out:
93 z_erofs_onlinepage_endio(page);
97 - for (i = 0; i < clusterpages; ++i) {
98 - page = compressed_pages[i];
100 -#ifdef EROFS_FS_HAS_MANAGED_CACHE
101 - if (page->mapping == MNGD_MAPPING(sbi))
104 - /* recycle all individual staging pages */
105 - (void)z_erofs_gather_if_stagingpage(page_pool, page);
107 - WRITE_ONCE(compressed_pages[i], NULL);
110 if (pages == z_pagemap_global)
111 mutex_unlock(&z_pagemap_global_lock);
112 else if (unlikely(pages != pages_onstack))
113 --- a/drivers/staging/erofs/unzip_vle.h
114 +++ b/drivers/staging/erofs/unzip_vle.h
115 @@ -218,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct
117 extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
118 unsigned clusterpages, struct page **pages,
119 - unsigned outlen, unsigned short pageofs,
120 - void (*endio)(struct page *));
121 + unsigned int outlen, unsigned short pageofs);
123 extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
124 unsigned clusterpages, void *vaddr, unsigned llen,
125 --- a/drivers/staging/erofs/unzip_vle_lz4.c
126 +++ b/drivers/staging/erofs/unzip_vle_lz4.c
127 @@ -125,8 +125,7 @@ int z_erofs_vle_unzip_fast_percpu(struct
128 unsigned int clusterpages,
131 - unsigned short pageofs,
132 - void (*endio)(struct page *))
133 + unsigned short pageofs)
136 unsigned int nr_pages, i, j;
137 @@ -148,19 +147,16 @@ int z_erofs_vle_unzip_fast_percpu(struct
138 ret = z_erofs_unzip_lz4(vin, vout + pageofs,
139 clusterpages * PAGE_SIZE, outlen);
149 for (i = 0; i < nr_pages; ++i) {
150 j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
154 - SetPageError(pages[i]);
155 - } else if (clusterpages == 1 &&
156 - pages[i] == compressed_pages[0]) {
157 + if (clusterpages == 1 &&
158 + pages[i] == compressed_pages[0]) {
159 memcpy(vin + pageofs, vout + pageofs, j);
161 void *dst = kmap_atomic(pages[i]);
162 @@ -168,12 +164,13 @@ int z_erofs_vle_unzip_fast_percpu(struct
163 memcpy(dst + pageofs, vout + pageofs, j);
176 if (clusterpages == 1)