git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.20-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 7 Mar 2019 17:49:01 +0000 (18:49 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 7 Mar 2019 17:49:01 +0000 (18:49 +0100)
added patches:
staging-android-ashmem-avoid-range_alloc-allocation-with-ashmem_mutex-held.patch
staging-android-ashmem-don-t-call-fallocate-with-ashmem_mutex-held.patch
staging-android-ion-fix-sys-heap-pool-s-gfp_flags.patch
staging-comedi-ni_660x-fix-missing-break-in-switch-statement.patch
staging-erofs-compressed_pages-should-not-be-accessed-again-after-freed.patch
staging-erofs-fix-fast-symlink-w-o-xattr-when-fs-xattr-is-on.patch
staging-erofs-fix-illegal-address-access-under-memory-pressure.patch
staging-erofs-fix-memleak-of-inode-s-shared-xattr-array.patch
staging-erofs-fix-race-of-initializing-xattrs-of-a-inode-at-the-same-time.patch
staging-wilc1000-fix-to-set-correct-value-for-vif_num.patch

queue-4.20/series
queue-4.20/staging-android-ashmem-avoid-range_alloc-allocation-with-ashmem_mutex-held.patch [new file with mode: 0644]
queue-4.20/staging-android-ashmem-don-t-call-fallocate-with-ashmem_mutex-held.patch [new file with mode: 0644]
queue-4.20/staging-android-ion-fix-sys-heap-pool-s-gfp_flags.patch [new file with mode: 0644]
queue-4.20/staging-comedi-ni_660x-fix-missing-break-in-switch-statement.patch [new file with mode: 0644]
queue-4.20/staging-erofs-compressed_pages-should-not-be-accessed-again-after-freed.patch [new file with mode: 0644]
queue-4.20/staging-erofs-fix-fast-symlink-w-o-xattr-when-fs-xattr-is-on.patch [new file with mode: 0644]
queue-4.20/staging-erofs-fix-illegal-address-access-under-memory-pressure.patch [new file with mode: 0644]
queue-4.20/staging-erofs-fix-memleak-of-inode-s-shared-xattr-array.patch [new file with mode: 0644]
queue-4.20/staging-erofs-fix-race-of-initializing-xattrs-of-a-inode-at-the-same-time.patch [new file with mode: 0644]
queue-4.20/staging-wilc1000-fix-to-set-correct-value-for-vif_num.patch [new file with mode: 0644]

index 0c5666c574d168895ec78965803cde7caf59e5e6..50bc83692b095fd92f3b7c8e09c328a9381a5dce 100644 (file)
@@ -5,3 +5,13 @@ usb-serial-option-add-telit-me910-ecm-composition.patch
 usb-serial-cp210x-add-id-for-ingenico-3070.patch
 usb-serial-ftdi_sio-add-id-for-hjelmslund-electronics-usb485.patch
 driver-core-postpone-dma-tear-down-until-after-devres-release.patch
+staging-erofs-fix-fast-symlink-w-o-xattr-when-fs-xattr-is-on.patch
+staging-erofs-fix-memleak-of-inode-s-shared-xattr-array.patch
+staging-erofs-fix-race-of-initializing-xattrs-of-a-inode-at-the-same-time.patch
+staging-erofs-fix-illegal-address-access-under-memory-pressure.patch
+staging-erofs-compressed_pages-should-not-be-accessed-again-after-freed.patch
+staging-comedi-ni_660x-fix-missing-break-in-switch-statement.patch
+staging-wilc1000-fix-to-set-correct-value-for-vif_num.patch
+staging-android-ion-fix-sys-heap-pool-s-gfp_flags.patch
+staging-android-ashmem-don-t-call-fallocate-with-ashmem_mutex-held.patch
+staging-android-ashmem-avoid-range_alloc-allocation-with-ashmem_mutex-held.patch
diff --git a/queue-4.20/staging-android-ashmem-avoid-range_alloc-allocation-with-ashmem_mutex-held.patch b/queue-4.20/staging-android-ashmem-avoid-range_alloc-allocation-with-ashmem_mutex-held.patch
new file mode 100644 (file)
index 0000000..92e4984
--- /dev/null
@@ -0,0 +1,145 @@
+From ecd182cbf4e107928077866399100228d2359c60 Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Fri, 22 Feb 2019 20:03:55 +0900
+Subject: staging: android: ashmem: Avoid range_alloc() allocation with ashmem_mutex held.
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit ecd182cbf4e107928077866399100228d2359c60 upstream.
+
+ashmem_pin() is calling range_shrink() without checking whether
+range_alloc() succeeded. Also, doing memory allocation with ashmem_mutex
+held should be avoided because ashmem_shrink_scan() tries to hold it.
+
+Therefore, move memory allocation for range_alloc() to ashmem_pin_unpin()
+and make range_alloc() not to fail.
+
+This patch is mostly meant for backporting purpose for fuzz testing on
+stable/distributor kernels, for there is a plan to remove this code in
+near future.
+
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Cc: stable@vger.kernel.org
+Reviewed-by: Joel Fernandes <joel@joelfernandes.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/android/ashmem.c |   42 ++++++++++++++++++++++-----------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -171,19 +171,15 @@ static inline void lru_del(struct ashmem
+  * @end:         The ending page (inclusive)
+  *
+  * This function is protected by ashmem_mutex.
+- *
+- * Return: 0 if successful, or -ENOMEM if there is an error
+  */
+-static int range_alloc(struct ashmem_area *asma,
+-                     struct ashmem_range *prev_range, unsigned int purged,
+-                     size_t start, size_t end)
++static void range_alloc(struct ashmem_area *asma,
++                      struct ashmem_range *prev_range, unsigned int purged,
++                      size_t start, size_t end,
++                      struct ashmem_range **new_range)
+ {
+-      struct ashmem_range *range;
+-
+-      range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+-      if (!range)
+-              return -ENOMEM;
++      struct ashmem_range *range = *new_range;
++      *new_range = NULL;
+       range->asma = asma;
+       range->pgstart = start;
+       range->pgend = end;
+@@ -193,8 +189,6 @@ static int range_alloc(struct ashmem_are
+       if (range_on_lru(range))
+               lru_add(range);
+-
+-      return 0;
+ }
+ /**
+@@ -596,7 +590,8 @@ static int get_name(struct ashmem_area *
+  *
+  * Caller must hold ashmem_mutex.
+  */
+-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
++static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
++                    struct ashmem_range **new_range)
+ {
+       struct ashmem_range *range, *next;
+       int ret = ASHMEM_NOT_PURGED;
+@@ -649,7 +644,7 @@ static int ashmem_pin(struct ashmem_area
+                        * second half and adjust the first chunk's endpoint.
+                        */
+                       range_alloc(asma, range, range->purged,
+-                                  pgend + 1, range->pgend);
++                                  pgend + 1, range->pgend, new_range);
+                       range_shrink(range, range->pgstart, pgstart - 1);
+                       break;
+               }
+@@ -663,7 +658,8 @@ static int ashmem_pin(struct ashmem_area
+  *
+  * Caller must hold ashmem_mutex.
+  */
+-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
++static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
++                      struct ashmem_range **new_range)
+ {
+       struct ashmem_range *range, *next;
+       unsigned int purged = ASHMEM_NOT_PURGED;
+@@ -689,7 +685,8 @@ restart:
+               }
+       }
+-      return range_alloc(asma, range, purged, pgstart, pgend);
++      range_alloc(asma, range, purged, pgstart, pgend, new_range);
++      return 0;
+ }
+ /*
+@@ -722,10 +719,17 @@ static int ashmem_pin_unpin(struct ashme
+       struct ashmem_pin pin;
+       size_t pgstart, pgend;
+       int ret = -EINVAL;
++      struct ashmem_range *range = NULL;
+       if (copy_from_user(&pin, p, sizeof(pin)))
+               return -EFAULT;
++      if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
++              range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
++              if (!range)
++                      return -ENOMEM;
++      }
++
+       mutex_lock(&ashmem_mutex);
+       wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
+@@ -750,10 +754,10 @@ static int ashmem_pin_unpin(struct ashme
+       switch (cmd) {
+       case ASHMEM_PIN:
+-              ret = ashmem_pin(asma, pgstart, pgend);
++              ret = ashmem_pin(asma, pgstart, pgend, &range);
+               break;
+       case ASHMEM_UNPIN:
+-              ret = ashmem_unpin(asma, pgstart, pgend);
++              ret = ashmem_unpin(asma, pgstart, pgend, &range);
+               break;
+       case ASHMEM_GET_PIN_STATUS:
+               ret = ashmem_get_pin_status(asma, pgstart, pgend);
+@@ -762,6 +766,8 @@ static int ashmem_pin_unpin(struct ashme
+ out_unlock:
+       mutex_unlock(&ashmem_mutex);
++      if (range)
++              kmem_cache_free(ashmem_range_cachep, range);
+       return ret;
+ }
diff --git a/queue-4.20/staging-android-ashmem-don-t-call-fallocate-with-ashmem_mutex-held.patch b/queue-4.20/staging-android-ashmem-don-t-call-fallocate-with-ashmem_mutex-held.patch
new file mode 100644 (file)
index 0000000..7c27db0
--- /dev/null
@@ -0,0 +1,95 @@
+From fb4415a12632f0b9078a0aa80c16745d48fcfc74 Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Tue, 5 Feb 2019 19:28:40 +0900
+Subject: staging: android: ashmem: Don't call fallocate() with ashmem_mutex held.
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit fb4415a12632f0b9078a0aa80c16745d48fcfc74 upstream.
+
+syzbot is hitting lockdep warnings [1][2][3]. This patch tries to fix
+the warning by eliminating ashmem_shrink_scan() => {shmem|vfs}_fallocate()
+sequence.
+
+[1] https://syzkaller.appspot.com/bug?id=87c399f6fa6955006080b24142e2ce7680295ad4
+[2] https://syzkaller.appspot.com/bug?id=7ebea492de7521048355fc84210220e1038a7908
+[3] https://syzkaller.appspot.com/bug?id=e02419c12131c24e2a957ea050c2ab6dcbbc3270
+
+Reported-by: syzbot <syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com>
+Reported-by: syzbot <syzbot+148c2885d71194f18d28@syzkaller.appspotmail.com>
+Reported-by: syzbot <syzbot+4b8b031b89e6b96c4b2e@syzkaller.appspotmail.com>
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Cc: stable@vger.kernel.org
+Acked-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/android/ashmem.c |   25 ++++++++++++++++++++-----
+ 1 file changed, 20 insertions(+), 5 deletions(-)
+
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -75,6 +75,9 @@ struct ashmem_range {
+ /* LRU list of unpinned pages, protected by ashmem_mutex */
+ static LIST_HEAD(ashmem_lru_list);
++static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
++static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
++
+ /*
+  * long lru_count - The count of pages on our LRU list.
+  *
+@@ -438,7 +441,6 @@ out:
+ static unsigned long
+ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
+-      struct ashmem_range *range, *next;
+       unsigned long freed = 0;
+       /* We might recurse into filesystem code, so bail out if necessary */
+@@ -448,21 +450,33 @@ ashmem_shrink_scan(struct shrinker *shri
+       if (!mutex_trylock(&ashmem_mutex))
+               return -1;
+-      list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
++      while (!list_empty(&ashmem_lru_list)) {
++              struct ashmem_range *range =
++                      list_first_entry(&ashmem_lru_list, typeof(*range), lru);
+               loff_t start = range->pgstart * PAGE_SIZE;
+               loff_t end = (range->pgend + 1) * PAGE_SIZE;
++              struct file *f = range->asma->file;
+-              range->asma->file->f_op->fallocate(range->asma->file,
+-                              FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+-                              start, end - start);
++              get_file(f);
++              atomic_inc(&ashmem_shrink_inflight);
+               range->purged = ASHMEM_WAS_PURGED;
+               lru_del(range);
+               freed += range_size(range);
++              mutex_unlock(&ashmem_mutex);
++              f->f_op->fallocate(f,
++                                 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
++                                 start, end - start);
++              fput(f);
++              if (atomic_dec_and_test(&ashmem_shrink_inflight))
++                      wake_up_all(&ashmem_shrink_wait);
++              if (!mutex_trylock(&ashmem_mutex))
++                      goto out;
+               if (--sc->nr_to_scan <= 0)
+                       break;
+       }
+       mutex_unlock(&ashmem_mutex);
++out:
+       return freed;
+ }
+@@ -713,6 +727,7 @@ static int ashmem_pin_unpin(struct ashme
+               return -EFAULT;
+       mutex_lock(&ashmem_mutex);
++      wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
+       if (!asma->file)
+               goto out_unlock;
diff --git a/queue-4.20/staging-android-ion-fix-sys-heap-pool-s-gfp_flags.patch b/queue-4.20/staging-android-ion-fix-sys-heap-pool-s-gfp_flags.patch
new file mode 100644 (file)
index 0000000..33b2fed
--- /dev/null
@@ -0,0 +1,39 @@
+From 9bcf065e28122588a6cbee08cf847826dacbb438 Mon Sep 17 00:00:00 2001
+From: Qing Xia <saberlily.xia@hisilicon.com>
+Date: Fri, 1 Feb 2019 14:59:46 +0800
+Subject: staging: android: ion: fix sys heap pool's gfp_flags
+
+From: Qing Xia <saberlily.xia@hisilicon.com>
+
+commit 9bcf065e28122588a6cbee08cf847826dacbb438 upstream.
+
+In the first loop, gfp_flags will be modified to high_order_gfp_flags,
+and there will be no chance to change back to low_order_gfp_flags.
+
+Fixes: e7f63771b60e ("ION: Sys_heap: Add cached pool to spead up cached buffer alloc")
+Signed-off-by: Qing Xia <saberlily.xia@hisilicon.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Jing Xia <jing.xia@unisoc.com>
+Reviewed-by: Yuming Han <yuming.han@unisoc.com>
+Reviewed-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
+Reviewed-by: Orson Zhai <orson.zhai@unisoc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/android/ion/ion_system_heap.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/android/ion/ion_system_heap.c
++++ b/drivers/staging/android/ion/ion_system_heap.c
+@@ -224,10 +224,10 @@ static void ion_system_heap_destroy_pool
+ static int ion_system_heap_create_pools(struct ion_page_pool **pools)
+ {
+       int i;
+-      gfp_t gfp_flags = low_order_gfp_flags;
+       for (i = 0; i < NUM_ORDERS; i++) {
+               struct ion_page_pool *pool;
++              gfp_t gfp_flags = low_order_gfp_flags;
+               if (orders[i] > 4)
+                       gfp_flags = high_order_gfp_flags;
diff --git a/queue-4.20/staging-comedi-ni_660x-fix-missing-break-in-switch-statement.patch b/queue-4.20/staging-comedi-ni_660x-fix-missing-break-in-switch-statement.patch
new file mode 100644 (file)
index 0000000..adce49b
--- /dev/null
@@ -0,0 +1,35 @@
+From 479826cc86118e0d87e5cefb3df5b748e0480924 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Tue, 12 Feb 2019 12:44:50 -0600
+Subject: staging: comedi: ni_660x: fix missing break in switch statement
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit 479826cc86118e0d87e5cefb3df5b748e0480924 upstream.
+
+Add missing break statement in order to prevent the code from falling
+through to the default case and return -EINVAL every time.
+
+This bug was found thanks to the ongoing efforts to enable
+-Wimplicit-fallthrough.
+
+Fixes: aa94f2888825 ("staging: comedi: ni_660x: tidy up ni_660x_set_pfi_routing()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Reviewed-by: Ian Abbott <abbotti@mev.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/comedi/drivers/ni_660x.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/staging/comedi/drivers/ni_660x.c
++++ b/drivers/staging/comedi/drivers/ni_660x.c
+@@ -656,6 +656,7 @@ static int ni_660x_set_pfi_routing(struc
+       case NI_660X_PFI_OUTPUT_DIO:
+               if (chan > 31)
+                       return -EINVAL;
++              break;
+       default:
+               return -EINVAL;
+       }
diff --git a/queue-4.20/staging-erofs-compressed_pages-should-not-be-accessed-again-after-freed.patch b/queue-4.20/staging-erofs-compressed_pages-should-not-be-accessed-again-after-freed.patch
new file mode 100644 (file)
index 0000000..405a05b
--- /dev/null
@@ -0,0 +1,176 @@
+From af692e117cb8cd9d3d844d413095775abc1217f9 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Wed, 27 Feb 2019 13:33:30 +0800
+Subject: staging: erofs: compressed_pages should not be accessed again after freed
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit af692e117cb8cd9d3d844d413095775abc1217f9 upstream.
+
+This patch resolves the following page use-after-free issue,
+z_erofs_vle_unzip:
+    ...
+    for (i = 0; i < nr_pages; ++i) {
+        ...
+        z_erofs_onlinepage_endio(page);  (1)
+    }
+
+    for (i = 0; i < clusterpages; ++i) {
+        page = compressed_pages[i];
+
+        if (page->mapping == mngda)      (2)
+            continue;
+        /* recycle all individual staging pages */
+        (void)z_erofs_gather_if_stagingpage(page_pool, page); (3)
+        WRITE_ONCE(compressed_pages[i], NULL);
+    }
+    ...
+
+After (1) is executed, page is freed and could be then reused, if
+compressed_pages is scanned after that, it could fall info (2) or
+(3) by mistake and that could finally be in a mess.
+
+This patch aims to solve the above issue only with little changes
+as much as possible in order to make the fix backport easier.
+
+Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
+Cc: <stable@vger.kernel.org> # 4.19+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/staging/erofs/unzip_vle.c     |   38 +++++++++++++++++-----------------
+ drivers/staging/erofs/unzip_vle.h     |    3 --
+ drivers/staging/erofs/unzip_vle_lz4.c |   19 +++++++----------
+ 3 files changed, 29 insertions(+), 31 deletions(-)
+
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -929,11 +929,10 @@ repeat:
+       if (llen > grp->llen)
+               llen = grp->llen;
+-      err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
+-              clusterpages, pages, llen, work->pageofs,
+-              z_erofs_onlinepage_endio);
++      err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
++                                          pages, llen, work->pageofs);
+       if (err != -ENOTSUPP)
+-              goto out_percpu;
++              goto out;
+       if (sparsemem_pages >= nr_pages)
+               goto skip_allocpage;
+@@ -954,8 +953,25 @@ skip_allocpage:
+       erofs_vunmap(vout, nr_pages);
+ out:
++      /* must handle all compressed pages before endding pages */
++      for (i = 0; i < clusterpages; ++i) {
++              page = compressed_pages[i];
++
++#ifdef EROFS_FS_HAS_MANAGED_CACHE
++              if (page->mapping == mngda)
++                      continue;
++#endif
++              /* recycle all individual staging pages */
++              (void)z_erofs_gather_if_stagingpage(page_pool, page);
++
++              WRITE_ONCE(compressed_pages[i], NULL);
++      }
++
+       for (i = 0; i < nr_pages; ++i) {
+               page = pages[i];
++              if (!page)
++                      continue;
++
+               DBG_BUGON(page->mapping == NULL);
+               /* recycle all individual staging pages */
+@@ -968,20 +984,6 @@ out:
+               z_erofs_onlinepage_endio(page);
+       }
+-out_percpu:
+-      for (i = 0; i < clusterpages; ++i) {
+-              page = compressed_pages[i];
+-
+-#ifdef EROFS_FS_HAS_MANAGED_CACHE
+-              if (page->mapping == mngda)
+-                      continue;
+-#endif
+-              /* recycle all individual staging pages */
+-              (void)z_erofs_gather_if_stagingpage(page_pool, page);
+-
+-              WRITE_ONCE(compressed_pages[i], NULL);
+-      }
+-
+       if (pages == z_pagemap_global)
+               mutex_unlock(&z_pagemap_global_lock);
+       else if (unlikely(pages != pages_onstack))
+--- a/drivers/staging/erofs/unzip_vle.h
++++ b/drivers/staging/erofs/unzip_vle.h
+@@ -218,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct
+ extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+       unsigned clusterpages, struct page **pages,
+-      unsigned outlen, unsigned short pageofs,
+-      void (*endio)(struct page *));
++      unsigned int outlen, unsigned short pageofs);
+ extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
+       unsigned clusterpages, void *vaddr, unsigned llen,
+--- a/drivers/staging/erofs/unzip_vle_lz4.c
++++ b/drivers/staging/erofs/unzip_vle_lz4.c
+@@ -105,8 +105,7 @@ int z_erofs_vle_unzip_fast_percpu(struct
+                                 unsigned int clusterpages,
+                                 struct page **pages,
+                                 unsigned int outlen,
+-                                unsigned short pageofs,
+-                                void (*endio)(struct page *))
++                                unsigned short pageofs)
+ {
+       void *vin, *vout;
+       unsigned int nr_pages, i, j;
+@@ -128,19 +127,16 @@ int z_erofs_vle_unzip_fast_percpu(struct
+       ret = z_erofs_unzip_lz4(vin, vout + pageofs,
+                               clusterpages * PAGE_SIZE, outlen);
+-      if (ret >= 0) {
+-              outlen = ret;
+-              ret = 0;
+-      }
++      if (ret < 0)
++              goto out;
++      ret = 0;
+       for (i = 0; i < nr_pages; ++i) {
+               j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
+               if (pages[i]) {
+-                      if (ret < 0) {
+-                              SetPageError(pages[i]);
+-                      } else if (clusterpages == 1 &&
+-                                 pages[i] == compressed_pages[0]) {
++                      if (clusterpages == 1 &&
++                          pages[i] == compressed_pages[0]) {
+                               memcpy(vin + pageofs, vout + pageofs, j);
+                       } else {
+                               void *dst = kmap_atomic(pages[i]);
+@@ -148,12 +144,13 @@ int z_erofs_vle_unzip_fast_percpu(struct
+                               memcpy(dst + pageofs, vout + pageofs, j);
+                               kunmap_atomic(dst);
+                       }
+-                      endio(pages[i]);
+               }
+               vout += PAGE_SIZE;
+               outlen -= j;
+               pageofs = 0;
+       }
++
++out:
+       preempt_enable();
+       if (clusterpages == 1)
diff --git a/queue-4.20/staging-erofs-fix-fast-symlink-w-o-xattr-when-fs-xattr-is-on.patch b/queue-4.20/staging-erofs-fix-fast-symlink-w-o-xattr-when-fs-xattr-is-on.patch
new file mode 100644 (file)
index 0000000..6eb4d09
--- /dev/null
@@ -0,0 +1,123 @@
+From 7077fffcb0b0b65dc75e341306aeef4d0e7f2ec6 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Mon, 14 Jan 2019 19:40:23 +0800
+Subject: staging: erofs: fix fast symlink w/o xattr when fs xattr is on
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 7077fffcb0b0b65dc75e341306aeef4d0e7f2ec6 upstream.
+
+Currently, this will hit a BUG_ON for these symlinks as follows:
+
+- kernel message
+------------[ cut here ]------------
+kernel BUG at drivers/staging/erofs/xattr.c:59!
+SMP PTI
+CPU: 1 PID: 1170 Comm: getllxattr Not tainted 4.20.0-rc6+ #92
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-2.fc27 04/01/2014
+RIP: 0010:init_inode_xattrs+0x22b/0x270
+Code: 48 0f 45 ea f0 ff 4d 34 74 0d 41 83 4c 24 e0 01 31 c0 e9 00 fe ff ff 48 89 ef e8 e0 31 9e ff eb e9 89 e8 e9 ef fd ff ff 0f 0$
+ <0f> 0b 48 89 ef e8 fb f6 9c ff 48 8b 45 08 a8 01 75 24 f0 ff 4d 34
+RSP: 0018:ffffa03ac026bdf8 EFLAGS: 00010246
+------------[ cut here ]------------
+...
+Call Trace:
+ erofs_listxattr+0x30/0x2c0
+ ? selinux_inode_listxattr+0x5a/0x80
+ ? kmem_cache_alloc+0x33/0x170
+ ? security_inode_listxattr+0x27/0x40
+ listxattr+0xaf/0xc0
+ path_listxattr+0x5a/0xa0
+ do_syscall_64+0x43/0xf0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+...
+---[ end trace 3c24b49408dc0c72 ]---
+
+Fix it by checking ->xattr_isize in init_inode_xattrs(),
+and it also fixes improper return value -ENOTSUPP
+(it should be -ENODATA if xattr is enabled) for those inodes.
+
+Fixes: b17500a0fdba ("staging: erofs: introduce xattr & acl support")
+Cc: <stable@vger.kernel.org> # 4.19+
+Reported-by: Li Guifu <bluce.liguifu@huawei.com>
+Tested-by: Li Guifu <bluce.liguifu@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/inode.c |    8 ++++----
+ drivers/staging/erofs/xattr.c |   25 ++++++++++++++++++++-----
+ 2 files changed, 24 insertions(+), 9 deletions(-)
+
+--- a/drivers/staging/erofs/inode.c
++++ b/drivers/staging/erofs/inode.c
+@@ -185,16 +185,16 @@ static int fill_inode(struct inode *inod
+               /* setup the new inode */
+               if (S_ISREG(inode->i_mode)) {
+ #ifdef CONFIG_EROFS_FS_XATTR
+-                      if (vi->xattr_isize)
+-                              inode->i_op = &erofs_generic_xattr_iops;
++                      inode->i_op = &erofs_generic_xattr_iops;
+ #endif
+                       inode->i_fop = &generic_ro_fops;
+               } else if (S_ISDIR(inode->i_mode)) {
+                       inode->i_op =
+ #ifdef CONFIG_EROFS_FS_XATTR
+-                              vi->xattr_isize ? &erofs_dir_xattr_iops :
+-#endif
++                              &erofs_dir_xattr_iops;
++#else
+                               &erofs_dir_iops;
++#endif
+                       inode->i_fop = &erofs_dir_fops;
+               } else if (S_ISLNK(inode->i_mode)) {
+                       /* by default, page_get_link is used for symlink */
+--- a/drivers/staging/erofs/xattr.c
++++ b/drivers/staging/erofs/xattr.c
+@@ -56,7 +56,26 @@ static int init_inode_xattrs(struct inod
+               return 0;
+       vi = EROFS_V(inode);
+-      BUG_ON(!vi->xattr_isize);
++
++      /*
++       * bypass all xattr operations if ->xattr_isize is not greater than
++       * sizeof(struct erofs_xattr_ibody_header), in detail:
++       * 1) it is not enough to contain erofs_xattr_ibody_header then
++       *    ->xattr_isize should be 0 (it means no xattr);
++       * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
++       *    undefined right now (maybe use later with some new sb feature).
++       */
++      if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
++              errln("xattr_isize %d of nid %llu is not supported yet",
++                    vi->xattr_isize, vi->nid);
++              return -ENOTSUPP;
++      } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
++              if (unlikely(vi->xattr_isize)) {
++                      DBG_BUGON(1);
++                      return -EIO;    /* xattr ondisk layout error */
++              }
++              return -ENOATTR;
++      }
+       sb = inode->i_sb;
+       sbi = EROFS_SB(sb);
+@@ -422,7 +441,6 @@ static int erofs_xattr_generic_get(const
+               struct dentry *unused, struct inode *inode,
+               const char *name, void *buffer, size_t size)
+ {
+-      struct erofs_vnode *const vi = EROFS_V(inode);
+       struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+       switch (handler->flags) {
+@@ -440,9 +458,6 @@ static int erofs_xattr_generic_get(const
+               return -EINVAL;
+       }
+-      if (!vi->xattr_isize)
+-              return -ENOATTR;
+-
+       return erofs_getxattr(inode, handler->flags, name, buffer, size);
+ }
diff --git a/queue-4.20/staging-erofs-fix-illegal-address-access-under-memory-pressure.patch b/queue-4.20/staging-erofs-fix-illegal-address-access-under-memory-pressure.patch
new file mode 100644 (file)
index 0000000..68b752e
--- /dev/null
@@ -0,0 +1,86 @@
+From 1e5ceeab6929585512c63d05911d6657064abf7b Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Wed, 27 Feb 2019 13:33:31 +0800
+Subject: staging: erofs: fix illegal address access under memory pressure
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 1e5ceeab6929585512c63d05911d6657064abf7b upstream.
+
+Considering a read request with two decompressed file pages,
+If a decompression work cannot be started on the previous page
+due to memory pressure but in-memory LTP map lookup is done,
+builder->work should be still NULL.
+
+Moreover, if the current page also belongs to the same map,
+it won't try to start the decompression work again and then
+run into trouble.
+
+This patch aims to solve the above issue only with little changes
+as much as possible in order to make the fix backport easier.
+
+kernel message is:
+<4>[1051408.015930s]SLUB: Unable to allocate memory on node -1, gfp=0x2408040(GFP_NOFS|__GFP_ZERO)
+<4>[1051408.015930s]  cache: erofs_compress, object size: 144, buffer size: 144, default order: 0, min order: 0
+<4>[1051408.015930s]  node 0: slabs: 98, objs: 2744, free: 0
+  * Cannot allocate the decompression work
+
+<3>[1051408.015960s]erofs: z_erofs_vle_normalaccess_readpages, readahead error at page 1008 of nid 5391488
+  * Note that the previous page was failed to read
+
+<0>[1051408.015960s]Internal error: Accessing user space memory outside uaccess.h routines: 96000005 [#1] PREEMPT SMP
+...
+<4>[1051408.015991s]Hardware name: kirin710 (DT)
+...
+<4>[1051408.016021s]PC is at z_erofs_vle_work_add_page+0xa0/0x17c
+<4>[1051408.016021s]LR is at z_erofs_do_read_page+0x12c/0xcf0
+...
+<4>[1051408.018096s][<ffffff80c6fb0fd4>] z_erofs_vle_work_add_page+0xa0/0x17c
+<4>[1051408.018096s][<ffffff80c6fb3814>] z_erofs_vle_normalaccess_readpages+0x1a0/0x37c
+<4>[1051408.018096s][<ffffff80c6d670b8>] read_pages+0x70/0x190
+<4>[1051408.018127s][<ffffff80c6d6736c>] __do_page_cache_readahead+0x194/0x1a8
+<4>[1051408.018127s][<ffffff80c6d59318>] filemap_fault+0x398/0x684
+<4>[1051408.018127s][<ffffff80c6d8a9e0>] __do_fault+0x8c/0x138
+<4>[1051408.018127s][<ffffff80c6d8f90c>] handle_pte_fault+0x730/0xb7c
+<4>[1051408.018127s][<ffffff80c6d8fe04>] __handle_mm_fault+0xac/0xf4
+<4>[1051408.018157s][<ffffff80c6d8fec8>] handle_mm_fault+0x7c/0x118
+<4>[1051408.018157s][<ffffff80c8c52998>] do_page_fault+0x354/0x474
+<4>[1051408.018157s][<ffffff80c8c52af8>] do_translation_fault+0x40/0x48
+<4>[1051408.018157s][<ffffff80c6c002f4>] do_mem_abort+0x80/0x100
+<4>[1051408.018310s]---[ end trace 9f4009a3283bd78b ]---
+
+Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
+Cc: <stable@vger.kernel.org> # 4.19+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/unzip_vle.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -626,8 +626,12 @@ repeat:
+       /* lucky, within the range of the current map_blocks */
+       if (offset + cur >= map->m_la &&
+-              offset + cur < map->m_la + map->m_llen)
++              offset + cur < map->m_la + map->m_llen) {
++              /* didn't get a valid unzip work previously (very rare) */
++              if (!builder->work)
++                      goto restart_now;
+               goto hitted;
++      }
+       /* go ahead the next map_blocks */
+       debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
+@@ -641,6 +645,7 @@ repeat:
+       if (unlikely(err))
+               goto err_out;
++restart_now:
+       if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
+               goto hitted;
diff --git a/queue-4.20/staging-erofs-fix-memleak-of-inode-s-shared-xattr-array.patch b/queue-4.20/staging-erofs-fix-memleak-of-inode-s-shared-xattr-array.patch
new file mode 100644 (file)
index 0000000..2244072
--- /dev/null
@@ -0,0 +1,38 @@
+From 3b1b5291f79d040d549d7c746669fc30e8045b9b Mon Sep 17 00:00:00 2001
+From: Sheng Yong <shengyong1@huawei.com>
+Date: Thu, 14 Feb 2019 14:46:36 +0800
+Subject: staging: erofs: fix memleak of inode's shared xattr array
+
+From: Sheng Yong <shengyong1@huawei.com>
+
+commit 3b1b5291f79d040d549d7c746669fc30e8045b9b upstream.
+
+If it fails to read a shared xattr page, the inode's shared xattr array
+is not freed. The next time the inode's xattr is accessed, the previously
+allocated array is leaked.
+
+Signed-off-by: Sheng Yong <shengyong1@huawei.com>
+Fixes: b17500a0fdba ("staging: erofs: introduce xattr & acl support")
+Cc: <stable@vger.kernel.org> # 4.19+
+Reviewed-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/xattr.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/erofs/xattr.c
++++ b/drivers/staging/erofs/xattr.c
+@@ -111,8 +111,11 @@ static int init_inode_xattrs(struct inod
+                       it.page = erofs_get_meta_page(sb,
+                               ++it.blkaddr, S_ISDIR(inode->i_mode));
+-                      if (IS_ERR(it.page))
++                      if (IS_ERR(it.page)) {
++                              kfree(vi->xattr_shared_xattrs);
++                              vi->xattr_shared_xattrs = NULL;
+                               return PTR_ERR(it.page);
++                      }
+                       it.kaddr = kmap_atomic(it.page);
+                       atomic_map = true;
diff --git a/queue-4.20/staging-erofs-fix-race-of-initializing-xattrs-of-a-inode-at-the-same-time.patch b/queue-4.20/staging-erofs-fix-race-of-initializing-xattrs-of-a-inode-at-the-same-time.patch
new file mode 100644 (file)
index 0000000..b4bc78c
--- /dev/null
@@ -0,0 +1,147 @@
+From 62dc45979f3f8cb0ea67302a93bff686f0c46c5a Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Mon, 18 Feb 2019 15:19:04 +0800
+Subject: staging: erofs: fix race of initializing xattrs of a inode at the same time
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 62dc45979f3f8cb0ea67302a93bff686f0c46c5a upstream.
+
+In real scenario, there could be several threads accessing xattrs
+of the same xattr-uninitialized inode, and init_inode_xattrs()
+almost at the same time.
+
+That's actually an unexpected behavior, this patch closes the race.
+
+Fixes: b17500a0fdba ("staging: erofs: introduce xattr & acl support")
+Cc: <stable@vger.kernel.org> # 4.19+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/internal.h |   11 +++++++---
+ drivers/staging/erofs/xattr.c    |   41 +++++++++++++++++++++++++++------------
+ 2 files changed, 37 insertions(+), 15 deletions(-)
+
+--- a/drivers/staging/erofs/internal.h
++++ b/drivers/staging/erofs/internal.h
+@@ -352,12 +352,17 @@ static inline erofs_off_t iloc(struct er
+       return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
+ }
+-#define inode_set_inited_xattr(inode)   (EROFS_V(inode)->flags |= 1)
+-#define inode_has_inited_xattr(inode)   (EROFS_V(inode)->flags & 1)
++/* atomic flag definitions */
++#define EROFS_V_EA_INITED_BIT 0
++
++/* bitlock definitions (arranged in reverse order) */
++#define EROFS_V_BL_XATTR_BIT  (BITS_PER_LONG - 1)
+ struct erofs_vnode {
+       erofs_nid_t nid;
+-      unsigned int flags;
++
++      /* atomic flags (including bitlocks) */
++      unsigned long flags;
+       unsigned char data_mapping_mode;
+       /* inline size in bytes */
+--- a/drivers/staging/erofs/xattr.c
++++ b/drivers/staging/erofs/xattr.c
+@@ -44,18 +44,25 @@ static inline void xattr_iter_end_final(
+ static int init_inode_xattrs(struct inode *inode)
+ {
++      struct erofs_vnode *const vi = EROFS_V(inode);
+       struct xattr_iter it;
+       unsigned int i;
+       struct erofs_xattr_ibody_header *ih;
+       struct super_block *sb;
+       struct erofs_sb_info *sbi;
+-      struct erofs_vnode *vi;
+       bool atomic_map;
++      int ret = 0;
+-      if (likely(inode_has_inited_xattr(inode)))
++      /* the most case is that xattrs of this inode are initialized. */
++      if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+               return 0;
+-      vi = EROFS_V(inode);
++      if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
++              return -ERESTARTSYS;
++
++      /* someone has initialized xattrs for us? */
++      if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
++              goto out_unlock;
+       /*
+        * bypass all xattr operations if ->xattr_isize is not greater than
+@@ -68,13 +75,16 @@ static int init_inode_xattrs(struct inod
+       if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
+               errln("xattr_isize %d of nid %llu is not supported yet",
+                     vi->xattr_isize, vi->nid);
+-              return -ENOTSUPP;
++              ret = -ENOTSUPP;
++              goto out_unlock;
+       } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
+               if (unlikely(vi->xattr_isize)) {
+                       DBG_BUGON(1);
+-                      return -EIO;    /* xattr ondisk layout error */
++                      ret = -EIO;
++                      goto out_unlock;        /* xattr ondisk layout error */
+               }
+-              return -ENOATTR;
++              ret = -ENOATTR;
++              goto out_unlock;
+       }
+       sb = inode->i_sb;
+@@ -83,8 +93,10 @@ static int init_inode_xattrs(struct inod
+       it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
+       it.page = erofs_get_inline_page(inode, it.blkaddr);
+-      if (IS_ERR(it.page))
+-              return PTR_ERR(it.page);
++      if (IS_ERR(it.page)) {
++              ret = PTR_ERR(it.page);
++              goto out_unlock;
++      }
+       /* read in shared xattr array (non-atomic, see kmalloc below) */
+       it.kaddr = kmap(it.page);
+@@ -97,7 +109,8 @@ static int init_inode_xattrs(struct inod
+                                               sizeof(uint), GFP_KERNEL);
+       if (vi->xattr_shared_xattrs == NULL) {
+               xattr_iter_end(&it, atomic_map);
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto out_unlock;
+       }
+       /* let's skip ibody header */
+@@ -114,7 +127,8 @@ static int init_inode_xattrs(struct inod
+                       if (IS_ERR(it.page)) {
+                               kfree(vi->xattr_shared_xattrs);
+                               vi->xattr_shared_xattrs = NULL;
+-                              return PTR_ERR(it.page);
++                              ret = PTR_ERR(it.page);
++                              goto out_unlock;
+                       }
+                       it.kaddr = kmap_atomic(it.page);
+@@ -127,8 +141,11 @@ static int init_inode_xattrs(struct inod
+       }
+       xattr_iter_end(&it, atomic_map);
+-      inode_set_inited_xattr(inode);
+-      return 0;
++      set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
++
++out_unlock:
++      clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
++      return ret;
+ }
+ /*
diff --git a/queue-4.20/staging-wilc1000-fix-to-set-correct-value-for-vif_num.patch b/queue-4.20/staging-wilc1000-fix-to-set-correct-value-for-vif_num.patch
new file mode 100644 (file)
index 0000000..99dfc1b
--- /dev/null
@@ -0,0 +1,37 @@
+From dda037057a572f5c82ac2499eb4e6fb17600ba3e Mon Sep 17 00:00:00 2001
+From: Ajay Singh <ajay.kathat@microchip.com>
+Date: Thu, 7 Feb 2019 11:28:58 +0000
+Subject: staging: wilc1000: fix to set correct value for 'vif_num'
+
+From: Ajay Singh <ajay.kathat@microchip.com>
+
+commit dda037057a572f5c82ac2499eb4e6fb17600ba3e upstream.
+
+Set correct value in '->vif_num' for the total number of interfaces and
+set '->idx' value using 'i'.
+
+Fixes: 735bb39ca3be ("staging: wilc1000: simplify vif[i]->ndev accesses")
+Fixes: 0e490657c721 ("staging: wilc1000: Fix problem with wrong vif index")
+Cc: <stable@vger.kernel.org>
+Suggested-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/wilc1000/linux_wlan.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/wilc1000/linux_wlan.c
++++ b/drivers/staging/wilc1000/linux_wlan.c
+@@ -1104,8 +1104,8 @@ int wilc_netdev_init(struct wilc **wilc,
+               vif->wilc = *wilc;
+               vif->ndev = ndev;
+               wl->vif[i] = vif;
+-              wl->vif_num = i;
+-              vif->idx = wl->vif_num;
++              wl->vif_num = i + 1;
++              vif->idx = i;
+               ndev->netdev_ops = &wilc_netdev_ops;