--- /dev/null
+From 09ac6a817bd687e7f5dac00470262efdd72f9319 Mon Sep 17 00:00:00 2001
+From: Curtis Malainey <cujomalainey@chromium.org>
+Date: Thu, 10 Jan 2019 16:21:04 -0800
+Subject: ASoC: soc-core: fix init platform memory handling
+
+From: Curtis Malainey <cujomalainey@chromium.org>
+
+commit 09ac6a817bd687e7f5dac00470262efdd72f9319 upstream.
+
+snd_soc_init_platform initializes pointers to snd_soc_dai_link which is
+statically allocated and it does this by devm_kzalloc. In the event of
+an EPROBE_DEFER the memory will be freed and the pointers are left
+dangling. snd_soc_init_platform sees the dangling pointers and assumes
+they are pointing to initialized memory and does not reallocate them on
+the second probe attempt which results in a use after free bug since
+devm has freed the memory from the first probe attempt.
+
+Since the intention for snd_soc_dai_link->platform is that it can be set
+statically by the machine driver we need to respect the pointer in the
+event we did not set it but still catch dangling pointers. The solution
+is to add a flag to track whether the pointer was dynamically allocated
+or not.
+
+Signed-off-by: Curtis Malainey <cujomalainey@chromium.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: Jon Hunter <jonathanh@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/sound/soc.h | 6 ++++++
+ sound/soc/soc-core.c | 11 ++++++-----
+ 2 files changed, 12 insertions(+), 5 deletions(-)
+
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -985,6 +985,12 @@ struct snd_soc_dai_link {
+ /* Do not create a PCM for this DAI link (Backend link) */
+ unsigned int ignore:1;
+
++ /*
++ * This driver uses legacy platform naming. Set by the core, machine
++ * drivers should not modify this value.
++ */
++ unsigned int legacy_platform:1;
++
+ struct list_head list; /* DAI link list of the soc card */
+ struct snd_soc_dobj dobj; /* For topology */
+ };
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1034,17 +1034,18 @@ static int snd_soc_init_platform(struct
+ * this function should be removed in the future
+ */
+ /* convert Legacy platform link */
+- if (!platform) {
++ if (!platform || dai_link->legacy_platform) {
+ platform = devm_kzalloc(card->dev,
+ sizeof(struct snd_soc_dai_link_component),
+ GFP_KERNEL);
+ if (!platform)
+ return -ENOMEM;
+
+- dai_link->platform = platform;
+- platform->name = dai_link->platform_name;
+- platform->of_node = dai_link->platform_of_node;
+- platform->dai_name = NULL;
++ dai_link->platform = platform;
++ dai_link->legacy_platform = 1;
++ platform->name = dai_link->platform_name;
++ platform->of_node = dai_link->platform_of_node;
++ platform->dai_name = NULL;
+ }
+
+ /* if there's no platform we match on the empty platform */
--- /dev/null
+From 9e8db5913264d3967b93c765a6a9e464d9c473db Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Mon, 18 Feb 2019 23:37:12 -0500
+Subject: net: avoid false positives in untrusted gso validation
+
+From: Willem de Bruijn <willemb@google.com>
+
+commit 9e8db5913264d3967b93c765a6a9e464d9c473db upstream.
+
+GSO packets with vnet_hdr must conform to a small set of gso_types.
+The below commit uses flow dissection to drop packets that do not.
+
+But it has false positives when the skb is not fully initialized.
+Dissection needs skb->protocol and skb->network_header.
+
+Infer skb->protocol from gso_type as the two must agree.
+SKB_GSO_UDP can use both ipv4 and ipv6, so try both.
+
+Exclude callers for which network header offset is not known.
+
+Fixes: d5be7f632bad ("net: validate untrusted gso packets without csum offload")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/virtio_net.h | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -61,10 +61,20 @@ static inline int virtio_net_hdr_to_skb(
+ /* gso packets without NEEDS_CSUM do not set transport_offset.
+ * probe and drop if does not match one of the above types.
+ */
+- if (gso_type) {
++ if (gso_type && skb->network_header) {
++ if (!skb->protocol)
++ virtio_net_hdr_set_proto(skb, hdr);
++retry:
+ skb_probe_transport_header(skb, -1);
+- if (!skb_transport_header_was_set(skb))
++ if (!skb_transport_header_was_set(skb)) {
++ /* UFO does not specify ipv4 or 6: try both */
++ if (gso_type & SKB_GSO_UDP &&
++ skb->protocol == htons(ETH_P_IP)) {
++ skb->protocol = htons(ETH_P_IPV6);
++ goto retry;
++ }
+ return -EINVAL;
++ }
+ }
+ }
+
--- /dev/null
+From d5be7f632bad0f489879eed0ff4b99bd7fe0b74c Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Fri, 15 Feb 2019 12:15:47 -0500
+Subject: net: validate untrusted gso packets without csum offload
+
+From: Willem de Bruijn <willemb@google.com>
+
+commit d5be7f632bad0f489879eed0ff4b99bd7fe0b74c upstream.
+
+Syzkaller again found a path to a kernel crash through bad gso input.
+By building an excessively large packet to cause an skb field to wrap.
+
+If VIRTIO_NET_HDR_F_NEEDS_CSUM was set this would have been dropped in
+skb_partial_csum_set.
+
+GSO packets that do not set checksum offload are suspicious and rare.
+Most callers of virtio_net_hdr_to_skb already pass them to
+skb_probe_transport_header.
+
+Move that test forward, change it to detect parse failure and drop
+packets on failure as those clearly are not one of the legitimate
+VIRTIO_NET_HDR_GSO types.
+
+Fixes: bfd5f4a3d605 ("packet: Add GSO/csum offload support.")
+Fixes: f43798c27684 ("tun: Allow GSO using virtio_net_hdr")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/skbuff.h | 2 +-
+ include/linux/virtio_net.h | 9 +++++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2418,7 +2418,7 @@ static inline void skb_probe_transport_h
+
+ if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
+ skb_set_transport_header(skb, keys.control.thoff);
+- else
++ else if (offset_hint >= 0)
+ skb_set_transport_header(skb, offset_hint);
+ }
+
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -57,6 +57,15 @@ static inline int virtio_net_hdr_to_skb(
+
+ if (!skb_partial_csum_set(skb, start, off))
+ return -EINVAL;
++ } else {
++ /* gso packets without NEEDS_CSUM do not set transport_offset.
++ * probe and drop if does not match one of the above types.
++ */
++ if (gso_type) {
++ skb_probe_transport_header(skb, -1);
++ if (!skb_transport_header_was_set(skb))
++ return -EINVAL;
++ }
+ }
+
+ if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
x86-kvm-mmu-fix-switch-between-root-and-guest-mmus.patch
kvm-x86-return-la57-feature-based-on-hardware-capability.patch
s390-vsie-use-effective-crycbd.31-to-check-crycbd-validity.patch
+asoc-soc-core-fix-init-platform-memory-handling.patch
+net-validate-untrusted-gso-packets-without-csum-offload.patch
+net-avoid-false-positives-in-untrusted-gso-validation.patch
+staging-erofs-remove-the-redundant-d_rehash-for-the-root-dentry.patch
+staging-erofs-atomic_cond_read_relaxed-on-ref-locked-workgroup.patch
+staging-erofs-fix-erofs_workgroup_-try_to_freeze-unfreeze.patch
+staging-erofs-add-a-full-barrier-in-erofs_workgroup_unfreeze.patch
+staging-erofs-dir-inode-super-.c-rectify-bug_ons.patch
+staging-erofs-unzip_-pagevec.h-vle.c-rectify-bug_ons.patch
+staging-erofs-unzip_vle_lz4.c-utils.c-rectify-bug_ons.patch
--- /dev/null
+From 948bbdb1818b7ad6e539dad4fbd2dd4650793ea9 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Fri, 23 Nov 2018 01:16:03 +0800
+Subject: staging: erofs: add a full barrier in erofs_workgroup_unfreeze
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 948bbdb1818b7ad6e539dad4fbd2dd4650793ea9 upstream.
+
+Just like other generic locks, insert a full barrier
+in case of memory reorder.
+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/internal.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/staging/erofs/internal.h
++++ b/drivers/staging/erofs/internal.h
+@@ -209,6 +209,11 @@ static inline bool erofs_workgroup_try_t
+ static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+ int orig_val)
+ {
++ /*
++ * other observers should notice all modifications
++ * in the freezing period.
++ */
++ smp_mb();
+ atomic_set(&grp->refcount, orig_val);
+ preempt_enable();
+ }
--- /dev/null
+From df134b8d17b90c1e7720e318d36416b57424ff7a Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Fri, 23 Nov 2018 01:16:01 +0800
+Subject: staging: erofs: atomic_cond_read_relaxed on ref-locked workgroup
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit df134b8d17b90c1e7720e318d36416b57424ff7a upstream.
+
+It's better to use atomic_cond_read_relaxed, which is implemented
+with hardware instructions that monitor variable changes (currently
+on ARM64), instead of open-coded busy waiting.
+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/internal.h | 30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+--- a/drivers/staging/erofs/internal.h
++++ b/drivers/staging/erofs/internal.h
+@@ -221,23 +221,29 @@ static inline void erofs_workgroup_unfre
+ preempt_enable();
+ }
+
++#if defined(CONFIG_SMP)
++static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
++{
++ return atomic_cond_read_relaxed(&grp->refcount,
++ VAL != EROFS_LOCKED_MAGIC);
++}
++#else
++static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
++{
++ int v = atomic_read(&grp->refcount);
++
++ /* workgroup is never freezed on uniprocessor systems */
++ DBG_BUGON(v == EROFS_LOCKED_MAGIC);
++ return v;
++}
++#endif
++
+ static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
+ {
+- const int locked = (int)EROFS_LOCKED_MAGIC;
+ int o;
+
+ repeat:
+- o = atomic_read(&grp->refcount);
+-
+- /* spin if it is temporarily locked at the reclaim path */
+- if (unlikely(o == locked)) {
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+- do
+- cpu_relax();
+- while (atomic_read(&grp->refcount) == locked);
+-#endif
+- goto repeat;
+- }
++ o = erofs_wait_on_workgroup_freezed(grp);
+
+ if (unlikely(o <= 0))
+ return -1;
--- /dev/null
+From 8b987bca2d09649683cbe496419a011df8c08493 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Wed, 5 Dec 2018 21:23:13 +0800
+Subject: staging: erofs: {dir,inode,super}.c: rectify BUG_ONs
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 8b987bca2d09649683cbe496419a011df8c08493 upstream.
+
+remove all redundant BUG_ONs, and turn the remaining
+useful ones into DBG_BUGONs.
+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/dir.c | 7 +++++--
+ drivers/staging/erofs/inode.c | 10 ++++++++--
+ drivers/staging/erofs/super.c | 13 ++++++-------
+ 3 files changed, 19 insertions(+), 11 deletions(-)
+
+--- a/drivers/staging/erofs/dir.c
++++ b/drivers/staging/erofs/dir.c
+@@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct di
+ strnlen(de_name, maxsize - nameoff) :
+ le16_to_cpu(de[1].nameoff) - nameoff;
+
+- /* the corrupted directory found */
+- BUG_ON(de_namelen < 0);
++ /* a corrupted entry is found */
++ if (unlikely(de_namelen < 0)) {
++ DBG_BUGON(1);
++ return -EIO;
++ }
+
+ #ifdef CONFIG_EROFS_FS_DEBUG
+ dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
+--- a/drivers/staging/erofs/inode.c
++++ b/drivers/staging/erofs/inode.c
+@@ -133,7 +133,13 @@ static int fill_inline_data(struct inode
+ return -ENOMEM;
+
+ m_pofs += vi->inode_isize + vi->xattr_isize;
+- BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
++
++ /* inline symlink data shouldn't across page boundary as well */
++ if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
++ DBG_BUGON(1);
++ kfree(lnk);
++ return -EIO;
++ }
+
+ /* get in-page inline data */
+ memcpy(lnk, data + m_pofs, inode->i_size);
+@@ -171,7 +177,7 @@ static int fill_inode(struct inode *inod
+ return PTR_ERR(page);
+ }
+
+- BUG_ON(!PageUptodate(page));
++ DBG_BUGON(!PageUptodate(page));
+ data = page_address(page);
+
+ err = read_inode(inode, data + ofs);
+--- a/drivers/staging/erofs/super.c
++++ b/drivers/staging/erofs/super.c
+@@ -40,7 +40,6 @@ static int __init erofs_init_inode_cache
+
+ static void erofs_exit_inode_cache(void)
+ {
+- BUG_ON(erofs_inode_cachep == NULL);
+ kmem_cache_destroy(erofs_inode_cachep);
+ }
+
+@@ -303,8 +302,8 @@ static int managed_cache_releasepage(str
+ int ret = 1; /* 0 - busy */
+ struct address_space *const mapping = page->mapping;
+
+- BUG_ON(!PageLocked(page));
+- BUG_ON(mapping->a_ops != &managed_cache_aops);
++ DBG_BUGON(!PageLocked(page));
++ DBG_BUGON(mapping->a_ops != &managed_cache_aops);
+
+ if (PagePrivate(page))
+ ret = erofs_try_to_free_cached_page(mapping, page);
+@@ -317,10 +316,10 @@ static void managed_cache_invalidatepage
+ {
+ const unsigned int stop = length + offset;
+
+- BUG_ON(!PageLocked(page));
++ DBG_BUGON(!PageLocked(page));
+
+- /* Check for overflow */
+- BUG_ON(stop > PAGE_SIZE || stop < length);
++ /* Check for potential overflow in debug mode */
++ DBG_BUGON(stop > PAGE_SIZE || stop < length);
+
+ if (offset == 0 && stop == PAGE_SIZE)
+ while (!managed_cache_releasepage(page, GFP_NOFS))
+@@ -649,7 +648,7 @@ static int erofs_remount(struct super_bl
+ unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
+ int err;
+
+- BUG_ON(!sb_rdonly(sb));
++ DBG_BUGON(!sb_rdonly(sb));
+ err = parse_options(sb, data);
+ if (err)
+ goto out;
--- /dev/null
+From 73f5c66df3e26ab750cefcb9a3e08c71c9f79cad Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Fri, 23 Nov 2018 01:16:02 +0800
+Subject: staging: erofs: fix `erofs_workgroup_{try_to_freeze, unfreeze}'
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 73f5c66df3e26ab750cefcb9a3e08c71c9f79cad upstream.
+
+There are two minor issues in the current freeze interface:
+
+ 1) Freeze interfaces have not related with CONFIG_DEBUG_SPINLOCK,
+ therefore fix the incorrect conditions;
+
+ 2) For SMP platforms, it should also disable preemption before
+ doing atomic_cmpxchg in case that some high priority tasks
+ preempt between atomic_cmpxchg and disable_preempt, then spin
+ on the locked refcount later.
+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/internal.h | 41 +++++++++++++++++++++++----------------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+--- a/drivers/staging/erofs/internal.h
++++ b/drivers/staging/erofs/internal.h
+@@ -194,40 +194,49 @@ struct erofs_workgroup {
+
+ #define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
+
+-static inline bool erofs_workgroup_try_to_freeze(
+- struct erofs_workgroup *grp, int v)
++#if defined(CONFIG_SMP)
++static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
++ int val)
+ {
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+- if (v != atomic_cmpxchg(&grp->refcount,
+- v, EROFS_LOCKED_MAGIC))
+- return false;
+ preempt_disable();
+-#else
+- preempt_disable();
+- if (atomic_read(&grp->refcount) != v) {
++ if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
+ preempt_enable();
+ return false;
+ }
+-#endif
+ return true;
+ }
+
+-static inline void erofs_workgroup_unfreeze(
+- struct erofs_workgroup *grp, int v)
++static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
++ int orig_val)
+ {
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+- atomic_set(&grp->refcount, v);
+-#endif
++ atomic_set(&grp->refcount, orig_val);
+ preempt_enable();
+ }
+
+-#if defined(CONFIG_SMP)
+ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+ {
+ return atomic_cond_read_relaxed(&grp->refcount,
+ VAL != EROFS_LOCKED_MAGIC);
+ }
+ #else
++static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
++ int val)
++{
++ preempt_disable();
++ /* no need to spin on UP platforms, let's just disable preemption. */
++ if (val != atomic_read(&grp->refcount)) {
++ preempt_enable();
++ return false;
++ }
++ return true;
++}
++
++static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
++ int orig_val)
++{
++ preempt_enable();
++}
++
+ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+ {
+ int v = atomic_read(&grp->refcount);
--- /dev/null
+From e9c892465583c8f42d61fafe30970d36580925df Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Sat, 3 Nov 2018 17:23:56 +0800
+Subject: staging: erofs: remove the redundant d_rehash() for the root dentry
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit e9c892465583c8f42d61fafe30970d36580925df upstream.
+
+There is actually no need at all to d_rehash() for the root dentry
+as Al pointed out, fix it.
+
+Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
+Cc: Al Viro <viro@ZenIV.linux.org.uk>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/super.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/drivers/staging/erofs/super.c
++++ b/drivers/staging/erofs/super.c
+@@ -442,12 +442,6 @@ static int erofs_read_super(struct super
+
+ erofs_register_super(sb);
+
+- /*
+- * We already have a positive dentry, which was instantiated
+- * by d_make_root. Just need to d_rehash it.
+- */
+- d_rehash(sb->s_root);
+-
+ if (!silent)
+ infoln("mounted on %s with opts: %s.", dev_name,
+ (char *)data);
--- /dev/null
+From 70b17991d89554cdd16f3e4fb0179bcc03c808d9 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Tue, 11 Dec 2018 15:17:49 +0800
+Subject: staging: erofs: unzip_{pagevec.h,vle.c}: rectify BUG_ONs
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 70b17991d89554cdd16f3e4fb0179bcc03c808d9 upstream.
+
+remove all redundant BUG_ONs, and turn the remaining
+useful ones into DBG_BUGONs.
+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/staging/erofs/unzip_pagevec.h | 2 -
+ drivers/staging/erofs/unzip_vle.c | 35 +++++++++++++---------------------
+ 2 files changed, 15 insertions(+), 22 deletions(-)
+
+--- a/drivers/staging/erofs/unzip_pagevec.h
++++ b/drivers/staging/erofs/unzip_pagevec.h
+@@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_er
+ erofs_vtptr_t t;
+
+ if (unlikely(ctor->index >= ctor->nr)) {
+- BUG_ON(ctor->next == NULL);
++ DBG_BUGON(!ctor->next);
+ z_erofs_pagevec_ctor_pagedown(ctor, true);
+ }
+
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -20,9 +20,6 @@ static struct kmem_cache *z_erofs_workgr
+
+ void z_erofs_exit_zip_subsystem(void)
+ {
+- BUG_ON(z_erofs_workqueue == NULL);
+- BUG_ON(z_erofs_workgroup_cachep == NULL);
+-
+ destroy_workqueue(z_erofs_workqueue);
+ kmem_cache_destroy(z_erofs_workgroup_cachep);
+ }
+@@ -366,7 +363,10 @@ z_erofs_vle_work_register(const struct z
+ struct z_erofs_vle_work *work;
+
+ /* if multiref is disabled, grp should never be nullptr */
+- BUG_ON(grp != NULL);
++ if (unlikely(grp)) {
++ DBG_BUGON(1);
++ return ERR_PTR(-EINVAL);
++ }
+
+ /* no available workgroup, let's allocate one */
+ grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
+@@ -745,7 +745,7 @@ static inline void z_erofs_vle_read_endi
+ bool cachemngd = false;
+
+ DBG_BUGON(PageUptodate(page));
+- BUG_ON(page->mapping == NULL);
++ DBG_BUGON(!page->mapping);
+
+ #ifdef EROFS_FS_HAS_MANAGED_CACHE
+ if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
+@@ -803,7 +803,7 @@ static int z_erofs_vle_unzip(struct supe
+
+ might_sleep();
+ work = z_erofs_vle_grab_primary_work(grp);
+- BUG_ON(!READ_ONCE(work->nr_pages));
++ DBG_BUGON(!READ_ONCE(work->nr_pages));
+
+ mutex_lock(&work->lock);
+ nr_pages = work->nr_pages;
+@@ -852,8 +852,8 @@ repeat:
+ else
+ pagenr = z_erofs_onlinepage_index(page);
+
+- BUG_ON(pagenr >= nr_pages);
+- BUG_ON(pages[pagenr] != NULL);
++ DBG_BUGON(pagenr >= nr_pages);
++ DBG_BUGON(pages[pagenr]);
+
+ pages[pagenr] = page;
+ }
+@@ -876,9 +876,8 @@ repeat:
+ if (z_erofs_is_stagingpage(page))
+ continue;
+ #ifdef EROFS_FS_HAS_MANAGED_CACHE
+- else if (page->mapping == mngda) {
+- BUG_ON(PageLocked(page));
+- BUG_ON(!PageUptodate(page));
++ if (page->mapping == mngda) {
++ DBG_BUGON(!PageUptodate(page));
+ continue;
+ }
+ #endif
+@@ -886,8 +885,8 @@ repeat:
+ /* only non-head page could be reused as a compressed page */
+ pagenr = z_erofs_onlinepage_index(page);
+
+- BUG_ON(pagenr >= nr_pages);
+- BUG_ON(pages[pagenr] != NULL);
++ DBG_BUGON(pagenr >= nr_pages);
++ DBG_BUGON(pages[pagenr]);
+ ++sparsemem_pages;
+ pages[pagenr] = page;
+
+@@ -897,9 +896,6 @@ repeat:
+ llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
+
+ if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
+- /* FIXME! this should be fixed in the future */
+- BUG_ON(grp->llen != llen);
+-
+ err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
+ pages, nr_pages, work->pageofs);
+ goto out;
+@@ -914,10 +910,8 @@ repeat:
+ if (err != -ENOTSUPP)
+ goto out_percpu;
+
+- if (sparsemem_pages >= nr_pages) {
+- BUG_ON(sparsemem_pages > nr_pages);
++ if (sparsemem_pages >= nr_pages)
+ goto skip_allocpage;
+- }
+
+ for (i = 0; i < nr_pages; ++i) {
+ if (pages[i] != NULL)
+@@ -1010,7 +1004,7 @@ static void z_erofs_vle_unzip_wq(struct
+ struct z_erofs_vle_unzip_io_sb, io.u.work);
+ LIST_HEAD(page_pool);
+
+- BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
++ DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+ z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
+
+ put_pages_list(&page_pool);
+@@ -1344,7 +1338,6 @@ static int z_erofs_vle_normalaccess_read
+ continue;
+ }
+
+- BUG_ON(PagePrivate(page));
+ set_page_private(page, (unsigned long)head);
+ head = page;
+ }
--- /dev/null
+From b8e076a6ef253e763bfdb81e5c72bcc828b0fbeb Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Tue, 11 Dec 2018 15:17:50 +0800
+Subject: staging: erofs: unzip_vle_lz4.c,utils.c: rectify BUG_ONs
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit b8e076a6ef253e763bfdb81e5c72bcc828b0fbeb upstream.
+
+remove all redundant BUG_ONs, and turn the remaining
+useful ones into DBG_BUGONs.
+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/unzip_vle_lz4.c | 2 +-
+ drivers/staging/erofs/utils.c | 12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/staging/erofs/unzip_vle_lz4.c
++++ b/drivers/staging/erofs/unzip_vle_lz4.c
+@@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page *
+ if (compressed_pages[j] != page)
+ continue;
+
+- BUG_ON(mirrored[j]);
++ DBG_BUGON(mirrored[j]);
+ memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
+ mirrored[j] = true;
+ break;
+--- a/drivers/staging/erofs/utils.c
++++ b/drivers/staging/erofs/utils.c
+@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list
+ list_del(&page->lru);
+ } else {
+ page = alloc_pages(gfp | __GFP_NOFAIL, 0);
+-
+- BUG_ON(page == NULL);
+- BUG_ON(page->mapping != NULL);
+ }
+ return page;
+ }
+@@ -58,7 +55,7 @@ repeat:
+ /* decrease refcount added by erofs_workgroup_put */
+ if (unlikely(oldcount == 1))
+ atomic_long_dec(&erofs_global_shrink_cnt);
+- BUG_ON(index != grp->index);
++ DBG_BUGON(index != grp->index);
+ }
+ rcu_read_unlock();
+ return grp;
+@@ -71,8 +68,11 @@ int erofs_register_workgroup(struct supe
+ struct erofs_sb_info *sbi;
+ int err;
+
+- /* grp->refcount should not < 1 */
+- BUG_ON(!atomic_read(&grp->refcount));
++ /* grp shouldn't be broken or used before */
++ if (unlikely(atomic_read(&grp->refcount) != 1)) {
++ DBG_BUGON(1);
++ return -EINVAL;
++ }
+
+ err = radix_tree_preload(GFP_NOFS);
+ if (err)