4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 25 Feb 2019 15:38:26 +0000 (16:38 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 25 Feb 2019 15:38:26 +0000 (16:38 +0100)
added patches:
net-avoid-false-positives-in-untrusted-gso-validation.patch
net-validate-untrusted-gso-packets-without-csum-offload.patch
netfilter-ipt_clusterip-fix-sleep-in-atomic-bug-in-clusterip_config_entry_put.patch
netfilter-ipv6-don-t-preserve-original-oif-for-loopback-address.patch
netfilter-nf_tables-fix-flush-after-rule-deletion-in-the-same-batch.patch
netfilter-nfnetlink_osf-add-missing-fmatch-check.patch
netfilter-nft_compat-use-after-free-when-deleting-targets.patch
revert-bridge-do-not-add-port-to-router-list-when-receives-query-with-source-0.0.0.0.patch
staging-erofs-add-a-full-barrier-in-erofs_workgroup_unfreeze.patch
staging-erofs-atomic_cond_read_relaxed-on-ref-locked-workgroup.patch
staging-erofs-complete-error-handing-of-z_erofs_do_read_page.patch
staging-erofs-dir-inode-super-.c-rectify-bug_ons.patch
staging-erofs-drop-multiref-support-temporarily.patch
staging-erofs-fix-a-bug-when-appling-cache-strategy.patch
staging-erofs-fix-erofs_workgroup_-try_to_freeze-unfreeze.patch
staging-erofs-remove-the-redundant-d_rehash-for-the-root-dentry.patch
staging-erofs-replace-bug_on-with-dbg_bugon-in-data.c.patch
staging-erofs-unzip_-pagevec.h-vle.c-rectify-bug_ons.patch
staging-erofs-unzip_vle_lz4.c-utils.c-rectify-bug_ons.patch

20 files changed:
queue-4.19/net-avoid-false-positives-in-untrusted-gso-validation.patch [new file with mode: 0644]
queue-4.19/net-validate-untrusted-gso-packets-without-csum-offload.patch [new file with mode: 0644]
queue-4.19/netfilter-ipt_clusterip-fix-sleep-in-atomic-bug-in-clusterip_config_entry_put.patch [new file with mode: 0644]
queue-4.19/netfilter-ipv6-don-t-preserve-original-oif-for-loopback-address.patch [new file with mode: 0644]
queue-4.19/netfilter-nf_tables-fix-flush-after-rule-deletion-in-the-same-batch.patch [new file with mode: 0644]
queue-4.19/netfilter-nfnetlink_osf-add-missing-fmatch-check.patch [new file with mode: 0644]
queue-4.19/netfilter-nft_compat-use-after-free-when-deleting-targets.patch [new file with mode: 0644]
queue-4.19/revert-bridge-do-not-add-port-to-router-list-when-receives-query-with-source-0.0.0.0.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/staging-erofs-add-a-full-barrier-in-erofs_workgroup_unfreeze.patch [new file with mode: 0644]
queue-4.19/staging-erofs-atomic_cond_read_relaxed-on-ref-locked-workgroup.patch [new file with mode: 0644]
queue-4.19/staging-erofs-complete-error-handing-of-z_erofs_do_read_page.patch [new file with mode: 0644]
queue-4.19/staging-erofs-dir-inode-super-.c-rectify-bug_ons.patch [new file with mode: 0644]
queue-4.19/staging-erofs-drop-multiref-support-temporarily.patch [new file with mode: 0644]
queue-4.19/staging-erofs-fix-a-bug-when-appling-cache-strategy.patch [new file with mode: 0644]
queue-4.19/staging-erofs-fix-erofs_workgroup_-try_to_freeze-unfreeze.patch [new file with mode: 0644]
queue-4.19/staging-erofs-remove-the-redundant-d_rehash-for-the-root-dentry.patch [new file with mode: 0644]
queue-4.19/staging-erofs-replace-bug_on-with-dbg_bugon-in-data.c.patch [new file with mode: 0644]
queue-4.19/staging-erofs-unzip_-pagevec.h-vle.c-rectify-bug_ons.patch [new file with mode: 0644]
queue-4.19/staging-erofs-unzip_vle_lz4.c-utils.c-rectify-bug_ons.patch [new file with mode: 0644]

diff --git a/queue-4.19/net-avoid-false-positives-in-untrusted-gso-validation.patch b/queue-4.19/net-avoid-false-positives-in-untrusted-gso-validation.patch
new file mode 100644 (file)
index 0000000..5471689
--- /dev/null
@@ -0,0 +1,54 @@
+From 9e8db5913264d3967b93c765a6a9e464d9c473db Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Mon, 18 Feb 2019 23:37:12 -0500
+Subject: net: avoid false positives in untrusted gso validation
+
+From: Willem de Bruijn <willemb@google.com>
+
+commit 9e8db5913264d3967b93c765a6a9e464d9c473db upstream.
+
+GSO packets with vnet_hdr must conform to a small set of gso_types.
+The below commit uses flow dissection to drop packets that do not.
+
+But it has false positives when the skb is not fully initialized.
+Dissection needs skb->protocol and skb->network_header.
+
+Infer skb->protocol from gso_type as the two must agree.
+SKB_GSO_UDP can use both ipv4 and ipv6, so try both.
+
+Exclude callers for which network header offset is not known.
+
+Fixes: d5be7f632bad ("net: validate untrusted gso packets without csum offload")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/virtio_net.h |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -61,10 +61,20 @@ static inline int virtio_net_hdr_to_skb(
+               /* gso packets without NEEDS_CSUM do not set transport_offset.
+                * probe and drop if does not match one of the above types.
+                */
+-              if (gso_type) {
++              if (gso_type && skb->network_header) {
++                      if (!skb->protocol)
++                              virtio_net_hdr_set_proto(skb, hdr);
++retry:
+                       skb_probe_transport_header(skb, -1);
+-                      if (!skb_transport_header_was_set(skb))
++                      if (!skb_transport_header_was_set(skb)) {
++                              /* UFO does not specify ipv4 or 6: try both */
++                              if (gso_type & SKB_GSO_UDP &&
++                                  skb->protocol == htons(ETH_P_IP)) {
++                                      skb->protocol = htons(ETH_P_IPV6);
++                                      goto retry;
++                              }
+                               return -EINVAL;
++                      }
+               }
+       }
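
The fix boils down to: pick the protocol the GSO metadata implies, probe, and retry with the one remaining candidate before rejecting. Below is a minimal userspace sketch of that retry loop; probe() and the proto enum are made-up stand-ins for skb_probe_transport_header() and skb->protocol, not the kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    enum proto { P_IP, P_IPV6 };

    /* stand-in for header probing: pretend only the IPv6 parse succeeds */
    static bool probe(enum proto p)
    {
        return p == P_IPV6;
    }

    /* UFO does not encode the address family, so try IPv4 first and fall
     * back to IPv6 before declaring the packet invalid */
    static int validate_udp_gso(enum proto proto)
    {
    retry:
        if (!probe(proto)) {
            if (proto == P_IP) {
                proto = P_IPV6;
                goto retry;
            }
            return -1;                  /* neither family parses: drop */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", validate_udp_gso(P_IP));     /* prints 0 */
        return 0;
    }
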
diff --git a/queue-4.19/net-validate-untrusted-gso-packets-without-csum-offload.patch b/queue-4.19/net-validate-untrusted-gso-packets-without-csum-offload.patch
new file mode 100644 (file)
index 0000000..51caafe
--- /dev/null
@@ -0,0 +1,65 @@
+From d5be7f632bad0f489879eed0ff4b99bd7fe0b74c Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Fri, 15 Feb 2019 12:15:47 -0500
+Subject: net: validate untrusted gso packets without csum offload
+
+From: Willem de Bruijn <willemb@google.com>
+
+commit d5be7f632bad0f489879eed0ff4b99bd7fe0b74c upstream.
+
+Syzkaller again found a path to a kernel crash through bad gso input,
+by building an excessively large packet that causes an skb field to wrap.
+
+If VIRTIO_NET_HDR_F_NEEDS_CSUM was set this would have been dropped in
+skb_partial_csum_set.
+
+GSO packets that do not set checksum offload are suspicious and rare.
+Most callers of virtio_net_hdr_to_skb already pass them to
+skb_probe_transport_header.
+
+Move that test forward, change it to detect parse failure and drop
+packets on failure as those clearly are not one of the legitimate
+VIRTIO_NET_HDR_GSO types.
+
+Fixes: bfd5f4a3d605 ("packet: Add GSO/csum offload support.")
+Fixes: f43798c27684 ("tun: Allow GSO using virtio_net_hdr")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/skbuff.h     |    2 +-
+ include/linux/virtio_net.h |    9 +++++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2392,7 +2392,7 @@ static inline void skb_probe_transport_h
+       if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
+               skb_set_transport_header(skb, keys.control.thoff);
+-      else
++      else if (offset_hint >= 0)
+               skb_set_transport_header(skb, offset_hint);
+ }
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -57,6 +57,15 @@ static inline int virtio_net_hdr_to_skb(
+               if (!skb_partial_csum_set(skb, start, off))
+                       return -EINVAL;
++      } else {
++              /* gso packets without NEEDS_CSUM do not set transport_offset.
++               * probe and drop if does not match one of the above types.
++               */
++              if (gso_type) {
++                      skb_probe_transport_header(skb, -1);
++                      if (!skb_transport_header_was_set(skb))
++                              return -EINVAL;
++              }
+       }
+       if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
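
The skb_probe_transport_header() hunk establishes a convention: a negative offset_hint means "no blind fallback", so when flow dissection fails nothing is set and the caller can drop the packet. A small sketch of that convention with invented names (pkt, dissect), not the real skb/flow-dissector API:

    #include <stdbool.h>

    struct pkt {
        int  transport_off;
        bool transport_set;
    };

    /* fake flow dissector: a real one would parse the packet headers */
    static bool dissect(const struct pkt *p, int *off)
    {
        (void)p; (void)off;
        return false;                   /* pretend dissection fails */
    }

    static void probe_transport(struct pkt *p, int offset_hint)
    {
        int off;

        if (dissect(p, &off)) {
            p->transport_off = off;
            p->transport_set = true;
        } else if (offset_hint >= 0) {
            /* fall back only when the caller supplied a hint */
            p->transport_off = offset_hint;
            p->transport_set = true;
        }
        /* negative hint and failed dissection: leave the header unset */
    }

    /* caller: probe with no hint and drop when nothing could be set */
    int validate_gso(struct pkt *p)
    {
        p->transport_set = false;
        probe_transport(p, -1);
        return p->transport_set ? 0 : -22;  /* -EINVAL */
    }
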
diff --git a/queue-4.19/netfilter-ipt_clusterip-fix-sleep-in-atomic-bug-in-clusterip_config_entry_put.patch b/queue-4.19/netfilter-ipt_clusterip-fix-sleep-in-atomic-bug-in-clusterip_config_entry_put.patch
new file mode 100644 (file)
index 0000000..cfab3c8
--- /dev/null
@@ -0,0 +1,137 @@
+From 2a61d8b883bbad26b06d2e6cc3777a697e78830d Mon Sep 17 00:00:00 2001
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Mon, 5 Nov 2018 18:23:13 +0900
+Subject: netfilter: ipt_CLUSTERIP: fix sleep-in-atomic bug in clusterip_config_entry_put()
+
+From: Taehee Yoo <ap420073@gmail.com>
+
+commit 2a61d8b883bbad26b06d2e6cc3777a697e78830d upstream.
+
+proc_remove() can sleep, so it must not be called inside a spin_lock.
+Hence proc_remove() is moved outside of the spin_lock, and a mutex is
+added to synchronize creation and removal of the proc entry (config->pde).
+
+test commands:
+SHELL#1
+   %while :; do iptables -A INPUT -p udp -i enp2s0 -d 192.168.1.100 \
+          --dport 9000  -j CLUSTERIP --new --hashmode sourceip \
+          --clustermac 01:00:5e:00:00:21 --total-nodes 3 --local-node 3; \
+          iptables -F; done
+
+SHELL#2
+   %while :; do echo +1 > /proc/net/ipt_CLUSTERIP/192.168.1.100; \
+          echo -1 > /proc/net/ipt_CLUSTERIP/192.168.1.100; done
+
+[ 2949.569864] BUG: sleeping function called from invalid context at kernel/sched/completion.c:99
+[ 2949.579944] in_atomic(): 1, irqs_disabled(): 0, pid: 5472, name: iptables
+[ 2949.587920] 1 lock held by iptables/5472:
+[ 2949.592711]  #0: 000000008f0ebcf2 (&(&cn->lock)->rlock){+...}, at: refcount_dec_and_lock+0x24/0x50
+[ 2949.603307] CPU: 1 PID: 5472 Comm: iptables Tainted: G        W         4.19.0-rc5+ #16
+[ 2949.604212] Hardware name: To be filled by O.E.M. To be filled by O.E.M./Aptio CRB, BIOS 5.6.5 07/08/2015
+[ 2949.604212] Call Trace:
+[ 2949.604212]  dump_stack+0xc9/0x16b
+[ 2949.604212]  ? show_regs_print_info+0x5/0x5
+[ 2949.604212]  ___might_sleep+0x2eb/0x420
+[ 2949.604212]  ? set_rq_offline.part.87+0x140/0x140
+[ 2949.604212]  ? _rcu_barrier_trace+0x400/0x400
+[ 2949.604212]  wait_for_completion+0x94/0x710
+[ 2949.604212]  ? wait_for_completion_interruptible+0x780/0x780
+[ 2949.604212]  ? __kernel_text_address+0xe/0x30
+[ 2949.604212]  ? __lockdep_init_map+0x10e/0x5c0
+[ 2949.604212]  ? __lockdep_init_map+0x10e/0x5c0
+[ 2949.604212]  ? __init_waitqueue_head+0x86/0x130
+[ 2949.604212]  ? init_wait_entry+0x1a0/0x1a0
+[ 2949.604212]  proc_entry_rundown+0x208/0x270
+[ 2949.604212]  ? proc_reg_get_unmapped_area+0x370/0x370
+[ 2949.604212]  ? __lock_acquire+0x4500/0x4500
+[ 2949.604212]  ? complete+0x18/0x70
+[ 2949.604212]  remove_proc_subtree+0x143/0x2a0
+[ 2949.708655]  ? remove_proc_entry+0x390/0x390
+[ 2949.708655]  clusterip_tg_destroy+0x27a/0x630 [ipt_CLUSTERIP]
+[ ... ]
+
+Fixes: b3e456fce9f5 ("netfilter: ipt_CLUSTERIP: fix a race condition of proc file creation")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/netfilter/ipt_CLUSTERIP.c |   19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
+@@ -56,7 +56,7 @@ struct clusterip_config {
+ #endif
+       enum clusterip_hashmode hash_mode;      /* which hashing mode */
+       u_int32_t hash_initval;                 /* hash initialization */
+-      struct rcu_head rcu;
++      struct rcu_head rcu;                    /* for call_rcu_bh */
+       struct net *net;                        /* netns for pernet list */
+       char ifname[IFNAMSIZ];                  /* device ifname */
+ };
+@@ -72,6 +72,8 @@ struct clusterip_net {
+ #ifdef CONFIG_PROC_FS
+       struct proc_dir_entry *procdir;
++      /* mutex protects the config->pde*/
++      struct mutex mutex;
+ #endif
+ };
+@@ -118,17 +120,18 @@ clusterip_config_entry_put(struct cluste
+       local_bh_disable();
+       if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
++              list_del_rcu(&c->list);
++              spin_unlock(&cn->lock);
++              local_bh_enable();
+               /* In case anyone still accesses the file, the open/close
+                * functions are also incrementing the refcount on their own,
+                * so it's safe to remove the entry even if it's in use. */
+ #ifdef CONFIG_PROC_FS
++              mutex_lock(&cn->mutex);
+               if (cn->procdir)
+                       proc_remove(c->pde);
++              mutex_unlock(&cn->mutex);
+ #endif
+-              list_del_rcu(&c->list);
+-              spin_unlock(&cn->lock);
+-              local_bh_enable();
+-
+               return;
+       }
+       local_bh_enable();
+@@ -278,9 +281,11 @@ clusterip_config_init(struct net *net, c
+               /* create proc dir entry */
+               sprintf(buffer, "%pI4", &ip);
++              mutex_lock(&cn->mutex);
+               c->pde = proc_create_data(buffer, 0600,
+                                         cn->procdir,
+                                         &clusterip_proc_fops, c);
++              mutex_unlock(&cn->mutex);
+               if (!c->pde) {
+                       err = -ENOMEM;
+                       goto err;
+@@ -833,6 +838,7 @@ static int clusterip_net_init(struct net
+               pr_err("Unable to proc dir entry\n");
+               return -ENOMEM;
+       }
++      mutex_init(&cn->mutex);
+ #endif /* CONFIG_PROC_FS */
+       return 0;
+@@ -841,9 +847,12 @@ static int clusterip_net_init(struct net
+ static void clusterip_net_exit(struct net *net)
+ {
+       struct clusterip_net *cn = clusterip_pernet(net);
++
+ #ifdef CONFIG_PROC_FS
++      mutex_lock(&cn->mutex);
+       proc_remove(cn->procdir);
+       cn->procdir = NULL;
++      mutex_unlock(&cn->mutex);
+ #endif
+       nf_unregister_net_hook(net, &cip_arp_ops);
+ }
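
The shape of the fix is generic: unlink the object from the shared list under the spinlock, drop the lock, and only then run the teardown that may sleep, serialized against creation by a mutex. A compressed pthread sketch of that ordering (illustrative only; entry, pde and teardown_pde() are invented, and proc_remove() is merely modeled by a comment):

    #include <pthread.h>
    #include <stdlib.h>

    struct entry {
        struct entry *next;
        void *pde;                          /* resource whose teardown may sleep */
    };

    static pthread_spinlock_t list_lock;    /* protects the list only */
    static pthread_mutex_t pde_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *head;

    void entries_init(void)
    {
        pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
    }

    static void teardown_pde(void *pde)
    {
        free(pde);                          /* models proc_remove(): may sleep */
    }

    void entry_put(struct entry *e)
    {
        struct entry **pp;

        /* fast, non-sleeping part under the spinlock: unlink only */
        pthread_spin_lock(&list_lock);
        for (pp = &head; *pp; pp = &(*pp)->next) {
            if (*pp == e) {
                *pp = e->next;
                break;
            }
        }
        pthread_spin_unlock(&list_lock);

        /* sleeping teardown outside the spinlock, serialized against
         * creation of new entries by the mutex */
        pthread_mutex_lock(&pde_mutex);
        teardown_pde(e->pde);
        pthread_mutex_unlock(&pde_mutex);
        free(e);
    }
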
diff --git a/queue-4.19/netfilter-ipv6-don-t-preserve-original-oif-for-loopback-address.patch b/queue-4.19/netfilter-ipv6-don-t-preserve-original-oif-for-loopback-address.patch
new file mode 100644 (file)
index 0000000..7a47eb3
--- /dev/null
@@ -0,0 +1,45 @@
+From 15df03c661cb362366ecfc3a21820cb934f3e4ca Mon Sep 17 00:00:00 2001
+From: Eli Cooper <elicooper@gmx.com>
+Date: Mon, 21 Jan 2019 18:45:27 +0800
+Subject: netfilter: ipv6: Don't preserve original oif for loopback address
+
+From: Eli Cooper <elicooper@gmx.com>
+
+commit 15df03c661cb362366ecfc3a21820cb934f3e4ca upstream.
+
+Commit 508b09046c0f ("netfilter: ipv6: Preserve link scope traffic
+original oif") made ip6_route_me_harder() keep the original oif for
+link-local and multicast packets. However, it also affected packets
+for the loopback address because it used rt6_need_strict().
+
+REDIRECT rules in the OUTPUT chain rewrite the destination to loopback
+address; thus its oif should not be preserved. This commit fixes the bug
+that redirected local packets are being dropped. Actually the packet was
+not exactly dropped; instead it was sent out via the original oif rather
+than lo. When a packet with daddr ::1 is sent to the router, it is
+effectively dropped.
+
+Fixes: 508b09046c0f ("netfilter: ipv6: Preserve link scope traffic original oif")
+Signed-off-by: Eli Cooper <elicooper@gmx.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv6/netfilter.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net,
+       struct sock *sk = sk_to_full_sk(skb->sk);
+       unsigned int hh_len;
+       struct dst_entry *dst;
++      int strict = (ipv6_addr_type(&iph->daddr) &
++                    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
+       struct flowi6 fl6 = {
+               .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
+-                      rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
++                      strict ? skb_dst(skb)->dev->ifindex : 0,
+               .flowi6_mark = skb->mark,
+               .flowi6_uid = sock_net_uid(net, sk),
+               .daddr = iph->daddr,
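
The new check keeps the original oif only for multicast and link-local destinations, so a loopback daddr written by REDIRECT is re-routed via lo. A tiny sketch of that classification with simplified flag values (not the kernel's ipv6_addr_type() constants):

    #include <stdbool.h>

    /* simplified address-type flags; the kernel uses ipv6_addr_type() */
    #define ADDR_MULTICAST  0x01
    #define ADDR_LINKLOCAL  0x02
    #define ADDR_LOOPBACK   0x04

    /* keep the original oif only for link-local and multicast daddrs;
     * the old rt6_need_strict()-style check also matched ADDR_LOOPBACK,
     * which is exactly what kept redirected packets on the wrong device */
    bool keep_original_oif(unsigned int addr_type)
    {
        return addr_type & (ADDR_MULTICAST | ADDR_LINKLOCAL);
    }
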
diff --git a/queue-4.19/netfilter-nf_tables-fix-flush-after-rule-deletion-in-the-same-batch.patch b/queue-4.19/netfilter-nf_tables-fix-flush-after-rule-deletion-in-the-same-batch.patch
new file mode 100644 (file)
index 0000000..57f20ea
--- /dev/null
@@ -0,0 +1,35 @@
+From 23b7ca4f745f21c2b9cfcb67fdd33733b3ae7e66 Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 15 Feb 2019 12:50:24 +0100
+Subject: netfilter: nf_tables: fix flush after rule deletion in the same batch
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit 23b7ca4f745f21c2b9cfcb67fdd33733b3ae7e66 upstream.
+
+Flush after rule deletion bogusly hits -ENOENT. Skip already-deleted
+rules in nft_delrule_by_chain(), which is always called from the
+flush path.
+
+Fixes: cf9dc09d0949 ("netfilter: nf_tables: fix missing rules flushing per table")
+Reported-by: Phil Sutter <phil@nwl.cc>
+Acked-by: Phil Sutter <phil@nwl.cc>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nf_tables_api.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -291,6 +291,9 @@ static int nft_delrule_by_chain(struct n
+       int err;
+       list_for_each_entry(rule, &ctx->chain->rules, list) {
++              if (!nft_is_active_next(ctx->net, rule))
++                      continue;
++
+               err = nft_delrule(ctx, rule);
+               if (err < 0)
+                       return err;
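
The loop change is the usual "skip entries already dying in this batch" pattern. A generic sketch with invented types (rule, active_next) standing in for nft_is_active_next()/nft_delrule():

    /* invented stand-ins for nft rules; active_next is 0 once a deletion
     * has been queued earlier in the same batch */
    struct rule {
        struct rule *next;
        int active_next;
    };

    static int del_rule(struct rule *r)
    {
        if (!r->active_next)
            return -2;              /* -ENOENT: already queued for deletion */
        r->active_next = 0;
        return 0;
    }

    /* flush path: skip rules already deleted in this batch instead of
     * letting del_rule() fail with -ENOENT */
    int flush_chain(struct rule *head)
    {
        struct rule *r;
        int err;

        for (r = head; r; r = r->next) {
            if (!r->active_next)
                continue;
            err = del_rule(r);
            if (err < 0)
                return err;
        }
        return 0;
    }
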
diff --git a/queue-4.19/netfilter-nfnetlink_osf-add-missing-fmatch-check.patch b/queue-4.19/netfilter-nfnetlink_osf-add-missing-fmatch-check.patch
new file mode 100644 (file)
index 0000000..284d256
--- /dev/null
@@ -0,0 +1,57 @@
+From 1a6a0951fc009f6d9fe8ebea2d2417d80d54097b Mon Sep 17 00:00:00 2001
+From: Fernando Fernandez Mancera <ffmancera@riseup.net>
+Date: Mon, 21 Jan 2019 12:53:21 +0100
+Subject: netfilter: nfnetlink_osf: add missing fmatch check
+
+From: Fernando Fernandez Mancera <ffmancera@riseup.net>
+
+commit 1a6a0951fc009f6d9fe8ebea2d2417d80d54097b upstream.
+
+When we check the tcp options of a packet and it doesn't match the current
+fingerprint, the tcp packet option pointer must be restored to its initial
+value in order to do the proper tcp options check for the next fingerprint.
+
+Here we can see an example.
+Assuming the following fingerprint base with two lines:
+
+S10:64:1:60:M*,S,T,N,W6:      Linux:3.0::Linux 3.0
+S20:64:1:60:M*,S,T,N,W7:      Linux:4.19:arch:Linux 4.1
+
+Since the TCP options are the last field in the OS signature, the two lines
+overlap entirely except for the last option, i.e. 'W6' versus 'W7'.
+
+When a packet for Linux 4.19 comes in, the osf match finds no match because
+the TCP options pointer has already been advanced while checking the TCP
+options of the first line.
+
+Therefore, reset pointer back to where it should be.
+
+Fixes: 11eeef41d5f6 ("netfilter: passive OS fingerprint xtables match")
+Signed-off-by: Fernando Fernandez Mancera <ffmancera@riseup.net>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nfnetlink_osf.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/netfilter/nfnetlink_osf.c
++++ b/net/netfilter/nfnetlink_osf.c
+@@ -71,6 +71,7 @@ static bool nf_osf_match_one(const struc
+                            int ttl_check,
+                            struct nf_osf_hdr_ctx *ctx)
+ {
++      const __u8 *optpinit = ctx->optp;
+       unsigned int check_WSS = 0;
+       int fmatch = FMATCH_WRONG;
+       int foptsize, optnum;
+@@ -160,6 +161,9 @@ static bool nf_osf_match_one(const struc
+               }
+       }
++      if (fmatch != FMATCH_OK)
++              ctx->optp = optpinit;
++
+       return fmatch == FMATCH_OK;
+ }
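
The fix is a plain save-and-restore of a parse cursor: remember where the options started before a trial match and put the pointer back if the trial fails, so the next fingerprint sees the options from the beginning. A minimal sketch with a fake try_match() in place of the real option comparison:

    #include <stdbool.h>
    #include <string.h>

    struct osf_ctx {
        const char *optp;               /* cursor into the TCP options */
    };

    /* fake per-fingerprint option check: consumes input while comparing */
    static bool try_match(struct osf_ctx *ctx, const char *fp)
    {
        ctx->optp += strlen(fp);
        return false;                   /* pretend this fingerprint did not match */
    }

    bool match_one(struct osf_ctx *ctx, const char *fp)
    {
        const char *optpinit = ctx->optp;   /* save the cursor */
        bool ok = try_match(ctx, fp);

        if (!ok)
            ctx->optp = optpinit;       /* restore it for the next fingerprint */
        return ok;
    }
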
diff --git a/queue-4.19/netfilter-nft_compat-use-after-free-when-deleting-targets.patch b/queue-4.19/netfilter-nft_compat-use-after-free-when-deleting-targets.patch
new file mode 100644 (file)
index 0000000..0488264
--- /dev/null
@@ -0,0 +1,39 @@
+From 753c111f655e38bbd52fc01321266633f022ebe2 Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Wed, 13 Feb 2019 13:03:53 +0100
+Subject: netfilter: nft_compat: use-after-free when deleting targets
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit 753c111f655e38bbd52fc01321266633f022ebe2 upstream.
+
+Fetch pointer to module before target object is released.
+
+Fixes: 29e3880109e3 ("netfilter: nf_tables: fix use-after-free when deleting compat expressions")
+Fixes: 0ca743a55991 ("netfilter: nf_tables: add compatibility layer for x_tables")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nft_compat.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -282,6 +282,7 @@ nft_target_destroy(const struct nft_ctx
+ {
+       struct xt_target *target = expr->ops->data;
+       void *info = nft_expr_priv(expr);
++      struct module *me = target->me;
+       struct xt_tgdtor_param par;
+       par.net = ctx->net;
+@@ -292,7 +293,7 @@ nft_target_destroy(const struct nft_ctx
+               par.target->destroy(&par);
+       if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
+-              module_put(target->me);
++              module_put(me);
+ }
+ static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
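
This is the classic use-after-free avoidance: copy whatever you still need out of an object before the call that may free it. A tiny standalone sketch of the same ordering with simplified types (not the real xt_target/nft_xt structures):

    struct module { int refcnt; };

    struct target {
        struct module *me;
        void (*destroy)(struct target *t);  /* may free t */
    };

    static void module_put(struct module *m)
    {
        m->refcnt--;
    }

    void target_destroy(struct target *t)
    {
        /* read the module pointer first: after ->destroy() runs, t must
         * not be dereferenced again */
        struct module *me = t->me;

        if (t->destroy)
            t->destroy(t);

        module_put(me);                 /* safe even if t is gone */
    }
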
diff --git a/queue-4.19/revert-bridge-do-not-add-port-to-router-list-when-receives-query-with-source-0.0.0.0.patch b/queue-4.19/revert-bridge-do-not-add-port-to-router-list-when-receives-query-with-source-0.0.0.0.patch
new file mode 100644 (file)
index 0000000..1319fc2
--- /dev/null
@@ -0,0 +1,56 @@
+From 278e2148c07559dd4ad8602f22366d61eb2ee7b7 Mon Sep 17 00:00:00 2001
+From: Hangbin Liu <liuhangbin@gmail.com>
+Date: Fri, 22 Feb 2019 21:22:32 +0800
+Subject: Revert "bridge: do not add port to router list when receives query with source 0.0.0.0"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+commit 278e2148c07559dd4ad8602f22366d61eb2ee7b7 upstream.
+
+This reverts commit 5a2de63fd1a5 ("bridge: do not add port to router list
+when receives query with source 0.0.0.0") and commit 0fe5119e267f ("net:
+bridge: remove ipv6 zero address check in mcast queries")
+
+The reason is that RFC 4541 is not a standard but only a suggestion.
+Currently we elect 0.0.0.0 as Querier if there is no IP address configured
+on the bridge. If we do not add the port which receives a query with source
+0.0.0.0 to the router list, the IGMP reports will not be forwarded to the
+Querier, and IGMP data will also not be forwarded to its destination.
+
+As Nikolay suggested, revert this change first and add a boolopt API
+to disable non-zero election in the future if needed.
+
+Reported-by: Linus Lüssing <linus.luessing@c0d3.blue>
+Reported-by: Sebastian Gottschall <s.gottschall@newmedia-net.de>
+Fixes: 5a2de63fd1a5 ("bridge: do not add port to router list when receives query with source 0.0.0.0")
+Fixes: 0fe5119e267f ("net: bridge: remove ipv6 zero address check in mcast queries")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Acked-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bridge/br_multicast.c |    9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1420,14 +1420,7 @@ static void br_multicast_query_received(
+               return;
+       br_multicast_update_query_timer(br, query, max_delay);
+-
+-      /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
+-       * the arrival port for IGMP Queries where the source address
+-       * is 0.0.0.0 should not be added to router port list.
+-       */
+-      if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
+-          saddr->proto == htons(ETH_P_IPV6))
+-              br_multicast_mark_router(br, port);
++      br_multicast_mark_router(br, port);
+ }
+ static void br_ip4_multicast_query(struct net_bridge *br,
diff --git a/queue-4.19/series b/queue-4.19/series
index c1b6ad4c55d346115d687fcc4511eb265fe60fd2..18672d267bd1b701bfe509dd9a4b45ce3689dd5b 100644 (file)
@@ -129,3 +129,22 @@ drm-i915-fbdev-actually-configure-untiled-displays.patch
 drm-amd-display-fix-mst-reboot-poweroff-sequence.patch
 mac80211-allocate-tailroom-for-forwarded-mesh-packets.patch
 kvm-x86-return-la57-feature-based-on-hardware-capability.patch
+net-validate-untrusted-gso-packets-without-csum-offload.patch
+net-avoid-false-positives-in-untrusted-gso-validation.patch
+staging-erofs-fix-a-bug-when-appling-cache-strategy.patch
+staging-erofs-complete-error-handing-of-z_erofs_do_read_page.patch
+staging-erofs-replace-bug_on-with-dbg_bugon-in-data.c.patch
+staging-erofs-drop-multiref-support-temporarily.patch
+staging-erofs-remove-the-redundant-d_rehash-for-the-root-dentry.patch
+staging-erofs-atomic_cond_read_relaxed-on-ref-locked-workgroup.patch
+staging-erofs-fix-erofs_workgroup_-try_to_freeze-unfreeze.patch
+staging-erofs-add-a-full-barrier-in-erofs_workgroup_unfreeze.patch
+staging-erofs-dir-inode-super-.c-rectify-bug_ons.patch
+staging-erofs-unzip_-pagevec.h-vle.c-rectify-bug_ons.patch
+staging-erofs-unzip_vle_lz4.c-utils.c-rectify-bug_ons.patch
+revert-bridge-do-not-add-port-to-router-list-when-receives-query-with-source-0.0.0.0.patch
+netfilter-nf_tables-fix-flush-after-rule-deletion-in-the-same-batch.patch
+netfilter-nft_compat-use-after-free-when-deleting-targets.patch
+netfilter-ipv6-don-t-preserve-original-oif-for-loopback-address.patch
+netfilter-nfnetlink_osf-add-missing-fmatch-check.patch
+netfilter-ipt_clusterip-fix-sleep-in-atomic-bug-in-clusterip_config_entry_put.patch
diff --git a/queue-4.19/staging-erofs-add-a-full-barrier-in-erofs_workgroup_unfreeze.patch b/queue-4.19/staging-erofs-add-a-full-barrier-in-erofs_workgroup_unfreeze.patch
new file mode 100644 (file)
index 0000000..6dd7e29
--- /dev/null
@@ -0,0 +1,34 @@
+From 948bbdb1818b7ad6e539dad4fbd2dd4650793ea9 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Fri, 23 Nov 2018 01:16:03 +0800
+Subject: staging: erofs: add a full barrier in erofs_workgroup_unfreeze
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 948bbdb1818b7ad6e539dad4fbd2dd4650793ea9 upstream.
+
+Just like other generic locks, insert a full barrier
+in case of memory reordering.
+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/internal.h |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/staging/erofs/internal.h
++++ b/drivers/staging/erofs/internal.h
+@@ -199,6 +199,11 @@ static inline bool erofs_workgroup_try_t
+ static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+                                           int orig_val)
+ {
++      /*
++       * other observers should notice all modifications
++       * in the freezing period.
++       */
++      smp_mb();
+       atomic_set(&grp->refcount, orig_val);
+       preempt_enable();
+ }
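
The barrier makes unfreeze publish everything written during the freeze period before the refcount becomes visible again. A C11-atomics approximation of that ordering (illustrative; the kernel uses smp_mb() plus atomic_set(), not these exact primitives):

    #include <stdatomic.h>

    struct workgroup {
        atomic_int refcount;
        int payload;                    /* modified while the workgroup is frozen */
    };

    /* publish all stores made during the freeze period before the refcount
     * becomes visible again; mirrors the smp_mb() + atomic_set() pairing */
    void workgroup_unfreeze(struct workgroup *grp, int orig_val)
    {
        atomic_thread_fence(memory_order_seq_cst);
        atomic_store_explicit(&grp->refcount, orig_val, memory_order_relaxed);
    }
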
diff --git a/queue-4.19/staging-erofs-atomic_cond_read_relaxed-on-ref-locked-workgroup.patch b/queue-4.19/staging-erofs-atomic_cond_read_relaxed-on-ref-locked-workgroup.patch
new file mode 100644 (file)
index 0000000..5bc48f7
--- /dev/null
@@ -0,0 +1,65 @@
+From df134b8d17b90c1e7720e318d36416b57424ff7a Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Fri, 23 Nov 2018 01:16:01 +0800
+Subject: staging: erofs: atomic_cond_read_relaxed on ref-locked workgroup
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit df134b8d17b90c1e7720e318d36416b57424ff7a upstream.
+
+It's better to use atomic_cond_read_relaxed, which is implemented
+with hardware instructions that monitor a variable for changes (currently
+on ARM64), instead of open-coded busy waiting.
+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/internal.h |   30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+--- a/drivers/staging/erofs/internal.h
++++ b/drivers/staging/erofs/internal.h
+@@ -211,23 +211,29 @@ static inline void erofs_workgroup_unfre
+       preempt_enable();
+ }
++#if defined(CONFIG_SMP)
++static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
++{
++      return atomic_cond_read_relaxed(&grp->refcount,
++                                      VAL != EROFS_LOCKED_MAGIC);
++}
++#else
++static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
++{
++      int v = atomic_read(&grp->refcount);
++
++      /* workgroup is never freezed on uniprocessor systems */
++      DBG_BUGON(v == EROFS_LOCKED_MAGIC);
++      return v;
++}
++#endif
++
+ static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
+ {
+-      const int locked = (int)EROFS_LOCKED_MAGIC;
+       int o;
+ repeat:
+-      o = atomic_read(&grp->refcount);
+-
+-      /* spin if it is temporarily locked at the reclaim path */
+-      if (unlikely(o == locked)) {
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+-              do
+-                      cpu_relax();
+-              while (atomic_read(&grp->refcount) == locked);
+-#endif
+-              goto repeat;
+-      }
++      o = erofs_wait_on_workgroup_freezed(grp);
+       if (unlikely(o <= 0))
+               return -1;
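
atomic_cond_read_relaxed() is a kernel macro that waits, with architecture help on arm64, until an atomic variable no longer holds a given value. A plain C11 approximation of the idea (it can only busy-poll; the hardware-assisted waiting the commit mentions has no portable userspace equivalent):

    #include <stdatomic.h>

    #define LOCKED_MAGIC  (-12345)      /* stand-in for EROFS_LOCKED_MAGIC */

    /* wait until the refcount holds something other than the lock value,
     * then hand that value back; relaxed loads suffice for the polling */
    int wait_on_workgroup_frozen(atomic_int *refcount)
    {
        int v;

        do {
            v = atomic_load_explicit(refcount, memory_order_relaxed);
        } while (v == LOCKED_MAGIC);

        return v;
    }
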
diff --git a/queue-4.19/staging-erofs-complete-error-handing-of-z_erofs_do_read_page.patch b/queue-4.19/staging-erofs-complete-error-handing-of-z_erofs_do_read_page.patch
new file mode 100644 (file)
index 0000000..41bf81f
--- /dev/null
@@ -0,0 +1,91 @@
+From 1e05ff36e6921ca61bdbf779f81a602863569ee3 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Tue, 18 Sep 2018 22:27:25 +0800
+Subject: staging: erofs: complete error handing of z_erofs_do_read_page
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 1e05ff36e6921ca61bdbf779f81a602863569ee3 upstream.
+
+This patch completes the error handling code of z_erofs_do_read_page.
+PG_error will be set when a read error happens, therefore
+z_erofs_onlinepage_endio will unlock this page without setting
+PG_uptodate.
+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/unzip_vle.c |   20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -606,7 +606,7 @@ static int z_erofs_do_read_page(struct z
+       enum z_erofs_page_type page_type;
+       unsigned cur, end, spiltted, index;
+-      int err;
++      int err = 0;
+       /* register locked file pages as online pages in pack */
+       z_erofs_onlinepage_init(page);
+@@ -633,12 +633,11 @@ repeat:
+       if (unlikely(err))
+               goto err_out;
+-      /* deal with hole (FIXME! broken now) */
+       if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
+               goto hitted;
+       DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
+-      BUG_ON(erofs_blkoff(map->m_pa));
++      DBG_BUGON(erofs_blkoff(map->m_pa));
+       err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
+       if (unlikely(err))
+@@ -683,7 +682,7 @@ retry:
+               err = z_erofs_vle_work_add_page(builder,
+                       newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
+-              if (!err)
++              if (likely(!err))
+                       goto retry;
+       }
+@@ -694,9 +693,10 @@ retry:
+       /* FIXME! avoid the last relundant fixup & endio */
+       z_erofs_onlinepage_fixup(page, index, true);
+-      ++spiltted;
+-      /* also update nr_pages and increase queued_pages */
++      /* bump up the number of spiltted parts of a page */
++      ++spiltted;
++      /* also update nr_pages */
+       work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
+ next_part:
+       /* can be used for verification */
+@@ -706,16 +706,18 @@ next_part:
+       if (end > 0)
+               goto repeat;
++out:
+       /* FIXME! avoid the last relundant fixup & endio */
+       z_erofs_onlinepage_endio(page);
+       debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
+               __func__, page, spiltted, map->m_llen);
+-      return 0;
++      return err;
++      /* if some error occurred while processing this page */
+ err_out:
+-      /* TODO: the missing error handing cases */
+-      return err;
++      SetPageError(page);
++      goto out;
+ }
+ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
diff --git a/queue-4.19/staging-erofs-dir-inode-super-.c-rectify-bug_ons.patch b/queue-4.19/staging-erofs-dir-inode-super-.c-rectify-bug_ons.patch
new file mode 100644 (file)
index 0000000..e405440
--- /dev/null
@@ -0,0 +1,107 @@
+From 8b987bca2d09649683cbe496419a011df8c08493 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Wed, 5 Dec 2018 21:23:13 +0800
+Subject: staging: erofs: {dir,inode,super}.c: rectify BUG_ONs
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 8b987bca2d09649683cbe496419a011df8c08493 upstream.
+
+Remove all redundant BUG_ONs, and turn the remaining
+useful ones into DBG_BUGONs.
+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/dir.c   |    7 +++++--
+ drivers/staging/erofs/inode.c |   10 ++++++++--
+ drivers/staging/erofs/super.c |   13 ++++++-------
+ 3 files changed, 19 insertions(+), 11 deletions(-)
+
+--- a/drivers/staging/erofs/dir.c
++++ b/drivers/staging/erofs/dir.c
+@@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct di
+                       strnlen(de_name, maxsize - nameoff) :
+                       le16_to_cpu(de[1].nameoff) - nameoff;
+-              /* the corrupted directory found */
+-              BUG_ON(de_namelen < 0);
++              /* a corrupted entry is found */
++              if (unlikely(de_namelen < 0)) {
++                      DBG_BUGON(1);
++                      return -EIO;
++              }
+ #ifdef CONFIG_EROFS_FS_DEBUG
+               dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
+--- a/drivers/staging/erofs/inode.c
++++ b/drivers/staging/erofs/inode.c
+@@ -132,7 +132,13 @@ static int fill_inline_data(struct inode
+                       return -ENOMEM;
+               m_pofs += vi->inode_isize + vi->xattr_isize;
+-              BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
++
++              /* inline symlink data shouldn't across page boundary as well */
++              if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
++                      DBG_BUGON(1);
++                      kfree(lnk);
++                      return -EIO;
++              }
+               /* get in-page inline data */
+               memcpy(lnk, data + m_pofs, inode->i_size);
+@@ -170,7 +176,7 @@ static int fill_inode(struct inode *inod
+               return PTR_ERR(page);
+       }
+-      BUG_ON(!PageUptodate(page));
++      DBG_BUGON(!PageUptodate(page));
+       data = page_address(page);
+       err = read_inode(inode, data + ofs);
+--- a/drivers/staging/erofs/super.c
++++ b/drivers/staging/erofs/super.c
+@@ -40,7 +40,6 @@ static int erofs_init_inode_cache(void)
+ static void erofs_exit_inode_cache(void)
+ {
+-      BUG_ON(erofs_inode_cachep == NULL);
+       kmem_cache_destroy(erofs_inode_cachep);
+ }
+@@ -265,8 +264,8 @@ static int managed_cache_releasepage(str
+       int ret = 1;    /* 0 - busy */
+       struct address_space *const mapping = page->mapping;
+-      BUG_ON(!PageLocked(page));
+-      BUG_ON(mapping->a_ops != &managed_cache_aops);
++      DBG_BUGON(!PageLocked(page));
++      DBG_BUGON(mapping->a_ops != &managed_cache_aops);
+       if (PagePrivate(page))
+               ret = erofs_try_to_free_cached_page(mapping, page);
+@@ -279,10 +278,10 @@ static void managed_cache_invalidatepage
+ {
+       const unsigned int stop = length + offset;
+-      BUG_ON(!PageLocked(page));
++      DBG_BUGON(!PageLocked(page));
+-      /* Check for overflow */
+-      BUG_ON(stop > PAGE_SIZE || stop < length);
++      /* Check for potential overflow in debug mode */
++      DBG_BUGON(stop > PAGE_SIZE || stop < length);
+       if (offset == 0 && stop == PAGE_SIZE)
+               while (!managed_cache_releasepage(page, GFP_NOFS))
+@@ -619,7 +618,7 @@ static int erofs_show_options(struct seq
+ static int erofs_remount(struct super_block *sb, int *flags, char *data)
+ {
+-      BUG_ON(!sb_rdonly(sb));
++      DBG_BUGON(!sb_rdonly(sb));
+       *flags |= SB_RDONLY;
+       return 0;
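
The conversion repeated across these erofs patches is: a condition that used to crash the machine via BUG_ON() becomes a debug-only assertion plus a graceful error return. A sketch of that pattern with a hypothetical DBG_BUGON() built on assert(); the real erofs macro is tied to CONFIG_EROFS_FS_DEBUG:

    #include <assert.h>
    #include <errno.h>

    /* debug builds assert loudly, release builds fall through to the error
     * path instead of crashing the machine */
    #ifdef DEBUG
    #define DBG_BUGON(cond) assert(!(cond))
    #else
    #define DBG_BUGON(cond) ((void)(cond))
    #endif

    /* e.g. a corrupted on-disk directory entry: report -EIO, don't BUG() */
    int check_de_namelen(int de_namelen)
    {
        if (de_namelen < 0) {
            DBG_BUGON(1);
            return -EIO;
        }
        return de_namelen;
    }
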
diff --git a/queue-4.19/staging-erofs-drop-multiref-support-temporarily.patch b/queue-4.19/staging-erofs-drop-multiref-support-temporarily.patch
new file mode 100644 (file)
index 0000000..a34764a
--- /dev/null
@@ -0,0 +1,183 @@
+From e5e3abbadf0dbd1068f64f8abe70401c5a178180 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Wed, 19 Sep 2018 13:49:07 +0800
+Subject: staging: erofs: drop multiref support temporarily
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit e5e3abbadf0dbd1068f64f8abe70401c5a178180 upstream.
+
+Multiref support means that a compressed page could have
+more than one reference, which is designed for on-disk data
+deduplication. However, mkfs doesn't support this mode
+at this moment, and the kernel implementation is also broken.
+
+Let's drop multiref support for now. This change can be reverted
+later once multiref is fully implemented.
+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/staging/erofs/unzip_vle.c |   40 ++++++--------------------------------
+ drivers/staging/erofs/unzip_vle.h |   12 -----------
+ 2 files changed, 8 insertions(+), 44 deletions(-)
+
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -293,12 +293,9 @@ z_erofs_vle_work_lookup(struct super_blo
+       *grp_ret = grp = container_of(egrp,
+               struct z_erofs_vle_workgroup, obj);
+-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+       work = z_erofs_vle_grab_work(grp, pageofs);
++      /* if multiref is disabled, `primary' is always true */
+       primary = true;
+-#else
+-      BUG();
+-#endif
+       DBG_BUGON(work->pageofs != pageofs);
+@@ -365,12 +362,9 @@ z_erofs_vle_work_register(struct super_b
+       struct z_erofs_vle_workgroup *grp = *grp_ret;
+       struct z_erofs_vle_work *work;
+-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
++      /* if multiref is disabled, grp should never be nullptr */
+       BUG_ON(grp != NULL);
+-#else
+-      if (grp != NULL)
+-              goto skip;
+-#endif
++
+       /* no available workgroup, let's allocate one */
+       grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
+       if (unlikely(grp == NULL))
+@@ -393,13 +387,7 @@ z_erofs_vle_work_register(struct super_b
+       *hosted = true;
+       newgrp = true;
+-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
+-skip:
+-      /* currently unimplemented */
+-      BUG();
+-#else
+       work = z_erofs_vle_grab_primary_work(grp);
+-#endif
+       work->pageofs = pageofs;
+       mutex_init(&work->lock);
+@@ -798,10 +786,8 @@ static int z_erofs_vle_unzip(struct supe
+       const unsigned clusterpages = erofs_clusterpages(sbi);
+       struct z_erofs_pagevec_ctor ctor;
+-      unsigned nr_pages;
+-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+-      unsigned sparsemem_pages = 0;
+-#endif
++      unsigned int nr_pages;
++      unsigned int sparsemem_pages = 0;
+       struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
+       struct page **pages, **compressed_pages, *page;
+       unsigned i, llen;
+@@ -813,11 +799,7 @@ static int z_erofs_vle_unzip(struct supe
+       int err;
+       might_sleep();
+-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+       work = z_erofs_vle_grab_primary_work(grp);
+-#else
+-      BUG();
+-#endif
+       BUG_ON(!READ_ONCE(work->nr_pages));
+       mutex_lock(&work->lock);
+@@ -868,13 +850,11 @@ repeat:
+                       pagenr = z_erofs_onlinepage_index(page);
+               BUG_ON(pagenr >= nr_pages);
+-
+-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+               BUG_ON(pages[pagenr] != NULL);
+-              ++sparsemem_pages;
+-#endif
++
+               pages[pagenr] = page;
+       }
++      sparsemem_pages = i;
+       z_erofs_pagevec_ctor_exit(&ctor, true);
+@@ -904,10 +884,8 @@ repeat:
+               pagenr = z_erofs_onlinepage_index(page);
+               BUG_ON(pagenr >= nr_pages);
+-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+               BUG_ON(pages[pagenr] != NULL);
+               ++sparsemem_pages;
+-#endif
+               pages[pagenr] = page;
+               overlapped = true;
+@@ -933,12 +911,10 @@ repeat:
+       if (err != -ENOTSUPP)
+               goto out_percpu;
+-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+       if (sparsemem_pages >= nr_pages) {
+               BUG_ON(sparsemem_pages > nr_pages);
+               goto skip_allocpage;
+       }
+-#endif
+       for (i = 0; i < nr_pages; ++i) {
+               if (pages[i] != NULL)
+@@ -947,9 +923,7 @@ repeat:
+               pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
+       }
+-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+ skip_allocpage:
+-#endif
+       vout = erofs_vmap(pages, nr_pages);
+       err = z_erofs_vle_unzip_vmap(compressed_pages,
+--- a/drivers/staging/erofs/unzip_vle.h
++++ b/drivers/staging/erofs/unzip_vle.h
+@@ -47,13 +47,6 @@ static inline bool z_erofs_gather_if_sta
+ #define Z_EROFS_VLE_INLINE_PAGEVECS     3
+ struct z_erofs_vle_work {
+-      /* struct z_erofs_vle_work *left, *right; */
+-
+-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
+-      struct list_head list;
+-
+-      atomic_t refcount;
+-#endif
+       struct mutex lock;
+       /* I: decompression offset in page */
+@@ -107,10 +100,8 @@ static inline void z_erofs_vle_set_workg
+       grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
+ }
+-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
+-#error multiref decompression is unimplemented yet
+-#else
++/* definitions if multiref is disabled */
+ #define z_erofs_vle_grab_primary_work(grp)    (&(grp)->work)
+ #define z_erofs_vle_grab_work(grp, pageofs)   (&(grp)->work)
+ #define z_erofs_vle_work_workgroup(wrk, primary)      \
+@@ -118,7 +109,6 @@ static inline void z_erofs_vle_set_workg
+               struct z_erofs_vle_workgroup, work) : \
+               ({ BUG(); (void *)NULL; }))
+-#endif
+ #define Z_EROFS_WORKGROUP_SIZE       sizeof(struct z_erofs_vle_workgroup)
diff --git a/queue-4.19/staging-erofs-fix-a-bug-when-appling-cache-strategy.patch b/queue-4.19/staging-erofs-fix-a-bug-when-appling-cache-strategy.patch
new file mode 100644 (file)
index 0000000..3517b1d
--- /dev/null
@@ -0,0 +1,38 @@
+From 0734ffbf574ee813b20899caef2fe0ed502bb783 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Tue, 18 Sep 2018 22:25:36 +0800
+Subject: staging: erofs: fix a bug when appling cache strategy
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 0734ffbf574ee813b20899caef2fe0ed502bb783 upstream.
+
+As described in Kconfig, the last compressed pack should be cached
+for further reading for either `EROFS_FS_ZIP_CACHE_UNIPOLAR' or
+`EROFS_FS_ZIP_CACHE_BIPOLAR' by design.
+
+However, there is a bug in z_erofs_do_read_page: it switches
+`initial' to `false' at the very beginning, before it decides
+whether to cache the last compressed pack.
+
+The caching strategy should work properly after applying this patch.
+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/unzip_vle.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -624,7 +624,7 @@ repeat:
+       /* go ahead the next map_blocks */
+       debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
+-      if (!z_erofs_vle_work_iter_end(builder))
++      if (z_erofs_vle_work_iter_end(builder))
+               fe->initial = false;
+       map->m_la = offset + cur;
diff --git a/queue-4.19/staging-erofs-fix-erofs_workgroup_-try_to_freeze-unfreeze.patch b/queue-4.19/staging-erofs-fix-erofs_workgroup_-try_to_freeze-unfreeze.patch
new file mode 100644 (file)
index 0000000..7a9038f
--- /dev/null
@@ -0,0 +1,95 @@
+From 73f5c66df3e26ab750cefcb9a3e08c71c9f79cad Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Fri, 23 Nov 2018 01:16:02 +0800
+Subject: staging: erofs: fix `erofs_workgroup_{try_to_freeze, unfreeze}'
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 73f5c66df3e26ab750cefcb9a3e08c71c9f79cad upstream.
+
+There are two minor issues in the current freeze interface:
+
+   1) The freeze interfaces are not related to CONFIG_DEBUG_SPINLOCK,
+      therefore fix the incorrect conditions;
+
+   2) For SMP platforms, preemption should also be disabled before
+      doing atomic_cmpxchg, in case a high priority task preempts
+      between atomic_cmpxchg and preempt_disable and then spins
+      on the locked refcount later.
+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/internal.h |   41 +++++++++++++++++++++++----------------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+--- a/drivers/staging/erofs/internal.h
++++ b/drivers/staging/erofs/internal.h
+@@ -184,40 +184,49 @@ struct erofs_workgroup {
+ #define EROFS_LOCKED_MAGIC     (INT_MIN | 0xE0F510CCL)
+-static inline bool erofs_workgroup_try_to_freeze(
+-      struct erofs_workgroup *grp, int v)
++#if defined(CONFIG_SMP)
++static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
++                                               int val)
+ {
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+-      if (v != atomic_cmpxchg(&grp->refcount,
+-              v, EROFS_LOCKED_MAGIC))
+-              return false;
+       preempt_disable();
+-#else
+-      preempt_disable();
+-      if (atomic_read(&grp->refcount) != v) {
++      if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
+               preempt_enable();
+               return false;
+       }
+-#endif
+       return true;
+ }
+-static inline void erofs_workgroup_unfreeze(
+-      struct erofs_workgroup *grp, int v)
++static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
++                                          int orig_val)
+ {
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+-      atomic_set(&grp->refcount, v);
+-#endif
++      atomic_set(&grp->refcount, orig_val);
+       preempt_enable();
+ }
+-#if defined(CONFIG_SMP)
+ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+ {
+       return atomic_cond_read_relaxed(&grp->refcount,
+                                       VAL != EROFS_LOCKED_MAGIC);
+ }
+ #else
++static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
++                                               int val)
++{
++      preempt_disable();
++      /* no need to spin on UP platforms, let's just disable preemption. */
++      if (val != atomic_read(&grp->refcount)) {
++              preempt_enable();
++              return false;
++      }
++      return true;
++}
++
++static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
++                                          int orig_val)
++{
++      preempt_enable();
++}
++
+ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+ {
+       int v = atomic_read(&grp->refcount);
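
The SMP fix reorders the freeze path so preemption is disabled before the cmpxchg; otherwise the winner could be preempted right after taking the lock value while others spin on it. A C11 sketch of the corrected ordering, with empty preempt_disable()/preempt_enable() stubs standing in for the real kernel primitives:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define LOCKED_MAGIC  (-12345)      /* stand-in for EROFS_LOCKED_MAGIC */

    /* empty stubs: the kernel versions really do control preemption */
    static void preempt_disable(void) { }
    static void preempt_enable(void)  { }

    /* disable preemption *before* trying to take the freeze lock, so the
     * winner cannot be preempted while other tasks spin on LOCKED_MAGIC */
    bool workgroup_try_to_freeze(atomic_int *refcount, int val)
    {
        int expected = val;

        preempt_disable();
        if (!atomic_compare_exchange_strong(refcount, &expected, LOCKED_MAGIC)) {
            preempt_enable();           /* lost the race: back off */
            return false;
        }
        return true;                    /* frozen; preemption stays disabled */
    }
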
diff --git a/queue-4.19/staging-erofs-remove-the-redundant-d_rehash-for-the-root-dentry.patch b/queue-4.19/staging-erofs-remove-the-redundant-d_rehash-for-the-root-dentry.patch
new file mode 100644 (file)
index 0000000..1584c43
--- /dev/null
@@ -0,0 +1,36 @@
+From e9c892465583c8f42d61fafe30970d36580925df Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Sat, 3 Nov 2018 17:23:56 +0800
+Subject: staging: erofs: remove the redundant d_rehash() for the root dentry
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit e9c892465583c8f42d61fafe30970d36580925df upstream.
+
+There is actually no need at all to d_rehash() the root dentry,
+as Al pointed out; fix it.
+
+Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
+Cc: Al Viro <viro@ZenIV.linux.org.uk>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/super.c |    6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/drivers/staging/erofs/super.c
++++ b/drivers/staging/erofs/super.c
+@@ -404,12 +404,6 @@ static int erofs_read_super(struct super
+       erofs_register_super(sb);
+-      /*
+-       * We already have a positive dentry, which was instantiated
+-       * by d_make_root. Just need to d_rehash it.
+-       */
+-      d_rehash(sb->s_root);
+-
+       if (!silent)
+               infoln("mounted on %s with opts: %s.", dev_name,
+                       (char *)data);
diff --git a/queue-4.19/staging-erofs-replace-bug_on-with-dbg_bugon-in-data.c.patch b/queue-4.19/staging-erofs-replace-bug_on-with-dbg_bugon-in-data.c.patch
new file mode 100644 (file)
index 0000000..4aa3773
--- /dev/null
@@ -0,0 +1,132 @@
+From 9141b60cf6a53c99f8a9309bf8e1c6650a6785c1 Mon Sep 17 00:00:00 2001
+From: Chen Gong <gongchen4@huawei.com>
+Date: Tue, 18 Sep 2018 22:27:28 +0800
+Subject: staging: erofs: replace BUG_ON with DBG_BUGON in data.c
+
+From: Chen Gong <gongchen4@huawei.com>
+
+commit 9141b60cf6a53c99f8a9309bf8e1c6650a6785c1 upstream.
+
+This patch replaces BUG_ON with DBG_BUGON in data.c, and adds the
+necessary error handling.
+
+Signed-off-by: Chen Gong <gongchen4@huawei.com>
+Reviewed-by: Gao Xiang <gaoxiang25@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/data.c |   31 ++++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+
+--- a/drivers/staging/erofs/data.c
++++ b/drivers/staging/erofs/data.c
+@@ -25,7 +25,7 @@ static inline void read_endio(struct bio
+               struct page *page = bvec->bv_page;
+               /* page is already locked */
+-              BUG_ON(PageUptodate(page));
++              DBG_BUGON(PageUptodate(page));
+               if (unlikely(err))
+                       SetPageError(page);
+@@ -91,12 +91,12 @@ static int erofs_map_blocks_flatmode(str
+       struct erofs_map_blocks *map,
+       int flags)
+ {
++      int err = 0;
+       erofs_blk_t nblocks, lastblk;
+       u64 offset = map->m_la;
+       struct erofs_vnode *vi = EROFS_V(inode);
+       trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
+-      BUG_ON(is_inode_layout_compression(inode));
+       nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+       lastblk = nblocks - is_inode_layout_inline(inode);
+@@ -123,18 +123,27 @@ static int erofs_map_blocks_flatmode(str
+               map->m_plen = inode->i_size - offset;
+               /* inline data should locate in one meta block */
+-              BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
++              if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
++                      DBG_BUGON(1);
++                      err = -EIO;
++                      goto err_out;
++              }
++
+               map->m_flags |= EROFS_MAP_META;
+       } else {
+               errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
+                       vi->nid, inode->i_size, map->m_la);
+-              BUG();
++              DBG_BUGON(1);
++              err = -EIO;
++              goto err_out;
+       }
+ out:
+       map->m_llen = map->m_plen;
++
++err_out:
+       trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
+-      return 0;
++      return err;
+ }
+ #ifdef CONFIG_EROFS_FS_ZIP
+@@ -190,7 +199,7 @@ static inline struct bio *erofs_read_raw
+       erofs_off_t current_block = (erofs_off_t)page->index;
+       int err;
+-      BUG_ON(!nblocks);
++      DBG_BUGON(!nblocks);
+       if (PageUptodate(page)) {
+               err = 0;
+@@ -233,7 +242,7 @@ submit_bio_retry:
+               }
+               /* for RAW access mode, m_plen must be equal to m_llen */
+-              BUG_ON(map.m_plen != map.m_llen);
++              DBG_BUGON(map.m_plen != map.m_llen);
+               blknr = erofs_blknr(map.m_pa);
+               blkoff = erofs_blkoff(map.m_pa);
+@@ -243,7 +252,7 @@ submit_bio_retry:
+                       void *vsrc, *vto;
+                       struct page *ipage;
+-                      BUG_ON(map.m_plen > PAGE_SIZE);
++                      DBG_BUGON(map.m_plen > PAGE_SIZE);
+                       ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
+@@ -270,7 +279,7 @@ submit_bio_retry:
+               }
+               /* pa must be block-aligned for raw reading */
+-              BUG_ON(erofs_blkoff(map.m_pa) != 0);
++              DBG_BUGON(erofs_blkoff(map.m_pa));
+               /* max # of continuous pages */
+               if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
+@@ -331,7 +340,7 @@ static int erofs_raw_access_readpage(str
+       if (IS_ERR(bio))
+               return PTR_ERR(bio);
+-      BUG_ON(bio != NULL);    /* since we have only one bio -- must be NULL */
++      DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
+       return 0;
+ }
+@@ -369,7 +378,7 @@ static int erofs_raw_access_readpages(st
+               /* pages could still be locked */
+               put_page(page);
+       }
+-      BUG_ON(!list_empty(pages));
++      DBG_BUGON(!list_empty(pages));
+       /* the rare case (end in gaps) */
+       if (unlikely(bio != NULL))
diff --git a/queue-4.19/staging-erofs-unzip_-pagevec.h-vle.c-rectify-bug_ons.patch b/queue-4.19/staging-erofs-unzip_-pagevec.h-vle.c-rectify-bug_ons.patch
new file mode 100644 (file)
index 0000000..cc9f3db
--- /dev/null
@@ -0,0 +1,148 @@
+From 70b17991d89554cdd16f3e4fb0179bcc03c808d9 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Tue, 11 Dec 2018 15:17:49 +0800
+Subject: staging: erofs: unzip_{pagevec.h,vle.c}: rectify BUG_ONs
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit 70b17991d89554cdd16f3e4fb0179bcc03c808d9 upstream.
+
+Remove all redundant BUG_ONs, and turn the remaining
+useful ones into DBG_BUGONs.
+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/staging/erofs/unzip_pagevec.h |    2 -
+ drivers/staging/erofs/unzip_vle.c     |   35 +++++++++++++---------------------
+ 2 files changed, 15 insertions(+), 22 deletions(-)
+
+--- a/drivers/staging/erofs/unzip_pagevec.h
++++ b/drivers/staging/erofs/unzip_pagevec.h
+@@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_er
+       erofs_vtptr_t t;
+       if (unlikely(ctor->index >= ctor->nr)) {
+-              BUG_ON(ctor->next == NULL);
++              DBG_BUGON(!ctor->next);
+               z_erofs_pagevec_ctor_pagedown(ctor, true);
+       }
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -18,9 +18,6 @@ static struct kmem_cache *z_erofs_workgr
+ void z_erofs_exit_zip_subsystem(void)
+ {
+-      BUG_ON(z_erofs_workqueue == NULL);
+-      BUG_ON(z_erofs_workgroup_cachep == NULL);
+-
+       destroy_workqueue(z_erofs_workqueue);
+       kmem_cache_destroy(z_erofs_workgroup_cachep);
+ }
+@@ -363,7 +360,10 @@ z_erofs_vle_work_register(struct super_b
+       struct z_erofs_vle_work *work;
+       /* if multiref is disabled, grp should never be nullptr */
+-      BUG_ON(grp != NULL);
++      if (unlikely(grp)) {
++              DBG_BUGON(1);
++              return ERR_PTR(-EINVAL);
++      }
+       /* no available workgroup, let's allocate one */
+       grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
+@@ -742,7 +742,7 @@ static inline void z_erofs_vle_read_endi
+               bool cachemngd = false;
+               DBG_BUGON(PageUptodate(page));
+-              BUG_ON(page->mapping == NULL);
++              DBG_BUGON(!page->mapping);
+ #ifdef EROFS_FS_HAS_MANAGED_CACHE
+               if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
+@@ -800,7 +800,7 @@ static int z_erofs_vle_unzip(struct supe
+       might_sleep();
+       work = z_erofs_vle_grab_primary_work(grp);
+-      BUG_ON(!READ_ONCE(work->nr_pages));
++      DBG_BUGON(!READ_ONCE(work->nr_pages));
+       mutex_lock(&work->lock);
+       nr_pages = work->nr_pages;
+@@ -849,8 +849,8 @@ repeat:
+               else
+                       pagenr = z_erofs_onlinepage_index(page);
+-              BUG_ON(pagenr >= nr_pages);
+-              BUG_ON(pages[pagenr] != NULL);
++              DBG_BUGON(pagenr >= nr_pages);
++              DBG_BUGON(pages[pagenr]);
+               pages[pagenr] = page;
+       }
+@@ -873,9 +873,8 @@ repeat:
+               if (z_erofs_is_stagingpage(page))
+                       continue;
+ #ifdef EROFS_FS_HAS_MANAGED_CACHE
+-              else if (page->mapping == mngda) {
+-                      BUG_ON(PageLocked(page));
+-                      BUG_ON(!PageUptodate(page));
++              if (page->mapping == mngda) {
++                      DBG_BUGON(!PageUptodate(page));
+                       continue;
+               }
+ #endif
+@@ -883,8 +882,8 @@ repeat:
+               /* only non-head page could be reused as a compressed page */
+               pagenr = z_erofs_onlinepage_index(page);
+-              BUG_ON(pagenr >= nr_pages);
+-              BUG_ON(pages[pagenr] != NULL);
++              DBG_BUGON(pagenr >= nr_pages);
++              DBG_BUGON(pages[pagenr]);
+               ++sparsemem_pages;
+               pages[pagenr] = page;
+@@ -894,9 +893,6 @@ repeat:
+       llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
+       if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
+-              /* FIXME! this should be fixed in the future */
+-              BUG_ON(grp->llen != llen);
+-
+               err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
+                       pages, nr_pages, work->pageofs);
+               goto out;
+@@ -911,10 +907,8 @@ repeat:
+       if (err != -ENOTSUPP)
+               goto out_percpu;
+-      if (sparsemem_pages >= nr_pages) {
+-              BUG_ON(sparsemem_pages > nr_pages);
++      if (sparsemem_pages >= nr_pages)
+               goto skip_allocpage;
+-      }
+       for (i = 0; i < nr_pages; ++i) {
+               if (pages[i] != NULL)
+@@ -1007,7 +1001,7 @@ static void z_erofs_vle_unzip_wq(struct
+               struct z_erofs_vle_unzip_io_sb, io.u.work);
+       LIST_HEAD(page_pool);
+-      BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
++      DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+       z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
+       put_pages_list(&page_pool);
+@@ -1336,7 +1330,6 @@ static inline int __z_erofs_vle_normalac
+                       continue;
+               }
+-              BUG_ON(PagePrivate(page));
+               set_page_private(page, (unsigned long)head);
+               head = page;
+       }
diff --git a/queue-4.19/staging-erofs-unzip_vle_lz4.c-utils.c-rectify-bug_ons.patch b/queue-4.19/staging-erofs-unzip_vle_lz4.c-utils.c-rectify-bug_ons.patch
new file mode 100644 (file)
index 0000000..e2f519c
--- /dev/null
@@ -0,0 +1,67 @@
+From b8e076a6ef253e763bfdb81e5c72bcc828b0fbeb Mon Sep 17 00:00:00 2001
+From: Gao Xiang <gaoxiang25@huawei.com>
+Date: Tue, 11 Dec 2018 15:17:50 +0800
+Subject: staging: erofs: unzip_vle_lz4.c,utils.c: rectify BUG_ONs
+
+From: Gao Xiang <gaoxiang25@huawei.com>
+
+commit b8e076a6ef253e763bfdb81e5c72bcc828b0fbeb upstream.
+
+Remove all redundant BUG_ONs, and turn the remaining
+useful ones into DBG_BUGONs.
+
+Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/erofs/unzip_vle_lz4.c |    2 +-
+ drivers/staging/erofs/utils.c         |   12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/staging/erofs/unzip_vle_lz4.c
++++ b/drivers/staging/erofs/unzip_vle_lz4.c
+@@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page *
+                       if (compressed_pages[j] != page)
+                               continue;
+-                      BUG_ON(mirrored[j]);
++                      DBG_BUGON(mirrored[j]);
+                       memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
+                       mirrored[j] = true;
+                       break;
+--- a/drivers/staging/erofs/utils.c
++++ b/drivers/staging/erofs/utils.c
+@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list
+               list_del(&page->lru);
+       } else {
+               page = alloc_pages(gfp | __GFP_NOFAIL, 0);
+-
+-              BUG_ON(page == NULL);
+-              BUG_ON(page->mapping != NULL);
+       }
+       return page;
+ }
+@@ -60,7 +57,7 @@ repeat:
+               /* decrease refcount added by erofs_workgroup_put */
+               if (unlikely(oldcount == 1))
+                       atomic_long_dec(&erofs_global_shrink_cnt);
+-              BUG_ON(index != grp->index);
++              DBG_BUGON(index != grp->index);
+       }
+       rcu_read_unlock();
+       return grp;
+@@ -73,8 +70,11 @@ int erofs_register_workgroup(struct supe
+       struct erofs_sb_info *sbi;
+       int err;
+-      /* grp->refcount should not < 1 */
+-      BUG_ON(!atomic_read(&grp->refcount));
++      /* grp shouldn't be broken or used before */
++      if (unlikely(atomic_read(&grp->refcount) != 1)) {
++              DBG_BUGON(1);
++              return -EINVAL;
++      }
+       err = radix_tree_preload(GFP_NOFS);
+       if (err)