git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 4.9
author    Sasha Levin <sashal@kernel.org>
          Mon, 15 Feb 2021 04:28:17 +0000 (23:28 -0500)
committer Sasha Levin <sashal@kernel.org>
          Mon, 15 Feb 2021 04:28:17 +0000 (23:28 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.9/bpf-check-for-integer-overflow-when-using-roundup_po.patch [new file with mode: 0644]
queue-4.9/h8300-fix-preemption-build-ti_pre_count-undefined.patch [new file with mode: 0644]
queue-4.9/netfilter-conntrack-skip-identical-origin-tuple-in-s.patch [new file with mode: 0644]
queue-4.9/netfilter-xt_recent-fix-attempt-to-update-deleted-en.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/xen-netback-avoid-race-in-xenvif_rx_ring_slots_avail.patch [new file with mode: 0644]

diff --git a/queue-4.9/bpf-check-for-integer-overflow-when-using-roundup_po.patch b/queue-4.9/bpf-check-for-integer-overflow-when-using-roundup_po.patch
new file mode 100644 (file)
index 0000000..5900e40
--- /dev/null
@@ -0,0 +1,37 @@
+From fac94dee47186986e54e28027a5c1b5794283989 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Jan 2021 06:36:53 +0000
+Subject: bpf: Check for integer overflow when using roundup_pow_of_two()
+
+From: Bui Quang Minh <minhquangbui99@gmail.com>
+
+[ Upstream commit 6183f4d3a0a2ad230511987c6c362ca43ec0055f ]
+
+On 32-bit architectures, roundup_pow_of_two() can return 0 when the argument
+has its uppermost bit set, because the computation resolves to 1UL << 32.
+Add a check for this case.
+
+Fixes: d5a3b1f69186 ("bpf: introduce BPF_MAP_TYPE_STACK_TRACE")
+Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20210127063653.3576-1-minhquangbui99@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/stackmap.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index a2a232dec2363..2fdf6f96f9762 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -70,6 +70,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
+       /* hash table size must be power of 2 */
+       n_buckets = roundup_pow_of_two(attr->max_entries);
++      if (!n_buckets)
++              return ERR_PTR(-E2BIG);
+       cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
+       if (cost >= U32_MAX - PAGE_SIZE)
+-- 
+2.27.0
+
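For illustration, a minimal userspace sketch of the overflow this check guards against (the helper name and values below are illustrative stand-ins, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for roundup_pow_of_two() specialized to a 32-bit unsigned long,
 * computed in 64 bits so the overflow is visible even on a 64-bit host. */
static uint32_t roundup_pow_of_two_u32(uint32_t n)
{
        uint64_t p = 1;

        while (p < n)
                p <<= 1;
        return (uint32_t)p;     /* 0x100000000 truncates to 0 */
}

int main(void)
{
        uint32_t max_entries = 0x80000001u;     /* uppermost bit set */
        uint32_t n_buckets = roundup_pow_of_two_u32(max_entries);

        if (!n_buckets)         /* the check the patch adds, mapped to -E2BIG */
                printf("n_buckets overflowed to 0\n");
        return 0;
}

With n_buckets silently becoming 0, the cost/size calculation below the call would be based on an empty hash table; rejecting the map with -E2BIG avoids that.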
diff --git a/queue-4.9/h8300-fix-preemption-build-ti_pre_count-undefined.patch b/queue-4.9/h8300-fix-preemption-build-ti_pre_count-undefined.patch
new file mode 100644 (file)
index 0000000..519a8e8
--- /dev/null
@@ -0,0 +1,44 @@
+From 07a7872d710b45f86a5981e982bee6c14cd8b868 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Feb 2021 20:52:54 -0800
+Subject: h8300: fix PREEMPTION build, TI_PRE_COUNT undefined
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit ade9679c159d5bbe14fb7e59e97daf6062872e2b ]
+
+Fix a build error for undefined 'TI_PRE_COUNT' by adding it to
+asm-offsets.c.
+
+  h8300-linux-ld: arch/h8300/kernel/entry.o: in function `resume_kernel': (.text+0x29a): undefined reference to `TI_PRE_COUNT'
+
+Link: https://lkml.kernel.org/r/20210212021650.22740-1-rdunlap@infradead.org
+Fixes: df2078b8daa7 ("h8300: Low level entry")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Reported-by: kernel test robot <lkp@intel.com>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/h8300/kernel/asm-offsets.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
+index dc2d16ce8a0d5..3e33a9844d99a 100644
+--- a/arch/h8300/kernel/asm-offsets.c
++++ b/arch/h8300/kernel/asm-offsets.c
+@@ -62,6 +62,9 @@ int main(void)
+       OFFSET(TI_FLAGS, thread_info, flags);
+       OFFSET(TI_CPU, thread_info, cpu);
+       OFFSET(TI_PRE, thread_info, preempt_count);
++#ifdef CONFIG_PREEMPTION
++      DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
++#endif
+       return 0;
+ }
+-- 
+2.27.0
+
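For context, asm-offsets.c is only ever compiled to assembly, never linked or run: each OFFSET()/DEFINE() emits a "->NAME value" marker that the kbuild scripts rewrite into a #define in the generated asm-offsets.h, which entry.S includes. A condensed sketch of that mechanism (simplified from the generic kbuild helpers, not the h8300 build):

#include <stddef.h>

/* Emits '.ascii "->SYM <value> ..."' into the compiler's assembly output;
 * a build-time script turns each marker into "#define SYM <value>". */
#define DEFINE(sym, val) \
        asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) \
        DEFINE(sym, offsetof(struct str, mem))

struct thread_info { int flags; int cpu; int preempt_count; };

int main(void)
{
        OFFSET(TI_FLAGS, thread_info, flags);
        /* Without the line below, assembly that references TI_PRE_COUNT
         * (h8300's resume_kernel) fails to build -- the error being fixed. */
        DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
        return 0;
}

Build with `cc -S` and grep the output for "->" to see the markers; like the kernel's asm-offsets.c, this file is only inspected as assembly, never executed.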
diff --git a/queue-4.9/netfilter-conntrack-skip-identical-origin-tuple-in-s.patch b/queue-4.9/netfilter-conntrack-skip-identical-origin-tuple-in-s.patch
new file mode 100644 (file)
index 0000000..5c443f6
--- /dev/null
@@ -0,0 +1,43 @@
+From 2e462153724c9016e1ca737a81825eff238312e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Feb 2021 12:56:43 +0100
+Subject: netfilter: conntrack: skip identical origin tuple in same zone only
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 07998281c268592963e1cd623fe6ab0270b65ae4 ]
+
+The origin skip check needs to re-test the zone. Otherwise, we might
+skip a colliding tuple in the reply direction.
+
+This only occurs when using 'directional zones' where origin tuples
+reside in different zones but the reply tuples share the same zone.
+
+This causes the new conntrack entry to be dropped at confirmation time
+because NAT clash resolution was elided.
+
+Fixes: 4e35c1cb9460240 ("netfilter: nf_nat: skip nat clash resolution for same-origin entries")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index d507d0fc7858a..ddd90a3820d39 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -903,7 +903,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+                        * Let nf_ct_resolve_clash() deal with this later.
+                        */
+                       if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+-                                            &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
++                                            &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
++                                            nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
+                               continue;
+                       NF_CT_STAT_INC_ATOMIC(net, found);
+-- 
+2.27.0
+
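A toy model of the 'directional zones' case (simplified structures, not the nf_conntrack zone API), showing why two entries can compare equal on the reply side while their origin sides still belong to different zones:

#include <stdbool.h>
#include <stdio.h>

enum dir { DIR_ORIGINAL, DIR_REPLY };

struct zone {
        int id;
        enum dir applies_to;    /* the direction this zone id is scoped to */
};

/* A directional zone only contributes its id in its own direction;
 * in the other direction it falls back to the default zone (0). */
static int zone_id(const struct zone *z, enum dir d)
{
        return z->applies_to == d ? z->id : 0;
}

static bool zone_equal(const struct zone *a, const struct zone *b, enum dir d)
{
        return zone_id(a, d) == zone_id(b, d);
}

int main(void)
{
        struct zone a = { .id = 1, .applies_to = DIR_ORIGINAL };
        struct zone b = { .id = 2, .applies_to = DIR_ORIGINAL };

        /* Reply side: both map to the default zone, so they collide there. */
        printf("reply equal:  %d\n", zone_equal(&a, &b, DIR_REPLY));    /* 1 */
        /* Origin side: different zones, so identical origin tuples must
         * NOT be skipped -- hence the extra nf_ct_zone_equal() test. */
        printf("origin equal: %d\n", zone_equal(&a, &b, DIR_ORIGINAL)); /* 0 */
        return 0;
}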
diff --git a/queue-4.9/netfilter-xt_recent-fix-attempt-to-update-deleted-en.patch b/queue-4.9/netfilter-xt_recent-fix-attempt-to-update-deleted-en.patch
new file mode 100644 (file)
index 0000000..6e0b37f
--- /dev/null
@@ -0,0 +1,66 @@
+From 2890ae449957284b3a3e12d9bb8e17ee766714dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Jan 2021 20:57:43 +0100
+Subject: netfilter: xt_recent: Fix attempt to update deleted entry
+
+From: Jozsef Kadlecsik <kadlec@mail.kfki.hu>
+
+[ Upstream commit b1bdde33b72366da20d10770ab7a49fe87b5e190 ]
+
+When both the --reap and --update flags are specified, there is a
+code path in which the entry to be updated is reaped beforehand,
+which then leads to a kernel crash. Reap only entries which won't
+be updated.
+
+Fixes kernel bugzilla #207773.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=207773
+Reported-by: Reindl Harald <h.reindl@thelounge.net>
+Fixes: 0079c5aee348 ("netfilter: xt_recent: add an entry reaper")
+Signed-off-by: Jozsef Kadlecsik <kadlec@netfilter.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/xt_recent.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
+index 79d7ad621a80f..03c8bd854e56a 100644
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -155,7 +155,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
+ /*
+  * Drop entries with timestamps older then 'time'.
+  */
+-static void recent_entry_reap(struct recent_table *t, unsigned long time)
++static void recent_entry_reap(struct recent_table *t, unsigned long time,
++                            struct recent_entry *working, bool update)
+ {
+       struct recent_entry *e;
+@@ -164,6 +165,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
+        */
+       e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
++      /*
++       * Do not reap the entry which are going to be updated.
++       */
++      if (e == working && update)
++              return;
++
+       /*
+        * The last time stamp is the most recent.
+        */
+@@ -306,7 +313,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
+               /* info->seconds must be non-zero */
+               if (info->check_set & XT_RECENT_REAP)
+-                      recent_entry_reap(t, time);
++                      recent_entry_reap(t, time, e,
++                              info->check_set & XT_RECENT_UPDATE && ret);
+       }
+       if (info->check_set & XT_RECENT_SET ||
+-- 
+2.27.0
+
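A toy LRU sketch of the bug class and the guard (simplified types, not the xt_recent structures): reaping frees the oldest entry, so it must never free the entry the caller is about to update.

#include <stdbool.h>
#include <stdlib.h>

struct entry {
        struct entry *next;     /* singly linked LRU: head is the oldest */
        unsigned long stamp;    /* last-seen time */
};

struct table { struct entry *lru_head; };

static void reap_oldest(struct table *t, unsigned long cutoff,
                        struct entry *working, bool update)
{
        struct entry *e = t->lru_head;

        if (!e)
                return;
        /* The guard added by the patch: the caller still holds 'working'
         * and will touch it right after us, so freeing it here would be a
         * use-after-free when both --reap and --update are requested. */
        if (e == working && update)
                return;
        if (e->stamp < cutoff) {
                t->lru_head = e->next;
                free(e);
        }
}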
diff --git a/queue-4.9/series b/queue-4.9/series
index 9a3a5f3b6027f054dc60bdfad5429b190e8dc12b..1641b9a90a71afd822a70c1a44793094b06344c0 100644 (file)
@@ -21,3 +21,8 @@ tracing-check-length-before-giving-out-the-filter-buffer.patch
 ovl-skip-getxattr-of-security-labels.patch
 arm-dts-lpc32xx-revert-set-default-clock-rate-of-hcl.patch
 memblock-do-not-start-bottom-up-allocations-with-ker.patch
+bpf-check-for-integer-overflow-when-using-roundup_po.patch
+netfilter-xt_recent-fix-attempt-to-update-deleted-en.patch
+xen-netback-avoid-race-in-xenvif_rx_ring_slots_avail.patch
+netfilter-conntrack-skip-identical-origin-tuple-in-s.patch
+h8300-fix-preemption-build-ti_pre_count-undefined.patch
diff --git a/queue-4.9/xen-netback-avoid-race-in-xenvif_rx_ring_slots_avail.patch b/queue-4.9/xen-netback-avoid-race-in-xenvif_rx_ring_slots_avail.patch
new file mode 100644 (file)
index 0000000..85435a1
--- /dev/null
@@ -0,0 +1,58 @@
+From 40cd3783e0cc58fe91d572bdc36729e13b507d7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Feb 2021 08:09:38 +0100
+Subject: xen/netback: avoid race in xenvif_rx_ring_slots_available()
+
+From: Juergen Gross <jgross@suse.com>
+
+[ Upstream commit ec7d8e7dd3a59528e305a18e93f1cb98f7faf83b ]
+
+Since commit 23025393dbeb3b8b3 ("xen/netback: use lateeoi irq binding")
+xenvif_rx_ring_slots_available() is no longer called only from the rx
+queue kernel thread, so it needs to access the rx queue with the
+associated queue lock held.
+
+Reported-by: Igor Druzhinin <igor.druzhinin@citrix.com>
+Fixes: 23025393dbeb3b8b3 ("xen/netback: use lateeoi irq binding")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Acked-by: Wei Liu <wl@xen.org>
+Link: https://lore.kernel.org/r/20210202070938.7863-1-jgross@suse.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/xen-netback/rx.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
+index f152246c7dfb7..ddfb1cfa2dd94 100644
+--- a/drivers/net/xen-netback/rx.c
++++ b/drivers/net/xen-netback/rx.c
+@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+       RING_IDX prod, cons;
+       struct sk_buff *skb;
+       int needed;
++      unsigned long flags;
++
++      spin_lock_irqsave(&queue->rx_queue.lock, flags);
+       skb = skb_peek(&queue->rx_queue);
+-      if (!skb)
++      if (!skb) {
++              spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+               return false;
++      }
+       needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+       if (skb_is_gso(skb))
+@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+       if (skb->sw_hash)
+               needed++;
++      spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
++
+       do {
+               prod = queue->rx.sring->req_prod;
+               cons = queue->rx.req_cons;
+-- 
+2.27.0
+
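A minimal userspace sketch of the peek-under-lock rule the patch enforces (a pthread analogue with made-up types, not the netback code): once the function can run concurrently with the queue's consumer, the peeked head may be dequeued and freed unless both the peek and the dereference happen under the queue lock.

#include <pthread.h>
#include <stddef.h>

struct node {
        struct node *next;
        size_t len;
};

struct queue {
        pthread_mutex_t lock;
        struct node *head;
};

/* Safe variant: peek and read in one critical section, mirroring the
 * spin_lock_irqsave()/spin_unlock_irqrestore() span added by the patch. */
static size_t peek_head_len(struct queue *q)
{
        size_t len = 0;

        pthread_mutex_lock(&q->lock);
        if (q->head)
                len = q->head->len;     /* head cannot be freed while we hold the lock */
        pthread_mutex_unlock(&q->lock);

        return len;
}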