6.0-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 23 Nov 2022 08:32:47 +0000 (09:32 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 23 Nov 2022 08:32:47 +0000 (09:32 +0100)
added patches:
bpf-prevent-bpf-program-recursion-for-raw-tracepoint-probes.patch
net-9p-use-a-dedicated-spinlock-for-trans_fd.patch
ntfs-check-overflow-when-iterating-attr_records.patch
ntfs-fix-out-of-bounds-read-in-ntfs_attr_find.patch
ntfs-fix-use-after-free-in-ntfs_attr_find.patch

queue-6.0/bpf-prevent-bpf-program-recursion-for-raw-tracepoint-probes.patch [new file with mode: 0644]
queue-6.0/net-9p-use-a-dedicated-spinlock-for-trans_fd.patch [new file with mode: 0644]
queue-6.0/ntfs-check-overflow-when-iterating-attr_records.patch [new file with mode: 0644]
queue-6.0/ntfs-fix-out-of-bounds-read-in-ntfs_attr_find.patch [new file with mode: 0644]
queue-6.0/ntfs-fix-use-after-free-in-ntfs_attr_find.patch [new file with mode: 0644]
queue-6.0/series

diff --git a/queue-6.0/bpf-prevent-bpf-program-recursion-for-raw-tracepoint-probes.patch b/queue-6.0/bpf-prevent-bpf-program-recursion-for-raw-tracepoint-probes.patch
new file mode 100644
index 0000000..32a1e87
--- /dev/null
@@ -0,0 +1,163 @@
+From 05b24ff9b2cfabfcfd951daaa915a036ab53c9e1 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Fri, 16 Sep 2022 09:19:14 +0200
+Subject: bpf: Prevent bpf program recursion for raw tracepoint probes
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit 05b24ff9b2cfabfcfd951daaa915a036ab53c9e1 upstream.
+
+We got a report from syzbot [1] about warnings caused by a bpf
+program attached to the contention_begin raw tracepoint triggering
+the same tracepoint by using the bpf_trace_printk helper, which takes
+the trace_printk_lock lock.
+
+ Call Trace:
+  <TASK>
+  ? trace_event_raw_event_bpf_trace_printk+0x5f/0x90
+  bpf_trace_printk+0x2b/0xe0
+  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
+  bpf_trace_run2+0x26/0x90
+  native_queued_spin_lock_slowpath+0x1c6/0x2b0
+  _raw_spin_lock_irqsave+0x44/0x50
+  bpf_trace_printk+0x3f/0xe0
+  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
+  bpf_trace_run2+0x26/0x90
+  native_queued_spin_lock_slowpath+0x1c6/0x2b0
+  _raw_spin_lock_irqsave+0x44/0x50
+  bpf_trace_printk+0x3f/0xe0
+  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
+  bpf_trace_run2+0x26/0x90
+  native_queued_spin_lock_slowpath+0x1c6/0x2b0
+  _raw_spin_lock_irqsave+0x44/0x50
+  bpf_trace_printk+0x3f/0xe0
+  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
+  bpf_trace_run2+0x26/0x90
+  native_queued_spin_lock_slowpath+0x1c6/0x2b0
+  _raw_spin_lock_irqsave+0x44/0x50
+  __unfreeze_partials+0x5b/0x160
+  ...
+
+This can be reproduced by attaching a bpf program as a raw tracepoint
+on the contention_begin tracepoint. The bpf prog calls the
+bpf_trace_printk helper. Then, by running perf bench, the spin lock
+code is forced to take the slow path and call the contention_begin
+tracepoint.
+
+Fix this by skipping execution of the bpf program if it is already
+running, using the bpf prog 'active' field, which is currently used
+by trampoline programs for the same reason.
+
+Move bpf_prog_inc_misses_counter to syscall.c, because trampoline.c
+is only compiled in when CONFIG_BPF_JIT is enabled.
+
+Reviewed-by: Stanislav Fomichev <sdf@google.com>
+Reported-by: syzbot+2251879aa068ad9c960d@syzkaller.appspotmail.com
+[1] https://lore.kernel.org/bpf/YxhFe3EwqchC%2FfYf@krava/T/#t
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/r/20220916071914.7156-1-jolsa@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf.h      |    5 +++++
+ kernel/bpf/syscall.c     |   11 +++++++++++
+ kernel/bpf/trampoline.c  |   15 ++-------------
+ kernel/trace/bpf_trace.c |    6 ++++++
+ 4 files changed, 24 insertions(+), 13 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1967,6 +1967,7 @@ static inline bool unprivileged_ebpf_ena
+       return !sysctl_unprivileged_bpf_disabled;
+ }
++void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
+ #else /* !CONFIG_BPF_SYSCALL */
+ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
+ {
+@@ -2305,6 +2306,10 @@ static inline int sock_map_bpf_prog_quer
+ {
+       return -EINVAL;
+ }
++
++static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
++{
++}
+ #endif /* CONFIG_BPF_SYSCALL */
+ #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -2094,6 +2094,17 @@ struct bpf_prog_kstats {
+       u64 misses;
+ };
++void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
++{
++      struct bpf_prog_stats *stats;
++      unsigned int flags;
++
++      stats = this_cpu_ptr(prog->stats);
++      flags = u64_stats_update_begin_irqsave(&stats->syncp);
++      u64_stats_inc(&stats->misses);
++      u64_stats_update_end_irqrestore(&stats->syncp, flags);
++}
++
+ static void bpf_prog_get_stats(const struct bpf_prog *prog,
+                              struct bpf_prog_kstats *stats)
+ {
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -863,17 +863,6 @@ static __always_inline u64 notrace bpf_p
+       return start;
+ }
+-static void notrace inc_misses_counter(struct bpf_prog *prog)
+-{
+-      struct bpf_prog_stats *stats;
+-      unsigned int flags;
+-
+-      stats = this_cpu_ptr(prog->stats);
+-      flags = u64_stats_update_begin_irqsave(&stats->syncp);
+-      u64_stats_inc(&stats->misses);
+-      u64_stats_update_end_irqrestore(&stats->syncp, flags);
+-}
+-
+ /* The logic is similar to bpf_prog_run(), but with an explicit
+  * rcu_read_lock() and migrate_disable() which are required
+  * for the trampoline. The macro is split into
+@@ -896,7 +885,7 @@ u64 notrace __bpf_prog_enter(struct bpf_
+       run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+-              inc_misses_counter(prog);
++              bpf_prog_inc_misses_counter(prog);
+               return 0;
+       }
+       return bpf_prog_start_time();
+@@ -967,7 +956,7 @@ u64 notrace __bpf_prog_enter_sleepable(s
+       might_fault();
+       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+-              inc_misses_counter(prog);
++              bpf_prog_inc_misses_counter(prog);
+               return 0;
+       }
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2058,9 +2058,15 @@ static __always_inline
+ void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
+ {
+       cant_sleep();
++      if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
++              bpf_prog_inc_misses_counter(prog);
++              goto out;
++      }
+       rcu_read_lock();
+       (void) bpf_prog_run(prog, args);
+       rcu_read_unlock();
++out:
++      this_cpu_dec(*(prog->active));
+ }
+ #define UNPACK(...)                   __VA_ARGS__
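
The guard added to __bpf_trace_run() above is a classic re-entrancy
check: a per-cpu counter is incremented on entry, and any nested entry
sees a value other than 1 and bails out instead of recursing. A minimal
userspace sketch of the same idea, using a C11 thread-local counter in
place of the kernel's per-cpu prog->active field (all names below are
hypothetical, not part of the patch):

  #include <stdio.h>

  static _Thread_local int active;   /* stands in for prog->active */
  static unsigned long misses;       /* stands in for stats->misses */

  static void run_prog(void);

  static void handler(void)          /* cf. __bpf_trace_run() */
  {
          if (++active != 1) {       /* already running: skip and count */
                  misses++;
                  goto out;
          }
          run_prog();                /* nested entry is detected above */
  out:
          active--;
  }

  static void run_prog(void)
  {
          handler();                 /* simulated re-entrant tracepoint hit */
  }

  int main(void)
  {
          handler();
          printf("misses: %lu\n", misses);  /* prints 1: recursion skipped */
          return 0;
  }

The nested invocation only bumps the miss counter and returns, which is
exactly how the patch turns unbounded recursion into a counted skip.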
diff --git a/queue-6.0/net-9p-use-a-dedicated-spinlock-for-trans_fd.patch b/queue-6.0/net-9p-use-a-dedicated-spinlock-for-trans_fd.patch
new file mode 100644
index 0000000..7ff8a8e
--- /dev/null
@@ -0,0 +1,196 @@
+From 296ab4a813841ba1d5f40b03190fd1bd8f25aab0 Mon Sep 17 00:00:00 2001
+From: Dominique Martinet <asmadeus@codewreck.org>
+Date: Sun, 4 Sep 2022 20:17:49 +0900
+Subject: net/9p: use a dedicated spinlock for trans_fd
+
+From: Dominique Martinet <asmadeus@codewreck.org>
+
+commit 296ab4a813841ba1d5f40b03190fd1bd8f25aab0 upstream.
+
+Shamelessly copying the explanation from Tetsuo Handa's suggested
+patch[1] (slightly reworded):
+syzbot is reporting inconsistent lock state in p9_req_put()[2],
+because p9_tag_remove(), called from p9_req_put() in IRQ context, uses
+spin_lock_irqsave() on "struct p9_client"->lock, while trans_fd (not
+in IRQ context) uses spin_lock().
+
+Since the locks actually protect different things in client.c and in
+trans_fd.c, just replace trans_fd.c's lock with a new one specific to
+the transport (client.c's lock protects the idr for fid/tag
+allocations, while trans_fd.c's protects its own req list and the
+request status field that acts as the transport's state machine).
+
+Link: https://lore.kernel.org/r/20220904112928.1308799-1-asmadeus@codewreck.org
+Link: https://lkml.kernel.org/r/2470e028-9b05-2013-7198-1fdad071d999@I-love.SAKURA.ne.jp [1]
+Link: https://syzkaller.appspot.com/bug?extid=2f20b523930c32c160cc [2]
+Reported-by: syzbot <syzbot+2f20b523930c32c160cc@syzkaller.appspotmail.com>
+Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reviewed-by: Christian Schoenebeck <linux_oss@crudebyte.com>
+Signed-off-by: Dominique Martinet <asmadeus@codewreck.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/9p/trans_fd.c |   41 +++++++++++++++++++++++++----------------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -91,6 +91,7 @@ struct p9_poll_wait {
+  * @mux_list: list link for mux to manage multiple connections (?)
+  * @client: reference to client instance for this connection
+  * @err: error state
++ * @req_lock: lock protecting req_list and requests statuses
+  * @req_list: accounting for requests which have been sent
+  * @unsent_req_list: accounting for requests that haven't been sent
+  * @rreq: read request
+@@ -114,6 +115,7 @@ struct p9_conn {
+       struct list_head mux_list;
+       struct p9_client *client;
+       int err;
++      spinlock_t req_lock;
+       struct list_head req_list;
+       struct list_head unsent_req_list;
+       struct p9_req_t *rreq;
+@@ -189,10 +191,10 @@ static void p9_conn_cancel(struct p9_con
+       p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
+-      spin_lock(&m->client->lock);
++      spin_lock(&m->req_lock);
+       if (m->err) {
+-              spin_unlock(&m->client->lock);
++              spin_unlock(&m->req_lock);
+               return;
+       }
+@@ -205,7 +207,7 @@ static void p9_conn_cancel(struct p9_con
+               list_move(&req->req_list, &cancel_list);
+       }
+-      spin_unlock(&m->client->lock);
++      spin_unlock(&m->req_lock);
+       list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
+               p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
+@@ -360,7 +362,7 @@ static void p9_read_work(struct work_str
+       if ((m->rreq) && (m->rc.offset == m->rc.capacity)) {
+               p9_debug(P9_DEBUG_TRANS, "got new packet\n");
+               m->rreq->rc.size = m->rc.offset;
+-              spin_lock(&m->client->lock);
++              spin_lock(&m->req_lock);
+               if (m->rreq->status == REQ_STATUS_SENT) {
+                       list_del(&m->rreq->req_list);
+                       p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
+@@ -369,14 +371,14 @@ static void p9_read_work(struct work_str
+                       p9_debug(P9_DEBUG_TRANS,
+                                "Ignore replies associated with a cancelled request\n");
+               } else {
+-                      spin_unlock(&m->client->lock);
++                      spin_unlock(&m->req_lock);
+                       p9_debug(P9_DEBUG_ERROR,
+                                "Request tag %d errored out while we were reading the reply\n",
+                                m->rc.tag);
+                       err = -EIO;
+                       goto error;
+               }
+-              spin_unlock(&m->client->lock);
++              spin_unlock(&m->req_lock);
+               m->rc.sdata = NULL;
+               m->rc.offset = 0;
+               m->rc.capacity = 0;
+@@ -454,10 +456,10 @@ static void p9_write_work(struct work_st
+       }
+       if (!m->wsize) {
+-              spin_lock(&m->client->lock);
++              spin_lock(&m->req_lock);
+               if (list_empty(&m->unsent_req_list)) {
+                       clear_bit(Wworksched, &m->wsched);
+-                      spin_unlock(&m->client->lock);
++                      spin_unlock(&m->req_lock);
+                       return;
+               }
+@@ -472,7 +474,7 @@ static void p9_write_work(struct work_st
+               m->wpos = 0;
+               p9_req_get(req);
+               m->wreq = req;
+-              spin_unlock(&m->client->lock);
++              spin_unlock(&m->req_lock);
+       }
+       p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
+@@ -589,6 +591,7 @@ static void p9_conn_create(struct p9_cli
+       INIT_LIST_HEAD(&m->mux_list);
+       m->client = client;
++      spin_lock_init(&m->req_lock);
+       INIT_LIST_HEAD(&m->req_list);
+       INIT_LIST_HEAD(&m->unsent_req_list);
+       INIT_WORK(&m->rq, p9_read_work);
+@@ -670,10 +673,10 @@ static int p9_fd_request(struct p9_clien
+       if (m->err < 0)
+               return m->err;
+-      spin_lock(&client->lock);
++      spin_lock(&m->req_lock);
+       req->status = REQ_STATUS_UNSENT;
+       list_add_tail(&req->req_list, &m->unsent_req_list);
+-      spin_unlock(&client->lock);
++      spin_unlock(&m->req_lock);
+       if (test_and_clear_bit(Wpending, &m->wsched))
+               n = EPOLLOUT;
+@@ -688,11 +691,13 @@ static int p9_fd_request(struct p9_clien
+ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
+ {
++      struct p9_trans_fd *ts = client->trans;
++      struct p9_conn *m = &ts->conn;
+       int ret = 1;
+       p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+-      spin_lock(&client->lock);
++      spin_lock(&m->req_lock);
+       if (req->status == REQ_STATUS_UNSENT) {
+               list_del(&req->req_list);
+@@ -700,21 +705,24 @@ static int p9_fd_cancel(struct p9_client
+               p9_req_put(client, req);
+               ret = 0;
+       }
+-      spin_unlock(&client->lock);
++      spin_unlock(&m->req_lock);
+       return ret;
+ }
+ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
+ {
++      struct p9_trans_fd *ts = client->trans;
++      struct p9_conn *m = &ts->conn;
++
+       p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+-      spin_lock(&client->lock);
++      spin_lock(&m->req_lock);
+       /* Ignore cancelled request if message has been received
+        * before lock.
+        */
+       if (req->status == REQ_STATUS_RCVD) {
+-              spin_unlock(&client->lock);
++              spin_unlock(&m->req_lock);
+               return 0;
+       }
+@@ -723,7 +731,8 @@ static int p9_fd_cancelled(struct p9_cli
+        */
+       list_del(&req->req_list);
+       req->status = REQ_STATUS_FLSHD;
+-      spin_unlock(&client->lock);
++      spin_unlock(&m->req_lock);
++
+       p9_req_put(client, req);
+       return 0;
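
The fix is a straightforward lock split: client.c and trans_fd.c were
sharing one spinlock for two unrelated jobs, so each structure now
carries its own. A minimal pthread sketch of the pattern, with
hypothetical stand-ins for the 9p structures (not the actual net/9p
types):

  #include <pthread.h>

  struct client {                    /* cf. struct p9_client */
          pthread_mutex_t lock;      /* protects tag/fid allocation only */
          int next_tag;
  };

  struct conn {                      /* cf. struct p9_conn */
          pthread_mutex_t req_lock;  /* protects req list and statuses */
          int req_status;
  };

  static int alloc_tag(struct client *c)
  {
          pthread_mutex_lock(&c->lock);
          int tag = c->next_tag++;
          pthread_mutex_unlock(&c->lock);
          return tag;
  }

  static void mark_sent(struct conn *m)
  {
          /* transport state no longer contends with client->lock */
          pthread_mutex_lock(&m->req_lock);
          m->req_status = 1;         /* e.g. REQ_STATUS_SENT */
          pthread_mutex_unlock(&m->req_lock);
  }

  int main(void)
  {
          struct client c = { PTHREAD_MUTEX_INITIALIZER, 0 };
          struct conn m = { PTHREAD_MUTEX_INITIALIZER, 0 };

          alloc_tag(&c);
          mark_sent(&m);
          return 0;
  }

Because the two locks now protect disjoint data, the IRQ-context rules
for one no longer constrain the other, which removes the inconsistent
lock state syzbot observed.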
diff --git a/queue-6.0/ntfs-check-overflow-when-iterating-attr_records.patch b/queue-6.0/ntfs-check-overflow-when-iterating-attr_records.patch
new file mode 100644
index 0000000..9d473bc
--- /dev/null
@@ -0,0 +1,52 @@
+From 63095f4f3af59322bea984a6ae44337439348fe0 Mon Sep 17 00:00:00 2001
+From: Hawkins Jiawei <yin31149@gmail.com>
+Date: Thu, 1 Sep 2022 00:09:38 +0800
+Subject: ntfs: check overflow when iterating ATTR_RECORDs
+
+From: Hawkins Jiawei <yin31149@gmail.com>
+
+commit 63095f4f3af59322bea984a6ae44337439348fe0 upstream.
+
+The kernel iterates over the ATTR_RECORDs in an mft record in
+ntfs_attr_find().  Because the ATTR_RECORDs are laid out next to each
+other, the kernel can locate the next ATTR_RECORD from the end address
+of the current one, via the current ATTR_RECORD's length field.
+
+The problem is that during iteration, when the kernel calculates the
+end address of the current ATTR_RECORD, it may trigger an integer
+overflow in executing `a = (ATTR_RECORD*)((u8*)a +
+le32_to_cpu(a->length))`.  The addition may wrap, leading to an
+endless iteration on 32-bit systems.
+
+This patch solves it by adding checks on the calculated end address of
+the current ATTR_RECORD during iteration.
+
+Link: https://lkml.kernel.org/r/20220831160935.3409-4-yin31149@gmail.com
+Link: https://lore.kernel.org/all/20220827105842.GM2030@kadam/
+Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
+Suggested-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: Anton Altaparmakov <anton@tuxera.com>
+Cc: chenxiaosong (A) <chenxiaosong2@huawei.com>
+Cc: syzkaller-bugs <syzkaller-bugs@googlegroups.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs/attrib.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/fs/ntfs/attrib.c
++++ b/fs/ntfs/attrib.c
+@@ -617,6 +617,14 @@ static int ntfs_attr_find(const ATTR_TYP
+                       return -ENOENT;
+               if (unlikely(!a->length))
+                       break;
++
++              /* check whether ATTR_RECORD's length wrap */
++              if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
++                      break;
++              /* check whether ATTR_RECORD's length is within bounds */
++              if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
++                      break;
++
+               if (a->type != type)
+                       continue;
+               /*
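
The two added checks guard the pointer advance at the top of the loop,
where a->length is an untrusted on-disk value. A standalone sketch of
the same wrap-plus-bounds test, mirroring the kernel's pointer
comparisons (struct record and next_record are hypothetical names, not
the NTFS types):

  #include <stdint.h>
  #include <stddef.h>

  struct record {            /* cf. ATTR_RECORD */
          uint32_t type;
          uint32_t length;   /* untrusted length read from disk */
  };

  /* Return the next record, or NULL if the length wraps or overruns. */
  static const struct record *next_record(const struct record *r,
                                          const uint8_t *buf_end)
  {
          const uint8_t *p = (const uint8_t *)r;
          uint32_t len = r->length;

          if (p + len < p)          /* wrap: pointer moved backwards */
                  return NULL;
          if (p + len > buf_end)    /* overrun: past end of buffer */
                  return NULL;
          return (const struct record *)(p + len);
  }

On a 32-bit machine a large len makes p + len wrap past zero, which is
exactly the case the first comparison catches.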
diff --git a/queue-6.0/ntfs-fix-out-of-bounds-read-in-ntfs_attr_find.patch b/queue-6.0/ntfs-fix-out-of-bounds-read-in-ntfs_attr_find.patch
new file mode 100644
index 0000000..bf44f57
--- /dev/null
@@ -0,0 +1,116 @@
+From 36a4d82dddbbd421d2b8e79e1cab68c8126d5075 Mon Sep 17 00:00:00 2001
+From: Hawkins Jiawei <yin31149@gmail.com>
+Date: Thu, 1 Sep 2022 00:09:36 +0800
+Subject: ntfs: fix out-of-bounds read in ntfs_attr_find()
+
+From: Hawkins Jiawei <yin31149@gmail.com>
+
+commit 36a4d82dddbbd421d2b8e79e1cab68c8126d5075 upstream.
+
+The kernel iterates over the ATTR_RECORDs in an mft record in
+ntfs_attr_find().  To ensure accesses to these ATTR_RECORDs are within
+bounds, the kernel does some checking during iteration.
+
+The problem is that while checking whether an ATTR_RECORD's name is
+within bounds, the kernel dereferences the ATTR_RECORD's name_offset
+field before checking that the ATTR_RECORD structure itself is within
+bounds.  This may result in an out-of-bounds read in ntfs_attr_find(),
+reported by Syzkaller:
+
+==================================================================
+BUG: KASAN: use-after-free in ntfs_attr_find+0xc02/0xce0 fs/ntfs/attrib.c:597
+Read of size 2 at addr ffff88807e352009 by task syz-executor153/3607
+
+[...]
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
+ print_address_description mm/kasan/report.c:317 [inline]
+ print_report.cold+0x2ba/0x719 mm/kasan/report.c:433
+ kasan_report+0xb1/0x1e0 mm/kasan/report.c:495
+ ntfs_attr_find+0xc02/0xce0 fs/ntfs/attrib.c:597
+ ntfs_attr_lookup+0x1056/0x2070 fs/ntfs/attrib.c:1193
+ ntfs_read_inode_mount+0x89a/0x2580 fs/ntfs/inode.c:1845
+ ntfs_fill_super+0x1799/0x9320 fs/ntfs/super.c:2854
+ mount_bdev+0x34d/0x410 fs/super.c:1400
+ legacy_get_tree+0x105/0x220 fs/fs_context.c:610
+ vfs_get_tree+0x89/0x2f0 fs/super.c:1530
+ do_new_mount fs/namespace.c:3040 [inline]
+ path_mount+0x1326/0x1e20 fs/namespace.c:3370
+ do_mount fs/namespace.c:3383 [inline]
+ __do_sys_mount fs/namespace.c:3591 [inline]
+ __se_sys_mount fs/namespace.c:3568 [inline]
+ __x64_sys_mount+0x27f/0x300 fs/namespace.c:3568
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+ [...]
+ </TASK>
+
+The buggy address belongs to the physical page:
+page:ffffea0001f8d400 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x7e350
+head:ffffea0001f8d400 order:3 compound_mapcount:0 compound_pincount:0
+flags: 0xfff00000010200(slab|head|node=0|zone=1|lastcpupid=0x7ff)
+raw: 00fff00000010200 0000000000000000 dead000000000122 ffff888011842140
+raw: 0000000000000000 0000000000040004 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+Memory state around the buggy address:
+ ffff88807e351f00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff88807e351f80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+>ffff88807e352000: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+                      ^
+ ffff88807e352080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff88807e352100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+==================================================================
+
+This patch solves it by moving the ATTR_RECORD structure's bounds
+checking earlier, so that it happens before checking whether the
+ATTR_RECORD's name is within bounds.  What's more, this patch also
+adds some comments to improve maintainability.
+
+Link: https://lkml.kernel.org/r/20220831160935.3409-3-yin31149@gmail.com
+Link: https://lore.kernel.org/all/1636796c-c85e-7f47-e96f-e074fee3c7d3@huawei.com/
+Link: https://groups.google.com/g/syzkaller-bugs/c/t_XdeKPGTR4/m/LECAuIGcBgAJ
+Signed-off-by: chenxiaosong (A) <chenxiaosong2@huawei.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
+Reported-by: syzbot+5f8dcabe4a3b2c51c607@syzkaller.appspotmail.com
+Tested-by: syzbot+5f8dcabe4a3b2c51c607@syzkaller.appspotmail.com
+Cc: Anton Altaparmakov <anton@tuxera.com>
+Cc: syzkaller-bugs <syzkaller-bugs@googlegroups.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs/attrib.c |   20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/fs/ntfs/attrib.c
++++ b/fs/ntfs/attrib.c
+@@ -594,11 +594,23 @@ static int ntfs_attr_find(const ATTR_TYP
+       for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
+               u8 *mrec_end = (u8 *)ctx->mrec +
+                              le32_to_cpu(ctx->mrec->bytes_allocated);
+-              u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
+-                             a->name_length * sizeof(ntfschar);
+-              if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
+-                  name_end > mrec_end)
++              u8 *name_end;
++
++              /* check whether ATTR_RECORD wrap */
++              if ((u8 *)a < (u8 *)ctx->mrec)
+                       break;
++
++              /* check whether Attribute Record Header is within bounds */
++              if ((u8 *)a > mrec_end ||
++                  (u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
++                      break;
++
++              /* check whether ATTR_RECORD's name is within bounds */
++              name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
++                         a->name_length * sizeof(ntfschar);
++              if (name_end > mrec_end)
++                      break;
++
+               ctx->attr = a;
+               if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
+                               a->type == AT_END))
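
The reordered checks encode a general parsing rule: establish that the
fixed-size record header lies fully inside the buffer before reading
any field of it, and only then use those fields to bound the
variable-length name. A minimal sketch of that ordering (all names
hypothetical; ntfschar entries are 2 bytes wide):

  #include <stdint.h>
  #include <stddef.h>

  struct attr {                   /* cf. ATTR_RECORD header fields */
          uint32_t type;
          uint32_t length;
          uint8_t  name_length;   /* in 2-byte ntfschar units */
          uint8_t  pad;
          uint16_t name_offset;
  };

  static int attr_in_bounds(const struct attr *a,
                            const uint8_t *start, const uint8_t *end)
  {
          const uint8_t *p = (const uint8_t *)a;

          /* 1. The header itself must sit inside [start, end) ... */
          if (p < start || p + sizeof(*a) > end)
                  return 0;
          /* 2. ... only then may name_offset/name_length be read. */
          if (p + a->name_offset + a->name_length * 2 > end)
                  return 0;
          return 1;
  }

The original code computed step 2 before step 1, which is what let a
record straddling the end of the buffer be dereferenced.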
diff --git a/queue-6.0/ntfs-fix-use-after-free-in-ntfs_attr_find.patch b/queue-6.0/ntfs-fix-use-after-free-in-ntfs_attr_find.patch
new file mode 100644
index 0000000..b7c10d6
--- /dev/null
@@ -0,0 +1,124 @@
+From d85a1bec8e8d552ab13163ca1874dcd82f3d1550 Mon Sep 17 00:00:00 2001
+From: Hawkins Jiawei <yin31149@gmail.com>
+Date: Thu, 1 Sep 2022 00:09:34 +0800
+Subject: ntfs: fix use-after-free in ntfs_attr_find()
+
+From: Hawkins Jiawei <yin31149@gmail.com>
+
+commit d85a1bec8e8d552ab13163ca1874dcd82f3d1550 upstream.
+
+Patch series "ntfs: fix bugs about Attribute", v2.
+
+This patchset fixes three bugs related to Attributes in mft records:
+
+Patch 1 adds a sanity check to ensure that the attrs_offset field in
+the first mft record loaded from disk is within bounds.
+
+Patch 2 moves the ATTR_RECORD bounds checking earlier, to avoid
+dereferencing an ATTR_RECORD before checking that it is within bounds.
+
+Patch 3 adds an overflow check to avoid a possible endless loop in
+ntfs_attr_find().
+
+Without patch 1 and patch 2, the kernel triggers a KASAN use-after-free
+detection as reported by Syzkaller.
+
+Although either patch 1 or patch 2 alone can fix this, we still need
+both of them, because patch 1 fixes the root cause, and patch 2 not
+only fixes the direct cause but also a potential out-of-bounds bug.
+
+
+This patch (of 3):
+
+Syzkaller reported use-after-free read as follows:
+==================================================================
+BUG: KASAN: use-after-free in ntfs_attr_find+0xc02/0xce0 fs/ntfs/attrib.c:597
+Read of size 2 at addr ffff88807e352009 by task syz-executor153/3607
+
+[...]
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
+ print_address_description mm/kasan/report.c:317 [inline]
+ print_report.cold+0x2ba/0x719 mm/kasan/report.c:433
+ kasan_report+0xb1/0x1e0 mm/kasan/report.c:495
+ ntfs_attr_find+0xc02/0xce0 fs/ntfs/attrib.c:597
+ ntfs_attr_lookup+0x1056/0x2070 fs/ntfs/attrib.c:1193
+ ntfs_read_inode_mount+0x89a/0x2580 fs/ntfs/inode.c:1845
+ ntfs_fill_super+0x1799/0x9320 fs/ntfs/super.c:2854
+ mount_bdev+0x34d/0x410 fs/super.c:1400
+ legacy_get_tree+0x105/0x220 fs/fs_context.c:610
+ vfs_get_tree+0x89/0x2f0 fs/super.c:1530
+ do_new_mount fs/namespace.c:3040 [inline]
+ path_mount+0x1326/0x1e20 fs/namespace.c:3370
+ do_mount fs/namespace.c:3383 [inline]
+ __do_sys_mount fs/namespace.c:3591 [inline]
+ __se_sys_mount fs/namespace.c:3568 [inline]
+ __x64_sys_mount+0x27f/0x300 fs/namespace.c:3568
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+ [...]
+ </TASK>
+
+The buggy address belongs to the physical page:
+page:ffffea0001f8d400 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x7e350
+head:ffffea0001f8d400 order:3 compound_mapcount:0 compound_pincount:0
+flags: 0xfff00000010200(slab|head|node=0|zone=1|lastcpupid=0x7ff)
+raw: 00fff00000010200 0000000000000000 dead000000000122 ffff888011842140
+raw: 0000000000000000 0000000000040004 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+Memory state around the buggy address:
+ ffff88807e351f00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff88807e351f80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+>ffff88807e352000: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+                      ^
+ ffff88807e352080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff88807e352100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+==================================================================
+
+The kernel loads $MFT/$DATA's first mft record in
+ntfs_read_inode_mount().
+
+Yet the problem is that after loading, the kernel doesn't check
+whether the attrs_offset field holds a valid value.
+
+To be more specific, if the attrs_offset field is larger than the
+bytes_allocated field, it may trigger an out-of-bounds read bug
+(reported as a use-after-free bug) in ntfs_attr_find() when the kernel
+tries to access the corresponding mft record's attributes.
+
+This patch solves it by adding a sanity check comparing the
+attrs_offset field against the bytes_allocated field after loading the
+first mft record.
+
+Link: https://lkml.kernel.org/r/20220831160935.3409-1-yin31149@gmail.com
+Link: https://lkml.kernel.org/r/20220831160935.3409-2-yin31149@gmail.com
+Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
+Cc: Anton Altaparmakov <anton@tuxera.com>
+Cc: ChenXiaoSong <chenxiaosong2@huawei.com>
+Cc: syzkaller-bugs <syzkaller-bugs@googlegroups.com>
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs/inode.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -1829,6 +1829,13 @@ int ntfs_read_inode_mount(struct inode *
+               goto err_out;
+       }
++      /* Sanity check offset to the first attribute */
++      if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) {
++              ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.",
++                             le16_to_cpu(m->attrs_offset));
++              goto err_out;
++      }
++
+       /* Need this to sanity check attribute list references to $MFT. */
+       vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
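
The added check is plain input validation on on-disk metadata: the
offset to the first attribute must fall inside the record it claims to
describe, or everything derived from it is out of bounds. A minimal
sketch of the same test with hypothetical names (not the NTFS types):

  #include <stdint.h>

  struct mft_rec {                  /* cf. MFT_RECORD, after le conversion */
          uint16_t attrs_offset;    /* offset of the first ATTR_RECORD */
          uint32_t bytes_allocated; /* allocated size of this record */
  };

  /* Reject records whose first attribute would start out of bounds. */
  static int mft_rec_valid(const struct mft_rec *m)
  {
          return m->attrs_offset < m->bytes_allocated;
  }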
diff --git a/queue-6.0/series b/queue-6.0/series
index 72c24276ac37a06805f71433079a71bd1150e9e2..0458175cf96f46e5bd7488a7ebfe23695eb742c7 100644
@@ -307,3 +307,8 @@ netlink-bounds-check-struct-nlmsgerr-creation.patch
 wifi-wext-use-flex-array-destination-for-memcpy.patch
 rseq-use-pr_warn_once-when-deprecated-unknown-abi-flags-are-encountered.patch
 mm-fs-initialize-fsdata-passed-to-write_begin-write_end-interface.patch
+net-9p-use-a-dedicated-spinlock-for-trans_fd.patch
+bpf-prevent-bpf-program-recursion-for-raw-tracepoint-probes.patch
+ntfs-fix-use-after-free-in-ntfs_attr_find.patch
+ntfs-fix-out-of-bounds-read-in-ntfs_attr_find.patch
+ntfs-check-overflow-when-iterating-attr_records.patch