--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Cengiz Can <cengiz@kernel.wtf>
+Date: Wed, 4 Mar 2020 13:58:19 +0300
+Subject: blktrace: fix dereference after null check
+
+From: Cengiz Can <cengiz@kernel.wtf>
+
+commit 153031a301bb07194e9c37466cfce8eacb977621 upstream.
+
+There was a recent change in blktrace.c that added RCU protection to
+`q->blk_trace` in order to fix a use-after-free issue during access.
+
+However, the change missed an edge case that can lead to dereferencing
+the `bt` pointer even when it is NULL:
+
+The Coverity static analyzer marked this as a FORWARD_NULL issue with
+CID 1460458.
+
+```
+/kernel/trace/blktrace.c: 1904 in sysfs_blk_trace_attr_store()
+1898 ret = 0;
+1899 if (bt == NULL)
+1900 ret = blk_trace_setup_queue(q, bdev);
+1901
+1902 if (ret == 0) {
+1903 if (attr == &dev_attr_act_mask)
+>>> CID 1460458: Null pointer dereferences (FORWARD_NULL)
+>>> Dereferencing null pointer "bt".
+1904 bt->act_mask = value;
+1905 else if (attr == &dev_attr_pid)
+1906 bt->pid = value;
+1907 else if (attr == &dev_attr_start_lba)
+1908 bt->start_lba = value;
+1909 else if (attr == &dev_attr_end_lba)
+```
+
+Add a reassignment with an RCU annotation to fix the issue.
+
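+In sketch form (this mirrors the hunk below, not new logic): the local
+`bt` is loaded once under `blk_trace_mutex`, and blk_trace_setup_queue()
+publishes a new trace to `q->blk_trace` without updating that stale
+local, so it has to be re-read under the same mutex before use.
+
+```
+/* 'bt' was loaded before the trace existed */
+bt = rcu_dereference_protected(q->blk_trace,
+			       lockdep_is_held(&q->blk_trace_mutex));
+
+ret = 0;
+if (bt == NULL) {
+	ret = blk_trace_setup_queue(q, bdev);	/* publishes q->blk_trace */
+	/* without this re-read, 'bt' stays NULL below */
+	bt = rcu_dereference_protected(q->blk_trace,
+				       lockdep_is_held(&q->blk_trace_mutex));
+}
+
+if (ret == 0 && attr == &dev_attr_act_mask)
+	bt->act_mask = value;	/* previously a NULL dereference */
+```
+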
+Fixes: c780e86dd48 ("blktrace: Protect q->blk_trace with RCU")
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Bob Liu <bob.liu@oracle.com>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Cengiz Can <cengiz@kernel.wtf>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/blktrace.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -1911,8 +1911,11 @@ static ssize_t sysfs_blk_trace_attr_stor
+ }
+
+ ret = 0;
+- if (bt == NULL)
++ if (bt == NULL) {
+ ret = blk_trace_setup_queue(q, bdev);
++ bt = rcu_dereference_protected(q->blk_trace,
++ lockdep_is_held(&q->blk_trace_mutex));
++ }
+
+ if (ret == 0) {
+ if (attr == &dev_attr_act_mask)
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 19 Nov 2017 11:52:55 -0700
+Subject: blktrace: fix trace mutex deadlock
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 2967acbb257a6a9bf912f4778b727e00972eac9b upstream.
+
+A previous commit changed the locking around registration/cleanup,
+but direct callers of blk_trace_remove() were missed. This means
+that if we hit the error path in setup, we will deadlock attempting
+to re-acquire the queue trace mutex.
+
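+In sketch form (collapsing the locked wrapper and its worker into one
+function for brevity), the deadlock is a recursive acquisition of the
+same mutex:
+
+```
+int blk_trace_setup(...)
+{
+	mutex_lock(&q->blk_trace_mutex);
+	...
+	if (copy_to_user(arg, &buts, sizeof(buts))) {
+		/* blk_trace_remove() takes blk_trace_mutex again: deadlock */
+		blk_trace_remove(q);
+		return -EFAULT;
+	}
+	...
+}
+```
+
+Switching the error path to the unlocked __blk_trace_remove() variant,
+for which the caller already holds the mutex, avoids this.
+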
+Fixes: 1f2cac107c59 ("blktrace: fix unlocked access to init/start-stop/teardown")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/blktrace.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -591,7 +591,7 @@ static int __blk_trace_setup(struct requ
+ return ret;
+
+ if (copy_to_user(arg, &buts, sizeof(buts))) {
+- blk_trace_remove(q);
++ __blk_trace_remove(q);
+ return -EFAULT;
+ }
+ return 0;
+@@ -637,7 +637,7 @@ static int compat_blk_trace_setup(struct
+ return ret;
+
+ if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
+- blk_trace_remove(q);
++ __blk_trace_remove(q);
+ return -EFAULT;
+ }
+
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 5 Nov 2017 09:13:48 -0700
+Subject: blktrace: fix unlocked access to init/start-stop/teardown
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 1f2cac107c591c24b60b115d6050adc213d10fc0 upstream.
+
+sg.c calls into the blktrace functions without holding the proper queue
+mutex for doing setup, start/stop, or teardown.
+
+Add internal unlocked variants, and export the ones that do the proper
+locking.
+
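+The pattern, sketched with blk_trace_remove() (the other entry points
+follow the same shape): the __-prefixed worker assumes the caller holds
+q->blk_trace_mutex, and the exported wrapper owns the locking, so paths
+such as blk_trace_ioctl() that already hold the mutex call the worker
+directly.
+
+```
+static int __blk_trace_remove(struct request_queue *q)
+{
+	/* caller must hold q->blk_trace_mutex */
+	...
+}
+
+int blk_trace_remove(struct request_queue *q)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_remove(q);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blk_trace_remove);
+```
+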
+Fixes: 6da127ad0918 ("blktrace: Add blktrace ioctls to SCSI generic devices")
+Tested-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/blktrace.c | 58 +++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 48 insertions(+), 10 deletions(-)
+
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -352,7 +352,7 @@ static void blk_trace_cleanup(struct blk
+ put_probe_ref();
+ }
+
+-int blk_trace_remove(struct request_queue *q)
++static int __blk_trace_remove(struct request_queue *q)
+ {
+ struct blk_trace *bt;
+
+@@ -365,6 +365,17 @@ int blk_trace_remove(struct request_queu
+
+ return 0;
+ }
++
++int blk_trace_remove(struct request_queue *q)
++{
++ int ret;
++
++ mutex_lock(&q->blk_trace_mutex);
++ ret = __blk_trace_remove(q);
++ mutex_unlock(&q->blk_trace_mutex);
++
++ return ret;
++}
+ EXPORT_SYMBOL_GPL(blk_trace_remove);
+
+ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+@@ -565,9 +576,8 @@ err:
+ return ret;
+ }
+
+-int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+- struct block_device *bdev,
+- char __user *arg)
++static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
++ struct block_device *bdev, char __user *arg)
+ {
+ struct blk_user_trace_setup buts;
+ int ret;
+@@ -586,6 +596,19 @@ int blk_trace_setup(struct request_queue
+ }
+ return 0;
+ }
++
++int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
++ struct block_device *bdev,
++ char __user *arg)
++{
++ int ret;
++
++ mutex_lock(&q->blk_trace_mutex);
++ ret = __blk_trace_setup(q, name, dev, bdev, arg);
++ mutex_unlock(&q->blk_trace_mutex);
++
++ return ret;
++}
+ EXPORT_SYMBOL_GPL(blk_trace_setup);
+
+ #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+@@ -622,7 +645,7 @@ static int compat_blk_trace_setup(struct
+ }
+ #endif
+
+-int blk_trace_startstop(struct request_queue *q, int start)
++static int __blk_trace_startstop(struct request_queue *q, int start)
+ {
+ int ret;
+ struct blk_trace *bt = q->blk_trace;
+@@ -661,6 +684,17 @@ int blk_trace_startstop(struct request_q
+
+ return ret;
+ }
++
++int blk_trace_startstop(struct request_queue *q, int start)
++{
++ int ret;
++
++ mutex_lock(&q->blk_trace_mutex);
++ ret = __blk_trace_startstop(q, start);
++ mutex_unlock(&q->blk_trace_mutex);
++
++ return ret;
++}
+ EXPORT_SYMBOL_GPL(blk_trace_startstop);
+
+ /*
+@@ -691,7 +725,7 @@ int blk_trace_ioctl(struct block_device
+ switch (cmd) {
+ case BLKTRACESETUP:
+ bdevname(bdev, b);
+- ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
++ ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+ break;
+ #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+ case BLKTRACESETUP32:
+@@ -702,10 +736,10 @@ int blk_trace_ioctl(struct block_device
+ case BLKTRACESTART:
+ start = 1;
+ case BLKTRACESTOP:
+- ret = blk_trace_startstop(q, start);
++ ret = __blk_trace_startstop(q, start);
+ break;
+ case BLKTRACETEARDOWN:
+- ret = blk_trace_remove(q);
++ ret = __blk_trace_remove(q);
+ break;
+ default:
+ ret = -ENOTTY;
+@@ -723,10 +757,14 @@ int blk_trace_ioctl(struct block_device
+ **/
+ void blk_trace_shutdown(struct request_queue *q)
+ {
++ mutex_lock(&q->blk_trace_mutex);
++
+ if (q->blk_trace) {
+- blk_trace_startstop(q, 0);
+- blk_trace_remove(q);
++ __blk_trace_startstop(q, 0);
++ __blk_trace_remove(q);
+ }
++
++ mutex_unlock(&q->blk_trace_mutex);
+ }
+
+ #ifdef CONFIG_BLK_CGROUP
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 6 Feb 2020 15:28:12 +0100
+Subject: blktrace: Protect q->blk_trace with RCU
+
+From: Jan Kara <jack@suse.cz>
+
+commit c780e86dd48ef6467a1146cf7d0fe1e05a635039 upstream.
+
+KASAN is reporting that __blk_add_trace() has a use-after-free issue
+when accessing q->blk_trace. Indeed, the switching of block tracing (and
+thus the eventual freeing of q->blk_trace) is completely unsynchronized
+with the currently running tracing, so it can happen that the blk_trace
+structure is freed while __blk_add_trace() is still working on it.
+Protect accesses to q->blk_trace with RCU during tracing and make sure
+we wait for the end of an RCU grace period when shutting down tracing.
+Luckily, that is a rare enough event that we can afford it. Note that
+postponing the freeing of blk_trace to an RCU callback is better
+avoided, as it could have unexpected user-visible side effects: the
+debugfs files would still exist for a short while after block tracing
+has been shut down.
+
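+The resulting discipline, in a minimal sketch (the actual clearing and
+freeing happen in separate functions in the patch): hot tracing paths
+read the pointer under rcu_read_lock() and tolerate NULL, while the
+teardown side, already serialized by q->blk_trace_mutex, waits out a
+grace period before freeing.
+
+```
+/* reader: any tracing hot path */
+rcu_read_lock();
+bt = rcu_dereference(q->blk_trace);
+if (bt)
+	__blk_add_trace(bt, ...);
+rcu_read_unlock();
+
+/* updater: teardown under q->blk_trace_mutex */
+bt = rcu_dereference_protected(q->blk_trace,
+			       lockdep_is_held(&q->blk_trace_mutex));
+RCU_INIT_POINTER(q->blk_trace, NULL);
+synchronize_rcu();	/* no reader can still see the old 'bt' */
+blk_trace_free(bt);
+```
+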
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=205711
+CC: stable@vger.kernel.org
+Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Tested-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Reported-by: Tristan Madani <tristmd@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[bwh: Backported to 4.14: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/blkdev.h | 2
+ include/linux/blktrace_api.h | 18 ++++--
+ kernel/trace/blktrace.c | 114 +++++++++++++++++++++++++++++++------------
+ 3 files changed, 97 insertions(+), 37 deletions(-)
+
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -568,7 +568,7 @@ struct request_queue {
+ unsigned int sg_reserved_size;
+ int node;
+ #ifdef CONFIG_BLK_DEV_IO_TRACE
+- struct blk_trace *blk_trace;
++ struct blk_trace __rcu *blk_trace;
+ struct mutex blk_trace_mutex;
+ #endif
+ /*
+--- a/include/linux/blktrace_api.h
++++ b/include/linux/blktrace_api.h
+@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_tra
+ **/
+ #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
+ do { \
+- struct blk_trace *bt = (q)->blk_trace; \
++ struct blk_trace *bt; \
++ \
++ rcu_read_lock(); \
++ bt = rcu_dereference((q)->blk_trace); \
+ if (unlikely(bt)) \
+ __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
++ rcu_read_unlock(); \
+ } while (0)
+ #define blk_add_trace_msg(q, fmt, ...) \
+ blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
+@@ -61,10 +65,14 @@ void __trace_note_message(struct blk_tra
+
+ static inline bool blk_trace_note_message_enabled(struct request_queue *q)
+ {
+- struct blk_trace *bt = q->blk_trace;
+- if (likely(!bt))
+- return false;
+- return bt->act_mask & BLK_TC_NOTIFY;
++ struct blk_trace *bt;
++ bool ret;
++
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
++ ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
++ rcu_read_unlock();
++ return ret;
+ }
+
+ extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -348,6 +348,7 @@ static void put_probe_ref(void)
+
+ static void blk_trace_cleanup(struct blk_trace *bt)
+ {
++ synchronize_rcu();
+ blk_trace_free(bt);
+ put_probe_ref();
+ }
+@@ -648,8 +649,10 @@ static int compat_blk_trace_setup(struct
+ static int __blk_trace_startstop(struct request_queue *q, int start)
+ {
+ int ret;
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+
++ bt = rcu_dereference_protected(q->blk_trace,
++ lockdep_is_held(&q->blk_trace_mutex));
+ if (bt == NULL)
+ return -EINVAL;
+
+@@ -758,8 +761,8 @@ int blk_trace_ioctl(struct block_device
+ void blk_trace_shutdown(struct request_queue *q)
+ {
+ mutex_lock(&q->blk_trace_mutex);
+-
+- if (q->blk_trace) {
++ if (rcu_dereference_protected(q->blk_trace,
++ lockdep_is_held(&q->blk_trace_mutex))) {
+ __blk_trace_startstop(q, 0);
+ __blk_trace_remove(q);
+ }
+@@ -771,8 +774,10 @@ void blk_trace_shutdown(struct request_q
+ static union kernfs_node_id *
+ blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
+ {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+
++ /* We don't use the 'bt' value here except as an optimization... */
++ bt = rcu_dereference_protected(q->blk_trace, 1);
+ if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
+ return NULL;
+
+@@ -817,10 +822,14 @@ static void blk_add_trace_rq(struct requ
+ unsigned int nr_bytes, u32 what,
+ union kernfs_node_id *cgid)
+ {
+- struct blk_trace *bt = rq->q->blk_trace;
++ struct blk_trace *bt;
+
+- if (likely(!bt))
++ rcu_read_lock();
++ bt = rcu_dereference(rq->q->blk_trace);
++ if (likely(!bt)) {
++ rcu_read_unlock();
+ return;
++ }
+
+ if (blk_rq_is_passthrough(rq))
+ what |= BLK_TC_ACT(BLK_TC_PC);
+@@ -829,6 +838,7 @@ static void blk_add_trace_rq(struct requ
+
+ __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
+ rq->cmd_flags, what, error, 0, NULL, cgid);
++ rcu_read_unlock();
+ }
+
+ static void blk_add_trace_rq_insert(void *ignore,
+@@ -874,13 +884,18 @@ static void blk_add_trace_rq_complete(vo
+ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+ u32 what, int error, union kernfs_node_id *cgid)
+ {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+
+- if (likely(!bt))
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
++ if (likely(!bt)) {
++ rcu_read_unlock();
+ return;
++ }
+
+ __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+ bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
++ rcu_read_unlock();
+ }
+
+ static void blk_add_trace_bio_bounce(void *ignore,
+@@ -931,11 +946,14 @@ static void blk_add_trace_getrq(void *ig
+ blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
+ blk_trace_bio_get_cgid(q, bio));
+ else {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
+ if (bt)
+ __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
+ NULL, NULL);
++ rcu_read_unlock();
+ }
+ }
+
+@@ -948,27 +966,35 @@ static void blk_add_trace_sleeprq(void *
+ blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
+ blk_trace_bio_get_cgid(q, bio));
+ else {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
+ if (bt)
+ __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
+ 0, 0, NULL, NULL);
++ rcu_read_unlock();
+ }
+ }
+
+ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
+ {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
+ if (bt)
+ __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
++ rcu_read_unlock();
+ }
+
+ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+ unsigned int depth, bool explicit)
+ {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
+ if (bt) {
+ __be64 rpdu = cpu_to_be64(depth);
+ u32 what;
+@@ -980,14 +1006,17 @@ static void blk_add_trace_unplug(void *i
+
+ __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
+ }
++ rcu_read_unlock();
+ }
+
+ static void blk_add_trace_split(void *ignore,
+ struct request_queue *q, struct bio *bio,
+ unsigned int pdu)
+ {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
+ if (bt) {
+ __be64 rpdu = cpu_to_be64(pdu);
+
+@@ -996,6 +1025,7 @@ static void blk_add_trace_split(void *ig
+ BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
+ &rpdu, blk_trace_bio_get_cgid(q, bio));
+ }
++ rcu_read_unlock();
+ }
+
+ /**
+@@ -1015,11 +1045,15 @@ static void blk_add_trace_bio_remap(void
+ struct request_queue *q, struct bio *bio,
+ dev_t dev, sector_t from)
+ {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+ struct blk_io_trace_remap r;
+
+- if (likely(!bt))
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
++ if (likely(!bt)) {
++ rcu_read_unlock();
+ return;
++ }
+
+ r.device_from = cpu_to_be32(dev);
+ r.device_to = cpu_to_be32(bio_dev(bio));
+@@ -1028,6 +1062,7 @@ static void blk_add_trace_bio_remap(void
+ __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+ bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
+ sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
++ rcu_read_unlock();
+ }
+
+ /**
+@@ -1048,11 +1083,15 @@ static void blk_add_trace_rq_remap(void
+ struct request *rq, dev_t dev,
+ sector_t from)
+ {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+ struct blk_io_trace_remap r;
+
+- if (likely(!bt))
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
++ if (likely(!bt)) {
++ rcu_read_unlock();
+ return;
++ }
+
+ r.device_from = cpu_to_be32(dev);
+ r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
+@@ -1061,6 +1100,7 @@ static void blk_add_trace_rq_remap(void
+ __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+ rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
+ sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
++ rcu_read_unlock();
+ }
+
+ /**
+@@ -1078,14 +1118,19 @@ void blk_add_driver_data(struct request_
+ struct request *rq,
+ void *data, size_t len)
+ {
+- struct blk_trace *bt = q->blk_trace;
++ struct blk_trace *bt;
+
+- if (likely(!bt))
++ rcu_read_lock();
++ bt = rcu_dereference(q->blk_trace);
++ if (likely(!bt)) {
++ rcu_read_unlock();
+ return;
++ }
+
+ __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
+ BLK_TA_DRV_DATA, 0, len, data,
+ blk_trace_request_get_cgid(q, rq));
++ rcu_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(blk_add_driver_data);
+
+@@ -1612,6 +1657,7 @@ static int blk_trace_remove_queue(struct
+ return -EINVAL;
+
+ put_probe_ref();
++ synchronize_rcu();
+ blk_trace_free(bt);
+ return 0;
+ }
+@@ -1773,6 +1819,7 @@ static ssize_t sysfs_blk_trace_attr_show
+ struct hd_struct *p = dev_to_part(dev);
+ struct request_queue *q;
+ struct block_device *bdev;
++ struct blk_trace *bt;
+ ssize_t ret = -ENXIO;
+
+ bdev = bdget(part_devt(p));
+@@ -1785,21 +1832,23 @@ static ssize_t sysfs_blk_trace_attr_show
+
+ mutex_lock(&q->blk_trace_mutex);
+
++ bt = rcu_dereference_protected(q->blk_trace,
++ lockdep_is_held(&q->blk_trace_mutex));
+ if (attr == &dev_attr_enable) {
+- ret = sprintf(buf, "%u\n", !!q->blk_trace);
++ ret = sprintf(buf, "%u\n", !!bt);
+ goto out_unlock_bdev;
+ }
+
+- if (q->blk_trace == NULL)
++ if (bt == NULL)
+ ret = sprintf(buf, "disabled\n");
+ else if (attr == &dev_attr_act_mask)
+- ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
++ ret = blk_trace_mask2str(buf, bt->act_mask);
+ else if (attr == &dev_attr_pid)
+- ret = sprintf(buf, "%u\n", q->blk_trace->pid);
++ ret = sprintf(buf, "%u\n", bt->pid);
+ else if (attr == &dev_attr_start_lba)
+- ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
++ ret = sprintf(buf, "%llu\n", bt->start_lba);
+ else if (attr == &dev_attr_end_lba)
+- ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
++ ret = sprintf(buf, "%llu\n", bt->end_lba);
+
+ out_unlock_bdev:
+ mutex_unlock(&q->blk_trace_mutex);
+@@ -1816,6 +1865,7 @@ static ssize_t sysfs_blk_trace_attr_stor
+ struct block_device *bdev;
+ struct request_queue *q;
+ struct hd_struct *p;
++ struct blk_trace *bt;
+ u64 value;
+ ssize_t ret = -EINVAL;
+
+@@ -1846,8 +1896,10 @@ static ssize_t sysfs_blk_trace_attr_stor
+
+ mutex_lock(&q->blk_trace_mutex);
+
++ bt = rcu_dereference_protected(q->blk_trace,
++ lockdep_is_held(&q->blk_trace_mutex));
+ if (attr == &dev_attr_enable) {
+- if (!!value == !!q->blk_trace) {
++ if (!!value == !!bt) {
+ ret = 0;
+ goto out_unlock_bdev;
+ }
+@@ -1859,18 +1911,18 @@ static ssize_t sysfs_blk_trace_attr_stor
+ }
+
+ ret = 0;
+- if (q->blk_trace == NULL)
++ if (bt == NULL)
+ ret = blk_trace_setup_queue(q, bdev);
+
+ if (ret == 0) {
+ if (attr == &dev_attr_act_mask)
+- q->blk_trace->act_mask = value;
++ bt->act_mask = value;
+ else if (attr == &dev_attr_pid)
+- q->blk_trace->pid = value;
++ bt->pid = value;
+ else if (attr == &dev_attr_start_lba)
+- q->blk_trace->start_lba = value;
++ bt->start_lba = value;
+ else if (attr == &dev_attr_end_lba)
+- q->blk_trace->end_lba = value;
++ bt->end_lba = value;
+ }
+
+ out_unlock_bdev:
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Randall Huang <huangrandall@google.com>
+Date: Thu, 11 Apr 2019 16:26:46 +0800
+Subject: f2fs: fix to avoid accessing xattr across the boundary
+
+From: Randall Huang <huangrandall@google.com>
+
+commit 2777e654371dd4207a3a7f4fb5fa39550053a080 upstream.
+
+When we traverse xattr entries via __find_xattr(), if the raw
+filesystem content is faked or any hardware failure occurs, an
+out-of-bounds access can be detected by KASAN.
+Fix the issue by introducing a boundary check.
+
+[ 38.402878] c7 1827 BUG: KASAN: slab-out-of-bounds in f2fs_getxattr+0x518/0x68c
+[ 38.402891] c7 1827 Read of size 4 at addr ffffffc0b6fb35dc by task
+[ 38.402935] c7 1827 Call trace:
+[ 38.402952] c7 1827 [<ffffff900809003c>] dump_backtrace+0x0/0x6bc
+[ 38.402966] c7 1827 [<ffffff9008090030>] show_stack+0x20/0x2c
+[ 38.402981] c7 1827 [<ffffff900871ab10>] dump_stack+0xfc/0x140
+[ 38.402995] c7 1827 [<ffffff9008325c40>] print_address_description+0x80/0x2d8
+[ 38.403009] c7 1827 [<ffffff900832629c>] kasan_report_error+0x198/0x1fc
+[ 38.403022] c7 1827 [<ffffff9008326104>] kasan_report_error+0x0/0x1fc
+[ 38.403037] c7 1827 [<ffffff9008325000>] __asan_load4+0x1b0/0x1b8
+[ 38.403051] c7 1827 [<ffffff90085fcc44>] f2fs_getxattr+0x518/0x68c
+[ 38.403066] c7 1827 [<ffffff90085fc508>] f2fs_xattr_generic_get+0xb0/0xd0
+[ 38.403080] c7 1827 [<ffffff9008395708>] __vfs_getxattr+0x1f4/0x1fc
+[ 38.403096] c7 1827 [<ffffff9008621bd0>] inode_doinit_with_dentry+0x360/0x938
+[ 38.403109] c7 1827 [<ffffff900862d6cc>] selinux_d_instantiate+0x2c/0x38
+[ 38.403123] c7 1827 [<ffffff900861b018>] security_d_instantiate+0x68/0x98
+[ 38.403136] c7 1827 [<ffffff9008377db8>] d_splice_alias+0x58/0x348
+[ 38.403149] c7 1827 [<ffffff900858d16c>] f2fs_lookup+0x608/0x774
+[ 38.403163] c7 1827 [<ffffff900835eacc>] lookup_slow+0x1e0/0x2cc
+[ 38.403177] c7 1827 [<ffffff9008367fe0>] walk_component+0x160/0x520
+[ 38.403190] c7 1827 [<ffffff9008369ef4>] path_lookupat+0x110/0x2b4
+[ 38.403203] c7 1827 [<ffffff900835dd38>] filename_lookup+0x1d8/0x3a8
+[ 38.403216] c7 1827 [<ffffff900835eeb0>] user_path_at_empty+0x54/0x68
+[ 38.403229] c7 1827 [<ffffff9008395f44>] SyS_getxattr+0xb4/0x18c
+[ 38.403241] c7 1827 [<ffffff9008084200>] el0_svc_naked+0x34/0x38
+
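+The check works in two steps (a sketch of the walker below): make sure
+the entry header, i.e. the first __u32 covering e_name_index and
+e_name_len, is readable, and then that the full entry as sized by
+e_name_len still ends inside the allocated buffer.
+
+```
+list_for_each_xattr(entry, base_addr) {
+	/* header in bounds, and next-entry offset in bounds? */
+	if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+	    (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
+		return NULL;	/* corrupted image; callers return -EFAULT */
+	...
+}
+```
+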
+Signed-off-by: Randall Huang <huangrandall@google.com>
+[Jaegeuk Kim: Fix wrong ending boundary]
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[bwh: Backported to 4.14: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/xattr.c | 36 +++++++++++++++++++++++++++---------
+ fs/f2fs/xattr.h | 2 ++
+ 2 files changed, 29 insertions(+), 9 deletions(-)
+
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -201,12 +201,17 @@ static inline const struct xattr_handler
+ return handler;
+ }
+
+-static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
+- size_t len, const char *name)
++static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
++ void *last_base_addr, int index,
++ size_t len, const char *name)
+ {
+ struct f2fs_xattr_entry *entry;
+
+ list_for_each_xattr(entry, base_addr) {
++ if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
++ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
++ return NULL;
++
+ if (entry->e_name_index != index)
+ continue;
+ if (entry->e_name_len != len)
+@@ -289,20 +294,22 @@ static int lookup_all_xattrs(struct inod
+ const char *name, struct f2fs_xattr_entry **xe,
+ void **base_addr, int *base_size)
+ {
+- void *cur_addr, *txattr_addr, *last_addr = NULL;
++ void *cur_addr, *txattr_addr, *last_txattr_addr;
++ void *last_addr = NULL;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+- unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
+ unsigned int inline_size = inline_xattr_size(inode);
+ int err = 0;
+
+- if (!size && !inline_size)
++ if (!xnid && !inline_size)
+ return -ENODATA;
+
+- *base_size = inline_size + size + XATTR_PADDING_SIZE;
++ *base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE;
+ txattr_addr = kzalloc(*base_size, GFP_F2FS_ZERO);
+ if (!txattr_addr)
+ return -ENOMEM;
+
++ last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode);
++
+ /* read from inline xattr */
+ if (inline_size) {
+ err = read_inline_xattr(inode, ipage, txattr_addr);
+@@ -329,7 +336,11 @@ static int lookup_all_xattrs(struct inod
+ else
+ cur_addr = txattr_addr;
+
+- *xe = __find_xattr(cur_addr, index, len, name);
++ *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
++ if (!*xe) {
++ err = -EFAULT;
++ goto out;
++ }
+ check:
+ if (IS_XATTR_LAST_ENTRY(*xe)) {
+ err = -ENODATA;
+@@ -562,7 +573,8 @@ static int __f2fs_setxattr(struct inode
+ struct page *ipage, int flags)
+ {
+ struct f2fs_xattr_entry *here, *last;
+- void *base_addr;
++ void *base_addr, *last_base_addr;
++ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ int found, newsize;
+ size_t len;
+ __u32 new_hsize;
+@@ -586,8 +598,14 @@ static int __f2fs_setxattr(struct inode
+ if (error)
+ return error;
+
++ last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
++
+ /* find entry with wanted name. */
+- here = __find_xattr(base_addr, index, len, name);
++ here = __find_xattr(base_addr, last_base_addr, index, len, name);
++ if (!here) {
++ error = -EFAULT;
++ goto exit;
++ }
+
+ found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
+
+--- a/fs/f2fs/xattr.h
++++ b/fs/f2fs/xattr.h
+@@ -74,6 +74,8 @@ struct f2fs_xattr_entry {
+ entry = XATTR_NEXT_ENTRY(entry))
+ #define VALID_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer))
+ #define XATTR_PADDING_SIZE (sizeof(__u32))
++#define XATTR_SIZE(x,i) (((x) ? VALID_XATTR_BLOCK_SIZE : 0) + \
++ (inline_xattr_size(i)))
+ #define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + \
+ VALID_XATTR_BLOCK_SIZE)
+
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Randall Huang <huangrandall@google.com>
+Date: Fri, 18 Oct 2019 14:56:22 +0800
+Subject: f2fs: fix to avoid memory leakage in f2fs_listxattr
+
+From: Randall Huang <huangrandall@google.com>
+
+commit 688078e7f36c293dae25b338ddc9e0a2790f6e06 upstream.
+
+In f2fs_listxattr(), there is no boundary check before e_name is
+copied with memcpy() into the buffer.
+If e_name_len is corrupted,
+unexpected memory contents may be returned in the buffer.
+
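+A corrupted e_name_len makes XATTR_NEXT_ENTRY() jump past the end of
+the in-memory xattr copy, so the walk is bounded (sketched below) the
+same way __find_xattr() already is, before anything is copied out:
+
+```
+last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
+list_for_each_xattr(entry, base_addr) {
+	if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+	    (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+		/* flag the filesystem for fsck and stop the walk */
+		error = -EFSCORRUPTED;
+		goto cleanup;
+	}
+	...
+}
+```
+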
+Signed-off-by: Randall Huang <huangrandall@google.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[bwh: Backported to 4.14: Use f2fs_msg() instead of f2fs_err()]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/xattr.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -516,8 +516,9 @@ out:
+ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+ {
+ struct inode *inode = d_inode(dentry);
++ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ struct f2fs_xattr_entry *entry;
+- void *base_addr;
++ void *base_addr, *last_base_addr;
+ int error = 0;
+ size_t rest = buffer_size;
+
+@@ -527,6 +528,8 @@ ssize_t f2fs_listxattr(struct dentry *de
+ if (error)
+ return error;
+
++ last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
++
+ list_for_each_xattr(entry, base_addr) {
+ const struct xattr_handler *handler =
+ f2fs_xattr_handler(entry->e_name_index);
+@@ -534,6 +537,16 @@ ssize_t f2fs_listxattr(struct dentry *de
+ size_t prefix_len;
+ size_t size;
+
++ if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
++ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
++ f2fs_msg(dentry->d_sb, KERN_ERR,
++ "inode (%lu) has corrupted xattr",
++ inode->i_ino);
++ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
++ error = -EFSCORRUPTED;
++ goto cleanup;
++ }
++
+ if (!handler || (handler->list && !handler->list(dentry)))
+ continue;
+
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Chao Yu <yuchao0@huawei.com>
+Date: Mon, 4 Sep 2017 18:58:02 +0800
+Subject: f2fs: introduce read_inline_xattr
+
+From: Chao Yu <yuchao0@huawei.com>
+
+commit a5f433f7410530ae6bb907ebc049547d9dce665b upstream.
+
+Commit ba38c27eb93e ("f2fs: enhance lookup xattr") introduced
+lookup_all_xattrs(), duplicated from read_all_xattrs(), which leaves
+a lot of similar code between them, so introduce a new helper,
+read_inline_xattr(), to clean up the redundant code.
+
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/xattr.c | 59 +++++++++++++++++++++++++++-----------------------------
+ 1 file changed, 29 insertions(+), 30 deletions(-)
+
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -241,6 +241,29 @@ static struct f2fs_xattr_entry *__find_i
+ return entry;
+ }
+
++static int read_inline_xattr(struct inode *inode, struct page *ipage,
++ void *txattr_addr)
++{
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++ unsigned int inline_size = inline_xattr_size(inode);
++ struct page *page = NULL;
++ void *inline_addr;
++
++ if (ipage) {
++ inline_addr = inline_xattr_addr(ipage);
++ } else {
++ page = get_node_page(sbi, inode->i_ino);
++ if (IS_ERR(page))
++ return PTR_ERR(page);
++
++ inline_addr = inline_xattr_addr(page);
++ }
++ memcpy(txattr_addr, inline_addr, inline_size);
++ f2fs_put_page(page, 1);
++
++ return 0;
++}
++
+ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+ unsigned int index, unsigned int len,
+ const char *name, struct f2fs_xattr_entry **xe,
+@@ -263,21 +286,9 @@ static int lookup_all_xattrs(struct inod
+
+ /* read from inline xattr */
+ if (inline_size) {
+- struct page *page = NULL;
+- void *inline_addr;
+-
+- if (ipage) {
+- inline_addr = inline_xattr_addr(ipage);
+- } else {
+- page = get_node_page(sbi, inode->i_ino);
+- if (IS_ERR(page)) {
+- err = PTR_ERR(page);
+- goto out;
+- }
+- inline_addr = inline_xattr_addr(page);
+- }
+- memcpy(txattr_addr, inline_addr, inline_size);
+- f2fs_put_page(page, 1);
++ err = read_inline_xattr(inode, ipage, txattr_addr);
++ if (err)
++ goto out;
+
+ *xe = __find_inline_xattr(txattr_addr, &last_addr,
+ index, len, name);
+@@ -339,21 +350,9 @@ static int read_all_xattrs(struct inode
+
+ /* read from inline xattr */
+ if (inline_size) {
+- struct page *page = NULL;
+- void *inline_addr;
+-
+- if (ipage) {
+- inline_addr = inline_xattr_addr(ipage);
+- } else {
+- page = get_node_page(sbi, inode->i_ino);
+- if (IS_ERR(page)) {
+- err = PTR_ERR(page);
+- goto fail;
+- }
+- inline_addr = inline_xattr_addr(page);
+- }
+- memcpy(txattr_addr, inline_addr, inline_size);
+- f2fs_put_page(page, 1);
++ err = read_inline_xattr(inode, ipage, txattr_addr);
++ if (err)
++ goto fail;
+ }
+
+ /* read from xattr node block */
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Chao Yu <yuchao0@huawei.com>
+Date: Mon, 4 Sep 2017 18:58:03 +0800
+Subject: f2fs: introduce read_xattr_block
+
+From: Chao Yu <yuchao0@huawei.com>
+
+commit 63840695f68c20735df8861062343cf1faa3768d upstream.
+
+Commit ba38c27eb93e ("f2fs: enhance lookup xattr") introduced
+lookup_all_xattrs(), duplicated from read_all_xattrs(), which leaves
+a lot of similar code between them, so introduce a new helper,
+read_xattr_block(), to clean up the redundant code.
+
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/xattr.c | 50 ++++++++++++++++++++++++--------------------------
+ 1 file changed, 24 insertions(+), 26 deletions(-)
+
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -264,12 +264,31 @@ static int read_inline_xattr(struct inod
+ return 0;
+ }
+
++static int read_xattr_block(struct inode *inode, void *txattr_addr)
++{
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
++ unsigned int inline_size = inline_xattr_size(inode);
++ struct page *xpage;
++ void *xattr_addr;
++
++ /* The inode already has an extended attribute block. */
++ xpage = get_node_page(sbi, xnid);
++ if (IS_ERR(xpage))
++ return PTR_ERR(xpage);
++
++ xattr_addr = page_address(xpage);
++ memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE);
++ f2fs_put_page(xpage, 1);
++
++ return 0;
++}
++
+ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+ unsigned int index, unsigned int len,
+ const char *name, struct f2fs_xattr_entry **xe,
+ void **base_addr)
+ {
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ void *cur_addr, *txattr_addr, *last_addr = NULL;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
+@@ -298,19 +317,9 @@ static int lookup_all_xattrs(struct inod
+
+ /* read from xattr node block */
+ if (xnid) {
+- struct page *xpage;
+- void *xattr_addr;
+-
+- /* The inode already has an extended attribute block. */
+- xpage = get_node_page(sbi, xnid);
+- if (IS_ERR(xpage)) {
+- err = PTR_ERR(xpage);
++ err = read_xattr_block(inode, txattr_addr);
++ if (err)
+ goto out;
+- }
+-
+- xattr_addr = page_address(xpage);
+- memcpy(txattr_addr + inline_size, xattr_addr, size);
+- f2fs_put_page(xpage, 1);
+ }
+
+ if (last_addr)
+@@ -335,7 +344,6 @@ out:
+ static int read_all_xattrs(struct inode *inode, struct page *ipage,
+ void **base_addr)
+ {
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_xattr_header *header;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ unsigned int size = VALID_XATTR_BLOCK_SIZE;
+@@ -357,19 +365,9 @@ static int read_all_xattrs(struct inode
+
+ /* read from xattr node block */
+ if (xnid) {
+- struct page *xpage;
+- void *xattr_addr;
+-
+- /* The inode already has an extended attribute block. */
+- xpage = get_node_page(sbi, xnid);
+- if (IS_ERR(xpage)) {
+- err = PTR_ERR(xpage);
++ err = read_xattr_block(inode, txattr_addr);
++ if (err)
+ goto fail;
+- }
+-
+- xattr_addr = page_address(xpage);
+- memcpy(txattr_addr + inline_size, xattr_addr, size);
+- f2fs_put_page(xpage, 1);
+ }
+
+ header = XATTR_HDR(txattr_addr);
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+Date: Wed, 26 Dec 2018 19:54:07 -0800
+Subject: f2fs: sanity check of xattr entry size
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+commit 64beba0558fce7b59e9a8a7afd77290e82a22163 upstream.
+
+There is a security report that f2fs_getxattr() has a hole that can
+expose the wrong memory region when the image is malformed, like this:
+
+f2fs_getxattr: entry->e_name_len: 4, size: 12288, buffer_size: 16384, len: 4
+
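+The fix is plain pointer arithmetic (a sketch, using the bound names
+from the patch): the value region starts at pval = e_name +
+e_name_len, and the claimed value size must fit in what remains of the
+base_size bytes that lookup_all_xattrs() allocated.
+
+```
+char *pval = entry->e_name + entry->e_name_len;
+
+/* e.g. a 'size' of 12288 from a malformed entry cannot fit in the
+ * space left after pval: reject instead of exposing stale memory */
+if (base_size - (pval - (char *)base_addr) < size) {
+	error = -ERANGE;
+	goto out;
+}
+memcpy(buffer, pval, size);
+```
+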
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[bwh: Backported to 4.14: Keep using kzalloc()]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/xattr.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -287,7 +287,7 @@ static int read_xattr_block(struct inode
+ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+ unsigned int index, unsigned int len,
+ const char *name, struct f2fs_xattr_entry **xe,
+- void **base_addr)
++ void **base_addr, int *base_size)
+ {
+ void *cur_addr, *txattr_addr, *last_addr = NULL;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+@@ -298,8 +298,8 @@ static int lookup_all_xattrs(struct inod
+ if (!size && !inline_size)
+ return -ENODATA;
+
+- txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
+- GFP_F2FS_ZERO);
++ *base_size = inline_size + size + XATTR_PADDING_SIZE;
++ txattr_addr = kzalloc(*base_size, GFP_F2FS_ZERO);
+ if (!txattr_addr)
+ return -ENOMEM;
+
+@@ -311,8 +311,10 @@ static int lookup_all_xattrs(struct inod
+
+ *xe = __find_inline_xattr(txattr_addr, &last_addr,
+ index, len, name);
+- if (*xe)
++ if (*xe) {
++ *base_size = inline_size;
+ goto check;
++ }
+ }
+
+ /* read from xattr node block */
+@@ -462,6 +464,7 @@ int f2fs_getxattr(struct inode *inode, i
+ int error = 0;
+ unsigned int size, len;
+ void *base_addr = NULL;
++ int base_size;
+
+ if (name == NULL)
+ return -EINVAL;
+@@ -472,7 +475,7 @@ int f2fs_getxattr(struct inode *inode, i
+
+ down_read(&F2FS_I(inode)->i_xattr_sem);
+ error = lookup_all_xattrs(inode, ipage, index, len, name,
+- &entry, &base_addr);
++ &entry, &base_addr, &base_size);
+ up_read(&F2FS_I(inode)->i_xattr_sem);
+ if (error)
+ return error;
+@@ -486,6 +489,11 @@ int f2fs_getxattr(struct inode *inode, i
+
+ if (buffer) {
+ char *pval = entry->e_name + entry->e_name_len;
++
++ if (base_size - (pval - (char *)base_addr) < size) {
++ error = -ERANGE;
++ goto out;
++ }
+ memcpy(buffer, pval, size);
+ }
+ error = size;
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Wed, 4 Dec 2019 15:35:52 +0100
+Subject: net: ipv6: add net argument to ip6_dst_lookup_flow
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+commit c4e85f73afb6384123e5ef1bba3315b2e3ad031e upstream.
+
+This will be used in the conversion of ipv6_stub to ip6_dst_lookup_flow,
+as some modules currently pass a net argument without a socket to
+ip6_dst_lookup. This is equivalent to commit 343d60aada5a ("ipv6: change
+ipv6_stub_impl.ipv6_dst_lookup to take net argument").
+
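+After this change, callers that have a socket pass its namespace
+explicitly, and callers that only have a struct net can pass a NULL
+socket (the two shapes, sketched):
+
+```
+/* socket-based caller */
+dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+
+/* namespace-only caller, no socket */
+dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
+if (IS_ERR(dst))
+	return PTR_ERR(dst);
+```
+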
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[bwh: Backported to 4.14: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ipv6.h | 2 +-
+ net/dccp/ipv6.c | 6 +++---
+ net/ipv6/af_inet6.c | 2 +-
+ net/ipv6/datagram.c | 2 +-
+ net/ipv6/inet6_connection_sock.c | 4 ++--
+ net/ipv6/ip6_output.c | 8 ++++----
+ net/ipv6/raw.c | 2 +-
+ net/ipv6/syncookies.c | 2 +-
+ net/ipv6/tcp_ipv6.c | 4 ++--
+ net/l2tp/l2tp_ip6.c | 2 +-
+ net/sctp/ipv6.c | 4 ++--
+ 11 files changed, 19 insertions(+), 19 deletions(-)
+
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -862,7 +862,7 @@ static inline struct sk_buff *ip6_finish
+
+ int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6);
+-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
++struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
+ const struct in6_addr *final_dst);
+ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ const struct in6_addr *final_dst);
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -211,7 +211,7 @@ static int dccp_v6_send_response(const s
+ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
+
+- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ dst = NULL;
+@@ -282,7 +282,7 @@ static void dccp_v6_ctl_send_reset(const
+ security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
+
+ /* sk = NULL, but it is safe for now. RST socket required. */
+- dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
++ dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
+ if (!IS_ERR(dst)) {
+ skb_dst_set(skb, dst);
+ ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
+@@ -912,7 +912,7 @@ static int dccp_v6_connect(struct sock *
+ opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
+ final_p = fl6_update_dst(&fl6, opt, &final);
+
+- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto failure;
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -716,7 +716,7 @@ int inet6_sk_rebuild_header(struct sock
+ &final);
+ rcu_read_unlock();
+
+- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+ sk->sk_route_caps = 0;
+ sk->sk_err_soft = -PTR_ERR(dst);
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -88,7 +88,7 @@ int ip6_datagram_dst_update(struct sock
+ final_p = fl6_update_dst(&fl6, opt, &final);
+ rcu_read_unlock();
+
+- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto out;
+--- a/net/ipv6/inet6_connection_sock.c
++++ b/net/ipv6/inet6_connection_sock.c
+@@ -52,7 +52,7 @@ struct dst_entry *inet6_csk_route_req(co
+ fl6->flowi6_uid = sk->sk_uid;
+ security_req_classify_flow(req, flowi6_to_flowi(fl6));
+
+- dst = ip6_dst_lookup_flow(sk, fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+ if (IS_ERR(dst))
+ return NULL;
+
+@@ -107,7 +107,7 @@ static struct dst_entry *inet6_csk_route
+
+ dst = __inet6_csk_dst_check(sk, np->dst_cookie);
+ if (!dst) {
+- dst = ip6_dst_lookup_flow(sk, fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+
+ if (!IS_ERR(dst))
+ ip6_dst_store(sk, dst, NULL, NULL);
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1089,19 +1089,19 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
+ * It returns a valid dst pointer on success, or a pointer encoded
+ * error code.
+ */
+-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
++struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
+ const struct in6_addr *final_dst)
+ {
+ struct dst_entry *dst = NULL;
+ int err;
+
+- err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
++ err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
+ if (err)
+ return ERR_PTR(err);
+ if (final_dst)
+ fl6->daddr = *final_dst;
+
+- return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
++ return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
+ }
+ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
+
+@@ -1126,7 +1126,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow
+
+ dst = ip6_sk_dst_check(sk, dst, fl6);
+ if (!dst)
+- dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
+
+ return dst;
+ }
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -929,7 +929,7 @@ static int rawv6_sendmsg(struct sock *sk
+
+ fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
+
+- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto out;
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -238,7 +238,7 @@ struct sock *cookie_v6_check(struct sock
+ fl6.flowi6_uid = sk->sk_uid;
+ security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+
+- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+ if (IS_ERR(dst))
+ goto out_free;
+ }
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -252,7 +252,7 @@ static int tcp_v6_connect(struct sock *s
+
+ security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto failure;
+@@ -865,7 +865,7 @@ static void tcp_v6_send_response(const s
+ * Underlying function will use this to retrieve the network
+ * namespace
+ */
+- dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
++ dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
+ if (!IS_ERR(dst)) {
+ skb_dst_set(buff, dst);
+ ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -629,7 +629,7 @@ static int l2tp_ip6_sendmsg(struct sock
+
+ fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
+
+- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto out;
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -271,7 +271,7 @@ static void sctp_v6_get_dst(struct sctp_
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
+
+- dst = ip6_dst_lookup_flow(sk, fl6, final_p);
++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+ if (!asoc || saddr) {
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+@@ -329,7 +329,7 @@ static void sctp_v6_get_dst(struct sctp_
+ fl6->saddr = laddr->a.v6.sin6_addr;
+ fl6->fl6_sport = laddr->a.v6.sin6_port;
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+- bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
++ bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+
+ if (IS_ERR(bdst))
+ continue;
--- /dev/null
+From foo@baz Tue 12 May 2020 06:12:56 PM CEST
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Wed, 4 Dec 2019 15:35:53 +0100
+Subject: net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+commit 6c8991f41546c3c472503dff1ea9daaddf9331c2 upstream.
+
+ipv6_stub uses the ip6_dst_lookup function to allow other modules to
+perform IPv6 lookups. However, this function skips the XFRM layer
+entirely.
+
+All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
+ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
+which calls xfrm_lookup_route(). This patch fixes this inconsistent
+behavior by switching the stub to ip6_dst_lookup_flow, which also calls
+xfrm_lookup_route().
+
+This requires some changes in all the callers, as these two functions
+take different arguments and have different return types.
+
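+Since the stub now returns the dst (or an ERR_PTR-encoded error)
+instead of an int plus an output parameter, every caller converts
+along these lines (sketch taken from the tipc hunk below):
+
+```
+/* before */
+err = ipv6_stub->ipv6_dst_lookup(net, sk, &ndst, &fl6);
+if (err)
+	goto tx_error;
+
+/* after */
+ndst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
+if (IS_ERR(ndst)) {
+	err = PTR_ERR(ndst);
+	goto tx_error;
+}
+```
+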
+Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
+Reported-by: Xiumei Mu <xmu@redhat.com>
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[bwh: Backported to 4.14:
+ - Drop change in lwt_bpf.c
+ - Delete now-unused "ret" in mlx5e_route_lookup_ipv6()
+ - Initialise "out_dev" in mlx5e_create_encap_header_ipv6() to avoid
+ introducing a spurious "may be used uninitialised" warning
+ - Adjust filenames, context, indentation]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/addr.c | 7 +++----
+ drivers/infiniband/sw/rxe/rxe_net.c | 8 +++++---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 11 +++++------
+ drivers/net/geneve.c | 4 +++-
+ drivers/net/vxlan.c | 8 +++-----
+ include/net/addrconf.h | 6 ++++--
+ net/ipv6/addrconf_core.c | 11 ++++++-----
+ net/ipv6/af_inet6.c | 2 +-
+ net/mpls/af_mpls.c | 7 +++----
+ net/tipc/udp_media.c | 9 ++++++---
+ 10 files changed, 39 insertions(+), 34 deletions(-)
+
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -450,16 +450,15 @@ static int addr6_resolve(struct sockaddr
+ struct flowi6 fl6;
+ struct dst_entry *dst;
+ struct rt6_info *rt;
+- int ret;
+
+ memset(&fl6, 0, sizeof fl6);
+ fl6.daddr = dst_in->sin6_addr;
+ fl6.saddr = src_in->sin6_addr;
+ fl6.flowi6_oif = addr->bound_dev_if;
+
+- ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
+- if (ret < 0)
+- return ret;
++ dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
++ if (IS_ERR(dst))
++ return PTR_ERR(dst);
+
+ rt = (struct rt6_info *)dst;
+ if (ipv6_addr_any(&src_in->sin6_addr)) {
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -154,10 +154,12 @@ static struct dst_entry *rxe_find_route6
+ memcpy(&fl6.daddr, daddr, sizeof(*daddr));
+ fl6.flowi6_proto = IPPROTO_UDP;
+
+- if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
+- recv_sockets.sk6->sk, &ndst, &fl6))) {
++ ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
++ recv_sockets.sk6->sk, &fl6,
++ NULL);
++ if (unlikely(IS_ERR(ndst))) {
+ pr_err_ratelimited("no route to %pI6\n", daddr);
+- goto put;
++ return NULL;
+ }
+
+ if (unlikely(ndst->error)) {
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1550,12 +1550,11 @@ static int mlx5e_route_lookup_ipv6(struc
+
+ #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+- int ret;
+
+- ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+- fl6);
+- if (ret < 0)
+- return ret;
++ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6,
++ NULL);
++ if (IS_ERR(dst))
++ return PTR_ERR(dst);
+
+ *out_ttl = ip6_dst_hoplimit(dst);
+
+@@ -1754,7 +1753,7 @@ static int mlx5e_create_encap_header_ipv
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
+ struct ip_tunnel_key *tun_key = &e->tun_info.key;
+- struct net_device *out_dev;
++ struct net_device *out_dev = NULL;
+ struct neighbour *n = NULL;
+ struct flowi6 fl6 = {};
+ char *encap_header;
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -796,7 +796,9 @@ static struct dst_entry *geneve_get_v6_d
+ if (dst)
+ return dst;
+ }
+- if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
++ dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
++ NULL);
++ if (IS_ERR(dst)) {
+ netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
+ return ERR_PTR(-ENETUNREACH);
+ }
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1962,7 +1962,6 @@ static struct dst_entry *vxlan6_get_rout
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct dst_entry *ndst;
+ struct flowi6 fl6;
+- int err;
+
+ if (!sock6)
+ return ERR_PTR(-EIO);
+@@ -1985,10 +1984,9 @@ static struct dst_entry *vxlan6_get_rout
+ fl6.fl6_dport = dport;
+ fl6.fl6_sport = sport;
+
+- err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
+- sock6->sock->sk,
+- &ndst, &fl6);
+- if (unlikely(err < 0)) {
++ ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
++ &fl6, NULL);
++ if (unlikely(IS_ERR(ndst))) {
+ netdev_dbg(dev, "no route to %pI6\n", daddr);
+ return ERR_PTR(-ENETUNREACH);
+ }
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -223,8 +223,10 @@ struct ipv6_stub {
+ const struct in6_addr *addr);
+ int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+- int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+- struct dst_entry **dst, struct flowi6 *fl6);
++ struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
++ const struct sock *sk,
++ struct flowi6 *fl6,
++ const struct in6_addr *final_dst);
+ void (*udpv6_encap_enable)(void);
+ void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+--- a/net/ipv6/addrconf_core.c
++++ b/net/ipv6/addrconf_core.c
+@@ -126,15 +126,16 @@ int inet6addr_validator_notifier_call_ch
+ }
+ EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain);
+
+-static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
+- struct dst_entry **u2,
+- struct flowi6 *u3)
++static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net,
++ const struct sock *sk,
++ struct flowi6 *fl6,
++ const struct in6_addr *final_dst)
+ {
+- return -EAFNOSUPPORT;
++ return ERR_PTR(-EAFNOSUPPORT);
+ }
+
+ const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+- .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
++ .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow,
+ };
+ EXPORT_SYMBOL_GPL(ipv6_stub);
+
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -874,7 +874,7 @@ static struct pernet_operations inet6_ne
+ static const struct ipv6_stub ipv6_stub_impl = {
+ .ipv6_sock_mc_join = ipv6_sock_mc_join,
+ .ipv6_sock_mc_drop = ipv6_sock_mc_drop,
+- .ipv6_dst_lookup = ip6_dst_lookup,
++ .ipv6_dst_lookup_flow = ip6_dst_lookup_flow,
+ .udpv6_encap_enable = udpv6_encap_enable,
+ .ndisc_send_na = ndisc_send_na,
+ .nd_tbl = &nd_tbl,
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -587,16 +587,15 @@ static struct net_device *inet6_fib_look
+ struct net_device *dev;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+- int err;
+
+ if (!ipv6_stub)
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ memset(&fl6, 0, sizeof(fl6));
+ memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
+- err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
+- if (err)
+- return ERR_PTR(err);
++ dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
++ if (IS_ERR(dst))
++ return ERR_CAST(dst);
+
+ dev = dst->dev;
+ dev_hold(dev);
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -187,10 +187,13 @@ static int tipc_udp_xmit(struct net *net
+ .saddr = src->ipv6,
+ .flowi6_proto = IPPROTO_UDP
+ };
+- err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
+- &fl6);
+- if (err)
++ ndst = ipv6_stub->ipv6_dst_lookup_flow(net,
++ ub->ubsock->sk,
++ &fl6, NULL);
++ if (IS_ERR(ndst)) {
++ err = PTR_ERR(ndst);
+ goto tx_error;
++ }
+ ttl = ip6_dst_hoplimit(ndst);
+ err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
+ &src->ipv6, &dst->ipv6, 0, ttl, 0,
netfilter-nat-never-update-the-udp-checksum-when-it-s-0.patch
objtool-fix-stack-offset-tracking-for-indirect-cfas.patch
scripts-decodecode-fix-trapping-instruction-formatting.patch
+net-ipv6-add-net-argument-to-ip6_dst_lookup_flow.patch
+net-ipv6_stub-use-ip6_dst_lookup_flow-instead-of-ip6_dst_lookup.patch
+blktrace-fix-unlocked-access-to-init-start-stop-teardown.patch
+blktrace-fix-trace-mutex-deadlock.patch
+blktrace-protect-q-blk_trace-with-rcu.patch
+blktrace-fix-dereference-after-null-check.patch
+f2fs-introduce-read_inline_xattr.patch
+f2fs-introduce-read_xattr_block.patch
+f2fs-sanity-check-of-xattr-entry-size.patch
+f2fs-fix-to-avoid-accessing-xattr-across-the-boundary.patch
+f2fs-fix-to-avoid-memory-leakage-in-f2fs_listxattr.patch