--- /dev/null
+From 6f6a23a213be51728502b88741ba6a10cda2441d Mon Sep 17 00:00:00 2001
+From: Adam Wallis <awallis@codeaurora.org>
+Date: Mon, 27 Nov 2017 10:45:01 -0500
+Subject: dmaengine: dmatest: move callback wait queue to thread context
+
+From: Adam Wallis <awallis@codeaurora.org>
+
+commit 6f6a23a213be51728502b88741ba6a10cda2441d upstream.
+
+Commit adfa543e7314 ("dmatest: don't use set_freezable_with_signal()")
+introduced a bug (that is in fact documented by the patch commit text)
+that leaves behind a dangling pointer. Since the done_wait structure is
+allocated on the stack, future invocations of dmatest can produce
+undesirable results (e.g., corrupted spinlocks).
+
+Commit a9df21e34b42 ("dmaengine: dmatest: warn user when dma test times
+out") attempted to WARN the user that the stack was likely corrupted but
+did not fix the actual issue.
+
+This patch fixes the issue by pushing the wait queue and callback
+structs into the thread structure. If a failure occurs due to a
+timeout, dmaengine_terminate_all will force the callback to safely call
+wake_up_all() without the possibility of using a freed pointer.
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=197605
+Fixes: adfa543e7314 ("dmatest: don't use set_freezable_with_signal()")
+Reviewed-by: Sinan Kaya <okaya@codeaurora.org>
+Suggested-by: Shunyong Yang <shunyong.yang@hxt-semitech.com>
+Signed-off-by: Adam Wallis <awallis@codeaurora.org>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/dmatest.c | 55 ++++++++++++++++++++++++++++----------------------
+ 1 file changed, 31 insertions(+), 24 deletions(-)
+
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -148,6 +148,12 @@ MODULE_PARM_DESC(run, "Run the test (def
+ #define PATTERN_OVERWRITE 0x20
+ #define PATTERN_COUNT_MASK 0x1f
+
++/* poor man's completion - we want to use wait_event_freezable() on it */
++struct dmatest_done {
++ bool done;
++ wait_queue_head_t *wait;
++};
++
+ struct dmatest_thread {
+ struct list_head node;
+ struct dmatest_info *info;
+@@ -156,6 +162,8 @@ struct dmatest_thread {
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
++ wait_queue_head_t done_wait;
++ struct dmatest_done test_done;
+ bool done;
+ };
+
+@@ -316,18 +324,25 @@ static unsigned int dmatest_verify(u8 **
+ return error_count;
+ }
+
+-/* poor man's completion - we want to use wait_event_freezable() on it */
+-struct dmatest_done {
+- bool done;
+- wait_queue_head_t *wait;
+-};
+
+ static void dmatest_callback(void *arg)
+ {
+ struct dmatest_done *done = arg;
+-
+- done->done = true;
+- wake_up_all(done->wait);
++ struct dmatest_thread *thread =
++ container_of(arg, struct dmatest_thread, done_wait);
++ if (!thread->done) {
++ done->done = true;
++ wake_up_all(done->wait);
++ } else {
++ /*
++ * If thread->done, it means that this callback occurred
++ * after the parent thread has cleaned up. This can
++ * happen in the case that driver doesn't implement
++ * the terminate_all() functionality and a dma operation
++ * did not occur within the timeout period
++ */
++ WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
++ }
+ }
+
+ static unsigned int min_odd(unsigned int x, unsigned int y)
+@@ -398,9 +413,8 @@ static unsigned long long dmatest_KBs(s6
+ */
+ static int dmatest_func(void *data)
+ {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
+ struct dmatest_thread *thread = data;
+- struct dmatest_done done = { .wait = &done_wait };
++ struct dmatest_done *done = &thread->test_done;
+ struct dmatest_info *info;
+ struct dmatest_params *params;
+ struct dma_chan *chan;
+@@ -605,9 +619,9 @@ static int dmatest_func(void *data)
+ continue;
+ }
+
+- done.done = false;
++ done->done = false;
+ tx->callback = dmatest_callback;
+- tx->callback_param = &done;
++ tx->callback_param = done;
+ cookie = tx->tx_submit(tx);
+
+ if (dma_submit_error(cookie)) {
+@@ -620,21 +634,12 @@ static int dmatest_func(void *data)
+ }
+ dma_async_issue_pending(chan);
+
+- wait_event_freezable_timeout(done_wait, done.done,
++ wait_event_freezable_timeout(thread->done_wait, done->done,
+ msecs_to_jiffies(params->timeout));
+
+ status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+
+- if (!done.done) {
+- /*
+- * We're leaving the timed out dma operation with
+- * dangling pointer to done_wait. To make this
+- * correct, we'll need to allocate wait_done for
+- * each test iteration and perform "who's gonna
+- * free it this time?" dancing. For now, just
+- * leave it dangling.
+- */
+- WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
++ if (!done->done) {
+ dmaengine_unmap_put(um);
+ result("test timed out", total_tests, src_off, dst_off,
+ len, 0);
+@@ -708,7 +713,7 @@ err_thread_type:
+ dmatest_KBs(runtime, total_len), ret);
+
+ /* terminate all transfers on specified channels */
+- if (ret)
++ if (ret || failed_tests)
+ dmaengine_terminate_all(chan);
+
+ thread->done = true;
+@@ -766,6 +771,8 @@ static int dmatest_add_threads(struct dm
+ thread->info = info;
+ thread->chan = dtc->chan;
+ thread->type = type;
++ thread->test_done.wait = &thread->done_wait;
++ init_waitqueue_head(&thread->done_wait);
+ smp_wmb();
+ thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
+ dma_chan_name(chan), op, i);
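
The fix above generalizes to a common pattern: completion state that an
asynchronous callback may touch must live in an object that outlives the
waiter's stack frame. Below is a minimal userspace C sketch of that
pattern, using pthreads in place of the kernel wait queue; the names
(test_thread, late_callback) are illustrative and not part of the
dmatest code.

/*
 * Sketch only: the wait state lives in a long-lived context (here a
 * static struct, analogous to struct dmatest_thread), so a callback
 * that fires after the waiter has timed out still touches valid memory.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct test_thread {
	pthread_mutex_t lock;
	pthread_cond_t  done_wait;	/* replaces the on-stack wait queue */
	bool		done;		/* replaces the on-stack dmatest_done */
	bool		thread_done;	/* set when the waiter gives up */
};

/* Simulated completion callback that arrives after the waiter's timeout. */
static void *late_callback(void *arg)
{
	struct test_thread *t = arg;

	sleep(2);			/* later than the 1 second timeout below */

	pthread_mutex_lock(&t->lock);
	if (!t->thread_done) {
		t->done = true;
		pthread_cond_broadcast(&t->done_wait);
	} else {
		/* waiter already gave up; state is still valid, so just report */
		fprintf(stderr, "late completion after timeout, state still safe\n");
	}
	pthread_mutex_unlock(&t->lock);
	return NULL;
}

int main(void)
{
	static struct test_thread t = {
		.lock	   = PTHREAD_MUTEX_INITIALIZER,
		.done_wait = PTHREAD_COND_INITIALIZER,
	};
	struct timespec deadline;
	pthread_t cb;

	pthread_create(&cb, NULL, late_callback, &t);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;		/* 1 second timeout */

	pthread_mutex_lock(&t.lock);
	while (!t.done &&
	       pthread_cond_timedwait(&t.done_wait, &t.lock, &deadline) == 0)
		;			/* spurious wakeup, wait again */
	if (!t.done)
		fprintf(stderr, "timed out waiting for completion\n");
	t.thread_done = true;		/* like thread->done = true in dmatest */
	pthread_mutex_unlock(&t.lock);

	pthread_join(cb, NULL);
	return 0;
}
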
--- /dev/null
+From 9d5afec6b8bd46d6ed821aa1579634437f58ef1f Mon Sep 17 00:00:00 2001
+From: Chandan Rajendra <chandan@linux.vnet.ibm.com>
+Date: Mon, 11 Dec 2017 15:00:57 -0500
+Subject: ext4: fix crash when a directory's i_size is too small
+
+From: Chandan Rajendra <chandan@linux.vnet.ibm.com>
+
+commit 9d5afec6b8bd46d6ed821aa1579634437f58ef1f upstream.
+
+On a ppc64 machine, when mounting a fuzzed ext2 image (generated by
+fsfuzzer) the following call trace is seen,
+
+VFS: brelse: Trying to free free buffer
+WARNING: CPU: 1 PID: 6913 at /root/repos/linux/fs/buffer.c:1165 .__brelse.part.6+0x24/0x40
+.__brelse.part.6+0x20/0x40 (unreliable)
+.ext4_find_entry+0x384/0x4f0
+.ext4_lookup+0x84/0x250
+.lookup_slow+0xdc/0x230
+.walk_component+0x268/0x400
+.path_lookupat+0xec/0x2d0
+.filename_lookup+0x9c/0x1d0
+.vfs_statx+0x98/0x140
+.SyS_newfstatat+0x48/0x80
+system_call+0x58/0x6c
+
+This happens because the directory that ext4_find_entry() looks up has
+an inode->i_size that is less than the block size of the filesystem.
+This causes 'nblocks' to have a value of zero. ext4_bread_batch() ends
+up not reading any of the directory file's blocks, which leaves the
+entries in the bh_use[] array holding garbage data. buffer_uptodate()
+on bh_use[0] can then return zero, upon which brelse() is invoked.
+
+This commit fixes the bug by returning -ENOENT when the directory file
+has no associated blocks.
+
+Reported-by: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
+Signed-off-by: Chandan Rajendra <chandan@linux.vnet.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/namei.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1403,6 +1403,10 @@ static struct buffer_head * ext4_find_en
+ "falling back\n"));
+ }
+ nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
++ if (!nblocks) {
++ ret = NULL;
++ goto cleanup_and_exit;
++ }
+ start = EXT4_I(dir)->i_dir_start_lookup;
+ if (start >= nblocks)
+ start = 0;
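
The failing condition is plain shift arithmetic: with 4096-byte blocks,
EXT4_BLOCK_SIZE_BITS(sb) is 12, so any directory i_size below 4096
yields nblocks == 0 and the lookup loop never reads a block. A
standalone C sketch of that arithmetic follows; the concrete values are
illustrative, not taken from the fuzzed image.

#include <stdio.h>

int main(void)
{
	unsigned int block_size_bits      = 12;   /* 4096-byte blocks */
	unsigned long long i_size         = 2048; /* fuzzed dir smaller than one block */
	unsigned long long nblocks        = i_size >> block_size_bits;

	/* nblocks == 0: nothing is read and bh_use[] keeps its garbage */
	printf("nblocks = %llu -> %s\n", nblocks,
	       nblocks ? "search the blocks" : "return NULL (patched path)");
	return 0;
}
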
--- /dev/null
+From c894aa97577e47d3066b27b32499ecf899bfa8b0 Mon Sep 17 00:00:00 2001
+From: Eryu Guan <eguan@redhat.com>
+Date: Sun, 3 Dec 2017 22:52:51 -0500
+Subject: ext4: fix fdatasync(2) after fallocate(2) operation
+
+From: Eryu Guan <eguan@redhat.com>
+
+commit c894aa97577e47d3066b27b32499ecf899bfa8b0 upstream.
+
+Currently, if an fallocate(2) with KEEP_SIZE is followed by an
+fdatasync(2) and then a crash, we'll see a wrong allocated block
+count (stat -c %b); the blocks allocated beyond EOF are all lost.
+fstests generic/468 exposes this bug.
+
+Commit 67a7d5f561f4 ("ext4: fix fdatasync(2) after extent
+manipulation operations") fixed all the other extent manipulation
+operation paths such as hole punch, zero range, collapse range etc.,
+but forgot the fallocate case.
+
+So similarly, fix it by recording the correct journal tid in the ext4
+inode in the fallocate(2) path, so that ext4_sync_file() will wait for
+the right tid to be committed on fdatasync(2).
+
+This addresses the test failure in xfstests test generic/468.
+
+Signed-off-by: Eryu Guan <eguan@redhat.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/extents.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4746,6 +4746,7 @@ retry:
+ EXT4_INODE_EOFBLOCKS);
+ }
+ ext4_mark_inode_dirty(handle, inode);
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+ ret2 = ext4_journal_stop(handle);
+ if (ret2)
+ break;
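
For reference, the syscall sequence the fix targets (roughly what
generic/468 drives) is just an fallocate(2) with KEEP_SIZE followed by
an fdatasync(2). A minimal userspace C sketch is below; the file name
and allocation size are illustrative.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_CREAT | O_RDWR, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* allocate 1 MiB of blocks beyond EOF without changing i_size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate");

	/*
	 * With the fix, the fallocate path records the journal tid, so this
	 * fdatasync waits for the transaction that allocated the blocks.
	 */
	if (fdatasync(fd) < 0)
		perror("fdatasync");

	close(fd);
	return 0;
}
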
--- /dev/null
+From f73c52a5bcd1710994e53fbccc378c42b97a06b6 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Sat, 2 Dec 2017 13:04:54 -0500
+Subject: sched/rt: Do not pull from current CPU if only one CPU to pull
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit f73c52a5bcd1710994e53fbccc378c42b97a06b6 upstream.
+
+Daniel Wagner reported a crash on the BeagleBone Black SoC.
+
+This is a single-CPU architecture that does not have a functional
+arch_send_call_function_single_ipi() implementation; calling it can
+crash the kernel.
+
+As it only has one CPU, that function should never be called. But if
+the kernel is compiled for SMP, the push/pull RT scheduling logic now
+calls it for irq_work when the one CPU is overloaded; the CPU then uses
+that function to send an IPI to itself and crashes the kernel.
+
+Ideally, we should disable the SCHED_FEAT(RT_PUSH_IPI) if the system
+only has a single CPU. But SCHED_FEAT is a constant if sched debugging
+is turned off. Another fix can also be used, and this should also help
+with normal SMP machines. That is, do not initiate the pull code if
+there's only one RT overloaded CPU, and that CPU happens to be the
+current CPU that is scheduling in a lower priority task.
+
+Even on a system with many CPUs, if there are many RT tasks waiting to
+run on a single CPU, and that CPU schedules in another RT task of lower
+priority, it will initiate the PULL logic in case there's a higher
+priority RT task on another CPU that is waiting to run. But if there is
+no other CPU with waiting RT tasks, it will initiate the RT pull logic
+on itself (as it still has RT tasks waiting to run). This is a wasted
+effort.
+
+Not only does this help with SMP code where the current CPU is the only
+one with RT overloaded tasks, it should also solve the issue that
+Daniel encountered, because it will prevent the PULL logic from
+executing, as there's only one CPU on the system, and the check added
+here will cause it to exit the RT pull code.
+
+Reported-by: Daniel Wagner <wagi@monom.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-rt-users <linux-rt-users@vger.kernel.org>
+Fixes: 4bdced5c9 ("sched/rt: Simplify the IPI based RT balancing logic")
+Link: http://lkml.kernel.org/r/20171202130454.4cbbfe8d@vmware.local.home
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/rt.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1960,8 +1960,9 @@ static void pull_rt_task(struct rq *this
+ bool resched = false;
+ struct task_struct *p;
+ struct rq *src_rq;
++ int rt_overload_count = rt_overloaded(this_rq);
+
+- if (likely(!rt_overloaded(this_rq)))
++ if (likely(!rt_overload_count))
+ return;
+
+ /*
+@@ -1970,6 +1971,11 @@ static void pull_rt_task(struct rq *this
+ */
+ smp_rmb();
+
++ /* If we are the only overloaded CPU do nothing */
++ if (rt_overload_count == 1 &&
++ cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
++ return;
++
+ #ifdef HAVE_RT_PUSH_IPI
+ if (sched_feat(RT_PUSH_IPI)) {
+ tell_cpu_to_push(this_rq);
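
The added check is easy to model in isolation: pulling is skipped when
the overload count is one and the only overloaded CPU is the current
one. Below is a standalone C sketch with rto_mask modeled as a plain
bitmask; should_pull() and the sample values are illustrative, not the
kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Decide whether this CPU should try to pull RT tasks from others. */
static bool should_pull(unsigned int rt_overload_count,
			unsigned long rto_mask, int this_cpu)
{
	if (!rt_overload_count)
		return false;			/* nothing is overloaded */

	/* If we are the only overloaded CPU, there is nothing to pull. */
	if (rt_overload_count == 1 && (rto_mask & (1UL << this_cpu)))
		return false;

	return true;
}

int main(void)
{
	/* single-CPU box with CPU0 overloaded: no pull, no IPI to ourselves */
	printf("%d\n", should_pull(1, 0x1, 0));	/* prints 0 */
	/* two overloaded CPUs and we are one of them: still worth pulling */
	printf("%d\n", should_pull(2, 0x3, 0));	/* prints 1 */
	return 0;
}
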
 usbip-fix-stub_send_ret_submit-vulnerability-to-null-transfer_buffer.patch
 ceph-drop-negative-child-dentries-before-try-pruning-inode-s-alias.patch
 bluetooth-btusb-driver-to-enable-the-usb-wakeup-feature.patch
+xhci-don-t-add-a-virt_dev-to-the-devs-array-before-it-s-fully-allocated.patch
+sched-rt-do-not-pull-from-current-cpu-if-only-one-cpu-to-pull.patch
+dmaengine-dmatest-move-callback-wait-queue-to-thread-context.patch
+ext4-fix-fdatasync-2-after-fallocate-2-operation.patch
+ext4-fix-crash-when-a-directory-s-i_size-is-too-small.patch
--- /dev/null
+From 5d9b70f7d52eb14bb37861c663bae44de9521c35 Mon Sep 17 00:00:00 2001
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+Date: Fri, 8 Dec 2017 18:10:05 +0200
+Subject: xhci: Don't add a virt_dev to the devs array before it's fully allocated
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+commit 5d9b70f7d52eb14bb37861c663bae44de9521c35 upstream.
+
+Avoid a null pointer dereference if some function walks through the
+devs array and accesses members of a new virt_dev that is still
+mid-allocation.
+
+Add the virt_dev to xhci->devs[i] _after_ the virt_device and all of
+its members are properly allocated.
+
+issue found by KASAN: null-ptr-deref in xhci_find_slot_id_by_port
+
+"Quick analysis suggests that xhci_alloc_virt_device() is not mutex
+protected. If so, there is a time frame where xhci->devs[slot_id] is set
+but not fully initialized. Specifically, xhci->devs[i]->udev can be NULL."
+
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci-mem.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1017,10 +1017,9 @@ int xhci_alloc_virt_device(struct xhci_h
+ return 0;
+ }
+
+- xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
+- if (!xhci->devs[slot_id])
++ dev = kzalloc(sizeof(*dev), flags);
++ if (!dev)
+ return 0;
+- dev = xhci->devs[slot_id];
+
+ /* Allocate the (output) device context that will be used in the HC. */
+ dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
+@@ -1068,9 +1067,17 @@ int xhci_alloc_virt_device(struct xhci_h
+ &xhci->dcbaa->dev_context_ptrs[slot_id],
+ le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
+
++ xhci->devs[slot_id] = dev;
++
+ return 1;
+ fail:
+- xhci_free_virt_device(xhci, slot_id);
++
++ if (dev->in_ctx)
++ xhci_free_container_ctx(xhci, dev->in_ctx);
++ if (dev->out_ctx)
++ xhci_free_container_ctx(xhci, dev->out_ctx);
++ kfree(dev);
++
+ return 0;
+ }
+
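
The underlying rule is the usual publish-after-init pattern for objects
reachable through a shared array: build the object privately and store
the pointer only once every member is valid, so the error path never has
to unwind a visible entry. A generic C sketch of that pattern follows;
the struct names and sizes are illustrative, and the kernel's locking
and memory-ordering concerns are elided.

#include <stdlib.h>

struct virt_dev {
	void *out_ctx;
	void *in_ctx;
};

struct controller {
	struct virt_dev *devs[256];	/* other code may walk this array */
};

/* Returns 1 on success, 0 on failure, like xhci_alloc_virt_device(). */
static int alloc_virt_device(struct controller *ctrl, int slot_id)
{
	struct virt_dev *dev;

	/* build the object privately; nothing else can see it yet */
	dev = calloc(1, sizeof(*dev));
	if (!dev)
		return 0;

	dev->out_ctx = malloc(64);
	if (!dev->out_ctx)
		goto fail;
	dev->in_ctx = malloc(64);
	if (!dev->in_ctx)
		goto fail;

	/* publish only the fully constructed object */
	ctrl->devs[slot_id] = dev;
	return 1;

fail:
	/* unwind locally; the array was never touched, so no stale entry */
	free(dev->in_ctx);
	free(dev->out_ctx);
	free(dev);
	return 0;
}

int main(void)
{
	struct controller ctrl = { { NULL } };

	return alloc_virt_device(&ctrl, 3) ? 0 : 1;
}
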