--- /dev/null
+From e00f4f4d0ff7e13b9115428a245b49108d625f09 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 21 Nov 2016 18:03:32 -0500
+Subject: block,blkcg: use __GFP_NOWARN for best-effort allocations in blkcg
+
+From: Tejun Heo <tj@kernel.org>
+
+commit e00f4f4d0ff7e13b9115428a245b49108d625f09 upstream.
+
+blkcg allocates some per-cgroup data structures with GFP_NOWAIT and,
+when that fails, falls back to operations which aren't specific to the
+cgroup. Occasional failures are expected under pressure and falling
+back to non-cgroup operation is the right thing to do.
+
+Unfortunately, I forgot to add __GFP_NOWARN to these allocations and
+these expected failures end up creating a lot of noise. Add
+__GFP_NOWARN.
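+
+As an illustration, the resulting allocation pattern looks like this (a
+minimal sketch, not the exact blkcg code; "struct foo" and
+fall_back_to_non_cgroup_path() are hypothetical stand-ins):
+
+	struct foo *p;	/* hypothetical per-cgroup structure */
+
+	/*
+	 * GFP_NOWAIT: never sleep or enter direct reclaim.
+	 * __GFP_NOWARN: suppress the allocation-failure warning,
+	 * since the caller has a graceful fallback.
+	 */
+	p = kzalloc(sizeof(*p), GFP_NOWAIT | __GFP_NOWARN);
+	if (!p)
+		fall_back_to_non_cgroup_path();	/* hypothetical */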
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Marc MERLIN <marc@merlins.org>
+Reported-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-cgroup.c | 9 +++++----
+ block/cfq-iosched.c | 3 ++-
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -185,7 +185,8 @@ static struct blkcg_gq *blkg_create(stru
+ }
+
+ wb_congested = wb_congested_get_create(&q->backing_dev_info,
+- blkcg->css.id, GFP_NOWAIT);
++ blkcg->css.id,
++ GFP_NOWAIT | __GFP_NOWARN);
+ if (!wb_congested) {
+ ret = -ENOMEM;
+ goto err_put_css;
+@@ -193,7 +194,7 @@ static struct blkcg_gq *blkg_create(stru
+
+ /* allocate */
+ if (!new_blkg) {
+- new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
++ new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
+ if (unlikely(!new_blkg)) {
+ ret = -ENOMEM;
+ goto err_put_congested;
+@@ -1022,7 +1023,7 @@ blkcg_css_alloc(struct cgroup_subsys_sta
+ }
+
+ spin_lock_init(&blkcg->lock);
+- INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
++ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
+ INIT_HLIST_HEAD(&blkcg->blkg_list);
+ #ifdef CONFIG_CGROUP_WRITEBACK
+ INIT_LIST_HEAD(&blkcg->cgwb_list);
+@@ -1238,7 +1239,7 @@ pd_prealloc:
+ if (blkg->pd[pol->plid])
+ continue;
+
+- pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
++ pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
+ if (!pd)
+ swap(pd, pd_prealloc);
+ if (!pd) {
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -3811,7 +3811,8 @@ cfq_get_queue(struct cfq_data *cfqd, boo
+ goto out;
+ }
+
+- cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
++ cfqq = kmem_cache_alloc_node(cfq_pool,
++ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
+ cfqd->queue->node);
+ if (!cfqq) {
+ cfqq = &cfqd->oom_cfqq;
--- /dev/null
+From b3193bc0dca9bb69c8ba1ec1a318105c76eb4172 Mon Sep 17 00:00:00 2001
+From: Ritesh Harjani <riteshh@codeaurora.org>
+Date: Wed, 9 Aug 2017 18:28:32 +0530
+Subject: cfq: Give a chance for arming slice idle timer in case of group_idle
+
+From: Ritesh Harjani <riteshh@codeaurora.org>
+
+commit b3193bc0dca9bb69c8ba1ec1a318105c76eb4172 upstream.
+
+In the scenario below, blkio cgroups do not work as per their assigned
+weights:
+1. The underlying device is non-rotational with a single HW queue of
+depth >= CFQ_HW_QUEUE_MIN.
+2. Two blkio cgroups, cg1 (weight 1000) and cg2 (weight 100), each run
+one process (file1 and file2 respectively) doing sync IO.
+
+fio results for the above use case (without this patch):
+file1: (groupid=0, jobs=1): err= 0: pid=685: Thu Jan 1 19:41:49 1970
+ write: IOPS=1315, BW=41.1MiB/s (43.1MB/s)(1024MiB/24906msec)
+<...>
+file2: (groupid=0, jobs=1): err= 0: pid=686: Thu Jan 1 19:41:49 1970
+ write: IOPS=1295, BW=40.5MiB/s (42.5MB/s)(1024MiB/25293msec)
+<...>
+// Both processes get equal BW even though they belong to different
+// cgroups with weights of 1000 (cg1) and 100 (cg2).
+
+In the above case (non-rotational NCQ devices), as soon as the request
+from cg1 completes, CFQ expires this group without providing any idle
+time or weight priority, even though cg1 has the larger slice
+(set_slice=10), and schedules another cfq group (here cg2) when the
+driver fetches the next request. Both cfq groups (cg1 and cg2) thus
+keep alternating for disk time, and cgroup weight based scheduling is
+lost.
+
+This patch gives the cfq algorithm (cfq_arm_slice_timer) a chance to
+arm the slice idle timer when group_idle is enabled. If group idling
+is not wanted either (including on non-rotational NCQ drives), it must
+be explicitly disabled from sysfs, e.g. via
+echo 0 > /sys/block/<dev>/queue/iosched/group_idle.
+
+fio results with this patch (same use case):
+file1: (groupid=0, jobs=1): err= 0: pid=690: Thu Jan 1 00:06:08 1970
+ write: IOPS=1706, BW=53.3MiB/s (55.9MB/s)(1024MiB/19197msec)
+<..>
+file2: (groupid=0, jobs=1): err= 0: pid=691: Thu Jan 1 00:06:08 1970
+ write: IOPS=1043, BW=32.6MiB/s (34.2MB/s)(1024MiB/31401msec)
+<..>
+// Each process now gets BW in proportion to its cgroup's weight.
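+
+Conceptually, the changed guard in cfq_arm_slice_timer() now skips
+idling on non-rotational queueing devices only when group idling is
+disabled as well (simplified restatement of the hunk below):
+
+	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
+	    !cfqd->cfq_group_idle)
+		return;	/* no idling at all: expire and schedule next group */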
+
+Signed-off-by: Ritesh Harjani <riteshh@codeaurora.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/cfq-iosched.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -2905,7 +2905,8 @@ static void cfq_arm_slice_timer(struct c
+ * for devices that support queuing, otherwise we still have a problem
+ * with sync vs async workloads.
+ */
+- if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
++ if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
++ !cfqd->cfq_group_idle)
+ return;
+
+ WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
--- /dev/null
+From b0f5a8f32e8bbdaae1abb8abe2d3cbafaba57e08 Mon Sep 17 00:00:00 2001
+From: Vegard Nossum <vegard.nossum@oracle.com>
+Date: Mon, 29 May 2017 09:22:07 +0200
+Subject: kthread: fix boot hang (regression) on MIPS/OpenRISC
+
+From: Vegard Nossum <vegard.nossum@oracle.com>
+
+commit b0f5a8f32e8bbdaae1abb8abe2d3cbafaba57e08 upstream.
+
+This fixes a regression in commit 4d6501dce079 where I didn't notice
+that MIPS and OpenRISC were reinitialising p->{set,clear}_child_tid to
+NULL after our initialisation in copy_process().
+
+We can simply get rid of the arch-specific initialisation here since it
+is now always done in copy_process() before hitting copy_thread{,_tls}().
+
+Review notes:
+
+ - As far as I can tell, copy_process() is the only user of
+ copy_thread_tls(), which is the only caller of copy_thread() for
+ architectures that don't implement copy_thread_tls().
+
+ - After this patch, there is no arch-specific code touching
+ p->set_child_tid or p->clear_child_tid whatsoever.
+
+ - It may look like MIPS/OpenRISC wanted to always have these fields be
+ NULL, but that's not true, as copy_process() would unconditionally
+ set them again _after_ calling copy_thread_tls() before commit
+ 4d6501dce079.
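+
+A condensed view of the ordering after this patch (illustrative,
+following the copy_process() hunks of commit 4d6501dce079):
+
+	/* copy_process(), right after dup_task_struct() succeeds: */
+	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+
+	/* ... much later; must leave the tid fields alone: */
+	retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);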
+
+Fixes: 4d6501dce079c1eb6bf0b1d8f528a5e81770109e ("kthread: Fix use-after-free if kthread fork fails")
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Guenter Roeck <linux@roeck-us.net> # MIPS only
+Acked-by: Stafford Horne <shorne@gmail.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: Jonas Bonn <jonas@southpole.se>
+Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+Cc: openrisc@lists.librecores.org
+Cc: Jamie Iles <jamie.iles@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/process.c | 1 -
+ arch/openrisc/kernel/process.c | 2 --
+ 2 files changed, 3 deletions(-)
+
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -115,7 +115,6 @@ int copy_thread(unsigned long clone_flag
+ struct thread_info *ti = task_thread_info(p);
+ struct pt_regs *childregs, *regs = current_pt_regs();
+ unsigned long childksp;
+- p->set_child_tid = p->clear_child_tid = NULL;
+
+ childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
+
+--- a/arch/openrisc/kernel/process.c
++++ b/arch/openrisc/kernel/process.c
+@@ -152,8 +152,6 @@ copy_thread(unsigned long clone_flags, u
+
+ top_of_kernel_stack = sp;
+
+- p->set_child_tid = p->clear_child_tid = NULL;
+-
+ /* Locate userspace context on stack... */
+ sp -= STACK_FRAME_OVERHEAD; /* redzone */
+ sp -= sizeof(struct pt_regs);
--- /dev/null
+From 4d6501dce079c1eb6bf0b1d8f528a5e81770109e Mon Sep 17 00:00:00 2001
+From: Vegard Nossum <vegard.nossum@oracle.com>
+Date: Tue, 9 May 2017 09:39:59 +0200
+Subject: kthread: Fix use-after-free if kthread fork fails
+
+From: Vegard Nossum <vegard.nossum@oracle.com>
+
+commit 4d6501dce079c1eb6bf0b1d8f528a5e81770109e upstream.
+
+If a kthread forks (e.g. usermodehelper since commit 1da5c46fa965) but
+fails in copy_process() between calling dup_task_struct() and setting
+p->set_child_tid, then the value of p->set_child_tid will be inherited
+from the parent and get prematurely freed by free_kthread_struct().
+
+ kthread()
+ - worker_thread()
+ - process_one_work()
+ | - call_usermodehelper_exec_work()
+ | - kernel_thread()
+ | - _do_fork()
+ | - copy_process()
+ | - dup_task_struct()
+ | - arch_dup_task_struct()
+ | - tsk->set_child_tid = current->set_child_tid // implied
+ | - ...
+ | - goto bad_fork_*
+ | - ...
+ | - free_task(tsk)
+ | - free_kthread_struct(tsk)
+ | - kfree(tsk->set_child_tid)
+ - ...
+ - schedule()
+ - __schedule()
+ - wq_worker_sleeping()
+ - kthread_data(task)->flags // UAF
+
+The problem started showing up with commit 1da5c46fa965 since it reused
+->set_child_tid for the kthread worker data.
+
+A better long-term solution might be to get rid of the ->set_child_tid
+abuse. The comment in set_kthread_struct() also looks slightly wrong.
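+
+The inheritance happens because dup_task_struct() copies the parent's
+task_struct wholesale (simplified, names as in arch_dup_task_struct()):
+
+	/* arch_dup_task_struct(), effectively: */
+	*dst = *src;	/* dst->set_child_tid now aliases the parent
+			 * kthread's struct kthread */
+
+	/* Any bad_fork_* exit taken before the field is reset then does
+	 * free_task() -> free_kthread_struct() -> kfree(set_child_tid),
+	 * freeing the parent's live data. */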
+
+Debugged-by: Jamie Iles <jamie.iles@oracle.com>
+Fixes: 1da5c46fa965 ("kthread: Make struct kthread kmalloc'ed")
+Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Jamie Iles <jamie.iles@oracle.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20170509073959.17858-1-vegard.nossum@oracle.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/fork.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1337,6 +1337,18 @@ static struct task_struct *copy_process(
+ if (!p)
+ goto fork_out;
+
++ /*
++ * This _must_ happen before we call free_task(), i.e. before we jump
++ * to any of the bad_fork_* labels. This is to avoid freeing
++ * p->set_child_tid which is (ab)used as a kthread's data pointer for
++ * kernel threads (PF_KTHREAD).
++ */
++ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
++ /*
++ * Clear TID on mm_release()?
++ */
++ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
++
+ ftrace_graph_init_task(p);
+
+ rt_mutex_init_task(p);
+@@ -1498,11 +1510,6 @@ static struct task_struct *copy_process(
+ }
+ }
+
+- p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+- /*
+- * Clear TID on mm_release()?
+- */
+- p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+ #ifdef CONFIG_BLOCK
+ p->plug = NULL;
+ #endif
--- /dev/null
+From 50972fe78f24f1cd0b9d7bbf1f87d2be9e4f412e Mon Sep 17 00:00:00 2001
+From: Prateek Sood <prsood@codeaurora.org>
+Date: Fri, 14 Jul 2017 19:17:56 +0530
+Subject: locking/osq_lock: Fix osq_lock queue corruption
+
+From: Prateek Sood <prsood@codeaurora.org>
+
+commit 50972fe78f24f1cd0b9d7bbf1f87d2be9e4f412e upstream.
+
+Fix the ordering of link creation between node->prev and prev->next in
+osq_lock(). Consider a case in which the optimistic spin queue is
+CPU6->CPU2, with CPU6 having acquired the lock.
+
+ tail
+ v
+ ,-. <- ,-.
+ |6| |2|
+ `-' -> `-'
+
+At this point if CPU0 comes in to acquire osq_lock, it will update the
+tail count.
+
+ CPU2 CPU0
+ ----------------------------------
+
+ tail
+ v
+ ,-. <- ,-. ,-.
+ |6| |2| |0|
+ `-' -> `-' `-'
+
+After the tail count update, if CPU2 starts to unqueue itself from the
+optimistic spin queue, it will find an updated tail count (now CPU0)
+and update CPU2's node->next to NULL in osq_wait_next().
+
+ unqueue-A
+
+ tail
+ v
+ ,-. <- ,-. ,-.
+ |6| |2| |0|
+ `-' `-' `-'
+
+ unqueue-B
+
+ ->tail != curr && !node->next
+
+If the following stores are reordered, then prev->next (prev being
+CPU2) would be updated to point to the CPU0 node:
+
+ tail
+ v
+ ,-. <- ,-. ,-.
+ |6| |2| |0|
+ `-' `-' -> `-'
+
+ osq_wait_next()
+ node->next <- 0
+ xchg(node->next, NULL)
+
+ tail
+ v
+ ,-. <- ,-. ,-.
+ |6| |2| |0|
+ `-' `-' `-'
+
+ unqueue-C
+
+At this point, if the next instruction
+ WRITE_ONCE(next->prev, prev);
+in the CPU2 path is committed before CPU0's node->prev = prev update,
+then CPU0's node->prev will point to the CPU6 node.
+
+ tail
+ v----------. v
+ ,-. <- ,-. ,-.
+ |6| |2| |0|
+ `-' `-' `-'
+ `----------^
+
+If CPU0's node->prev = prev store now commits, CPU0's prev changes
+back to the CPU2 node, while CPU2's node->next is currently NULL:
+
+ tail
+ v
+ ,-. <- ,-. <- ,-.
+ |6| |2| |0|
+ `-' `-' `-'
+ `----------^
+
+so if CPU0 gets into the unqueue path of osq_lock it will keep
+spinning in an infinite loop, as the condition prev->next == node will
+never be true.
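+
+The fix, shown in the hunk below, orders the two publishing stores so
+that the node->prev link is visible before the prev->next link:
+
+	node->prev = prev;
+	smp_wmb();	/* publish node->prev before prev->next */
+	WRITE_ONCE(prev->next, node);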
+
+Signed-off-by: Prateek Sood <prsood@codeaurora.org>
+[ Added pictures, rewrote comments. ]
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: sramana@codeaurora.org
+Link: http://lkml.kernel.org/r/1500040076-27626-1-git-send-email-prsood@codeaurora.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/locking/osq_lock.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/kernel/locking/osq_lock.c
++++ b/kernel/locking/osq_lock.c
+@@ -104,6 +104,19 @@ bool osq_lock(struct optimistic_spin_que
+
+ prev = decode_cpu(old);
+ node->prev = prev;
++
++ /*
++ * osq_lock() unqueue
++ *
++ * node->prev = prev osq_wait_next()
++ * WMB MB
++ * prev->next = node next->prev = prev // unqueue-C
++ *
++ * Here 'node->prev' and 'next->prev' are the same variable and we need
++ * to ensure these stores happen in-order to avoid corrupting the list.
++ */
++ smp_wmb();
++
+ WRITE_ONCE(prev->next, node);
+
+ /*
--- /dev/null
+From 9c29c31830a4eca724e137a9339137204bbb31be Mon Sep 17 00:00:00 2001
+From: Prateek Sood <prsood@codeaurora.org>
+Date: Thu, 7 Sep 2017 20:00:58 +0530
+Subject: locking/rwsem-xadd: Fix missed wakeup due to reordering of load
+
+From: Prateek Sood <prsood@codeaurora.org>
+
+commit 9c29c31830a4eca724e137a9339137204bbb31be upstream.
+
+If a spinner is present, there is a chance that the load of
+rwsem_has_spinner() in rwsem_wake() can be reordered with respect to
+the decrement of the rwsem count in __up_write(), leading to the
+wakeup being missed:
+
+ spinning writer up_write caller
+ --------------- -----------------------
+ [S] osq_unlock() [L] osq
+ spin_lock(wait_lock)
+ sem->count=0xFFFFFFFF00000001
+ +0xFFFFFFFF00000000
+ count=sem->count
+ MB
+ sem->count=0xFFFFFFFE00000001
+ -0xFFFFFFFF00000001
+ spin_trylock(wait_lock)
+ return
+ rwsem_try_write_lock(count)
+ spin_unlock(wait_lock)
+ schedule()
+
+Reordering of atomic_long_sub_return_release() in __up_write() and
+rwsem_has_spinner() in rwsem_wake() can cause the wakeup to be missed
+in the up_write() context. In the spinning writer, both sem->count and
+the local variable count are 0xFFFFFFFE00000001, which makes
+rwsem_try_write_lock() fail to acquire the rwsem, so the spinning
+writer goes to sleep in rwsem_down_write_failed().
+
+The smp_rmb() makes sure that the spinner state is consulted only
+after sem->count has been updated in the up_write() context.
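+
+A simplified sketch of where the barrier lands in rwsem_wake() (the
+surrounding code is condensed, not exact):
+
+	smp_rmb();	/* read the spinner state only after the
+			 * __up_write() count update is visible */
+	if (rwsem_has_spinner(sem) &&
+	    !raw_spin_trylock_irqsave(&sem->wait_lock, flags))
+		return sem;	/* spinner will take over; skip the wakeup */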
+
+Signed-off-by: Prateek Sood <prsood@codeaurora.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: dave@stgolabs.net
+Cc: longman@redhat.com
+Cc: parri.andrea@gmail.com
+Cc: sramana@codeaurora.org
+Link: http://lkml.kernel.org/r/1504794658-15397-1-git-send-email-prsood@codeaurora.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/locking/rwsem-xadd.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+--- a/kernel/locking/rwsem-xadd.c
++++ b/kernel/locking/rwsem-xadd.c
+@@ -511,6 +511,33 @@ struct rw_semaphore *rwsem_wake(struct r
+ unsigned long flags;
+
+ /*
++ * __rwsem_down_write_failed_common(sem)
++ * rwsem_optimistic_spin(sem)
++ * osq_unlock(sem->osq)
++ * ...
++ * atomic_long_add_return(&sem->count)
++ *
++ * - VS -
++ *
++ * __up_write()
++ * if (atomic_long_sub_return_release(&sem->count) < 0)
++ * rwsem_wake(sem)
++ * osq_is_locked(&sem->osq)
++ *
++ * And __up_write() must observe !osq_is_locked() when it observes the
++ * atomic_long_add_return() in order to not miss a wakeup.
++ *
++ * This boils down to:
++ *
++ * [S.rel] X = 1 [RmW] r0 = (Y += 0)
++ * MB RMB
++ * [RmW] Y += 1 [L] r1 = X
++ *
++ * exists (r0=1 /\ r1=0)
++ */
++ smp_rmb();
++
++ /*
+ * If a spinner is present, it is not necessary to do the wakeup.
+ * Try to do wakeup only if the trylock succeeds to minimize
+ * spinlock contention which may introduce too much delay in the
--- /dev/null
+From 476accbe2f6ef69caeebe99f52a286e12ac35aee Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@kernel.org>
+Date: Thu, 3 Aug 2017 10:11:52 +0200
+Subject: selinux: use GFP_NOWAIT in the AVC kmem_caches
+
+From: Michal Hocko <mhocko@kernel.org>
+
+commit 476accbe2f6ef69caeebe99f52a286e12ac35aee upstream.
+
+There is a strange __GFP_NOMEMALLOC usage pattern in SELinux,
+specifically GFP_ATOMIC | __GFP_NOMEMALLOC, which doesn't make much
+sense: GFP_ATOMIC on its own allows access to memory reserves, while
+__GFP_NOMEMALLOC dictates that we cannot use memory reserves. Replace
+this with the much saner GFP_NOWAIT in the AVC code, as we can
+tolerate memory allocation failures there.
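+
+The resulting call pattern (taken from the diff below) stays atomic but
+tolerates failure without dipping into memory reserves:
+
+	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
+	if (!node)
+		goto out;	/* the AVC simply skips caching this entry */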
+
+Signed-off-by: Michal Hocko <mhocko@kernel.org>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/selinux/avc.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -348,27 +348,26 @@ static struct avc_xperms_decision_node
+ struct avc_xperms_decision_node *xpd_node;
+ struct extended_perms_decision *xpd;
+
+- xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+- GFP_ATOMIC | __GFP_NOMEMALLOC);
++ xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
+ if (!xpd_node)
+ return NULL;
+
+ xpd = &xpd_node->xpd;
+ if (which & XPERMS_ALLOWED) {
+ xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
+- GFP_ATOMIC | __GFP_NOMEMALLOC);
++ GFP_NOWAIT);
+ if (!xpd->allowed)
+ goto error;
+ }
+ if (which & XPERMS_AUDITALLOW) {
+ xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
+- GFP_ATOMIC | __GFP_NOMEMALLOC);
++ GFP_NOWAIT);
+ if (!xpd->auditallow)
+ goto error;
+ }
+ if (which & XPERMS_DONTAUDIT) {
+ xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
+- GFP_ATOMIC | __GFP_NOMEMALLOC);
++ GFP_NOWAIT);
+ if (!xpd->dontaudit)
+ goto error;
+ }
+@@ -396,8 +395,7 @@ static struct avc_xperms_node *avc_xperm
+ {
+ struct avc_xperms_node *xp_node;
+
+- xp_node = kmem_cache_zalloc(avc_xperms_cachep,
+- GFP_ATOMIC|__GFP_NOMEMALLOC);
++ xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
+ if (!xp_node)
+ return xp_node;
+ INIT_LIST_HEAD(&xp_node->xpd_head);
+@@ -550,7 +548,7 @@ static struct avc_node *avc_alloc_node(v
+ {
+ struct avc_node *node;
+
+- node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
++ node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
+ if (!node)
+ goto out;
+
i2c-xiic-make-the-start-and-the-byte-count-write-atomic.patch
i2c-i801-fix-dnv-s-smbctrl-register-offset.patch
alsa-hda-fix-cancel_work_sync-stall-from-jackpoll-work.patch
+cfq-give-a-chance-for-arming-slice-idle-timer-in-case-of-group_idle.patch
+kthread-fix-use-after-free-if-kthread-fork-fails.patch
+kthread-fix-boot-hang-regression-on-mips-openrisc.patch
+staging-rt5208-fix-a-sleep-in-atomic-bug-in-xd_copy_page.patch
+staging-rts5208-fix-read-overflow-in-memcpy.patch
+block-blkcg-use-__gfp_nowarn-for-best-effort-allocations-in-blkcg.patch
+locking-rwsem-xadd-fix-missed-wakeup-due-to-reordering-of-load.patch
+selinux-use-gfp_nowait-in-the-avc-kmem_caches.patch
+locking-osq_lock-fix-osq_lock-queue-corruption.patch
--- /dev/null
+From 498c4b4e9c23855d17ecc2a108d949bb68020481 Mon Sep 17 00:00:00 2001
+From: Jia-Ju Bai <baijiaju1990@163.com>
+Date: Mon, 5 Jun 2017 15:30:16 +0800
+Subject: staging: rt5208: Fix a sleep-in-atomic bug in xd_copy_page
+
+From: Jia-Ju Bai <baijiaju1990@163.com>
+
+commit 498c4b4e9c23855d17ecc2a108d949bb68020481 upstream.
+
+The driver may sleep under a spin lock, and the function call path is:
+rtsx_exclusive_enter_ss (acquire the lock by spin_lock)
+ rtsx_enter_ss
+ rtsx_power_off_card
+ xd_cleanup_work
+ xd_delay_write
+ xd_finish_write
+ xd_copy_page
+ wait_timeout
+ schedule_timeout --> may sleep
+
+To fix it, "wait_timeout" is replaced with mdelay() in xd_copy_page().
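+
+The difference, illustrated (lock name is hypothetical): mdelay()
+busy-waits and is safe in atomic context, while wait_timeout() ends in
+schedule_timeout() and may sleep:
+
+	spin_lock(&lock);
+	mdelay(100);		/* OK: busy-waits for 100 ms */
+	wait_timeout(100);	/* BUG: may schedule_timeout() and sleep */
+	spin_unlock(&lock);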
+
+Signed-off-by: Jia-Ju Bai <baijiaju1990@163.com>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/rts5208/xd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/rts5208/xd.c
++++ b/drivers/staging/rts5208/xd.c
+@@ -1252,7 +1252,7 @@ static int xd_copy_page(struct rtsx_chip
+ reg = 0;
+ rtsx_read_register(chip, XD_CTL, ®);
+ if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
+- wait_timeout(100);
++ mdelay(100);
+
+ if (detect_card_cd(chip,
+ XD_CARD) != STATUS_SUCCESS) {
--- /dev/null
+From 88a5b39b69ab1828fd4130e2baadd184109cea69 Mon Sep 17 00:00:00 2001
+From: Daniel Micay <danielmicay@gmail.com>
+Date: Mon, 5 Jun 2017 21:52:34 -0700
+Subject: staging/rts5208: Fix read overflow in memcpy
+
+From: Daniel Micay <danielmicay@gmail.com>
+
+commit 88a5b39b69ab1828fd4130e2baadd184109cea69 upstream.
+
+Noticed by FORTIFY_SOURCE, this swaps memcpy() for strncpy() so that
+the end of the buffer is zero-filled instead of over-reading a string
+from .rodata.
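+
+Why this matters, as a minimal sketch (string literal and sizes are
+hypothetical; the literal is shorter than the requested copy length):
+
+	static const char inquiry_string[] = "vendor model";	/* 13 bytes in .rodata */
+	char buf[36];
+
+	/* memcpy(buf + 8, inquiry_string, 28) would read 28 bytes,
+	 * running past the literal into adjacent .rodata; strncpy()
+	 * stops at the NUL and zero-fills the remainder: */
+	strncpy(buf + 8, inquiry_string, 28);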
+
+Signed-off-by: Daniel Micay <danielmicay@gmail.com>
+[kees: wrote commit log]
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Wayne Porter <wporter82@gmail.com>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+
+---
+ drivers/staging/rts5208/rtsx_scsi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/rts5208/rtsx_scsi.c
++++ b/drivers/staging/rts5208/rtsx_scsi.c
+@@ -536,7 +536,7 @@ static int inquiry(struct scsi_cmnd *srb
+
+ if (sendbytes > 8) {
+ memcpy(buf, inquiry_buf, 8);
+- memcpy(buf + 8, inquiry_string, sendbytes - 8);
++ strncpy(buf + 8, inquiry_string, sendbytes - 8);
+ if (pro_formatter_flag) {
+ /* Additional Length */
+ buf[4] = 0x33;