--- /dev/null
+From 1356aae08338f1c19ce1c67bf8c543a267688fc3 Mon Sep 17 00:00:00 2001
+From: Akinobu Mita <akinobu.mita@gmail.com>
+Date: Sun, 27 Sep 2015 02:09:19 +0900
+Subject: blk-mq: avoid setting hctx->tags->cpumask before allocation
+
+From: Akinobu Mita <akinobu.mita@gmail.com>
+
+commit 1356aae08338f1c19ce1c67bf8c543a267688fc3 upstream.
+
+When an unmapped hw queue is remapped after the CPU topology has
+changed, hctx->tags->cpumask has to be set after hctx->tags is set up
+in blk_mq_map_swqueue(); otherwise it causes a NULL pointer dereference.
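+
+For illustration only (not kernel code), a minimal standalone sketch of
+the ordering hazard; the struct names below are stand-ins that merely
+mimic the shape of the blk-mq data structures:
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    /* Stand-ins that only mimic the shape of the blk-mq structs. */
+    struct tags_stub { unsigned long cpumask; };
+    struct hctx_stub { unsigned long cpumask; struct tags_stub *tags; };
+
+    int main(void)
+    {
+        struct hctx_stub hctx = { 0, NULL };  /* tags not set up yet */
+
+        hctx.cpumask |= 1UL;  /* fine: cpumask is embedded in hctx */
+        /*
+         * Buggy order: touching hctx.tags->cpumask here would
+         * dereference NULL, because tags is not allocated yet.
+         */
+
+        hctx.tags = calloc(1, sizeof(*hctx.tags));
+        if (!hctx.tags)
+            return 1;
+        hctx.tags->cpumask |= 1UL;  /* safe: tags is set up now */
+
+        printf("tags cpumask = %#lx\n", hctx.tags->cpumask);
+        free(hctx.tags);
+        return 0;
+    }
+
+The fix below follows the same ordering inside blk_mq_map_swqueue():
+the bits in hctx->tags->cpumask are set in a separate later pass, once
+hctx->tags has been set up.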
+
+Fixes: f26cdc8536 ("blk-mq: Shared tag enhancements")
+Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
+Cc: Keith Busch <keith.busch@intel.com>
+Cc: Ming Lei <tom.leiming@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-mq.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1807,7 +1807,6 @@ static void blk_mq_map_swqueue(struct re
+
+ hctx = q->mq_ops->map_queue(q, i);
+ cpumask_set_cpu(i, hctx->cpumask);
+- cpumask_set_cpu(i, hctx->tags->cpumask);
+ ctx->index_hw = hctx->nr_ctx;
+ hctx->ctxs[hctx->nr_ctx++] = ctx;
+ }
+@@ -1847,6 +1846,14 @@ static void blk_mq_map_swqueue(struct re
+ hctx->next_cpu = cpumask_first(hctx->cpumask);
+ hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+ }
++
++ queue_for_each_ctx(q, ctx, i) {
++ if (!cpu_online(i))
++ continue;
++
++ hctx = q->mq_ops->map_queue(q, i);
++ cpumask_set_cpu(i, hctx->tags->cpumask);
++ }
+ }
+
+ static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
--- /dev/null
+From 03a2d2a3eafe4015412cf4e9675ca0e2d9204074 Mon Sep 17 00:00:00 2001
+From: Joonsoo Kim <js1304@gmail.com>
+Date: Thu, 1 Oct 2015 15:36:54 -0700
+Subject: mm/slab: fix unexpected index mapping result of kmalloc_size(INDEX_NODE+1)
+
+From: Joonsoo Kim <js1304@gmail.com>
+
+commit 03a2d2a3eafe4015412cf4e9675ca0e2d9204074 upstream.
+
+Commit description is copied from the original post of this bug:
+
+ http://comments.gmane.org/gmane.linux.kernel.mm/135349
+
+Kernels after v3.9 use kmalloc_size(INDEX_NODE + 1) to get the next
+cache size larger than the one that index INDEX_NODE maps to. In
+kernels 3.9 and earlier, malloc_sizes[INDEX_L3 + 1].cs_size was used
+instead.
+
+However, kmalloc_size(INDEX_NODE + 1) does not always return the
+expected size, which causes a BUG().
+
+The mapping table in the latest kernel is like:
+    index = { 0,  1,   2,  3,  4,  5,  6,   n}
+    size  = { 0, 96, 192,  8, 16, 32, 64, 2^n}
+
+The mapping table before 3.10 is like this:
+    index = { 0,  1,  2,   3,   4,   5,   6,       n}
+    size  = {32, 64, 96, 128, 192, 256, 512, 2^(n+3)}
+
+The problem on my mips64 machine is as follows:
+
+(1) When DEBUG_SLAB && DEBUG_PAGEALLOC && DEBUG_LOCK_ALLOC &&
+    DEBUG_SPINLOCK are configured, sizeof(struct kmem_cache_node) is
+    150, and the macro INDEX_NODE, defined as #define INDEX_NODE
+    kmalloc_index(sizeof(struct kmem_cache_node)), turns out to be 2.
+
+(2) Then the result of kmalloc_size(INDEX_NODE + 1) is 8.
+
+(3) Then the "if (size >= kmalloc_size(INDEX_NODE + 1))" test succeeds
+    and leads to "size = PAGE_SIZE".
+
+(4) Then the "size >= (PAGE_SIZE >> 3)" test is satisfied as well, so
+    "flags |= CFLGS_OFF_SLAB" is executed.
+
+(5) The "if (flags & CFLGS_OFF_SLAB)" test is then satisfied and we go
+    to "cachep->slabp_cache = kmalloc_slab(slab_size, 0u)", whose
+    result may be NULL during kernel bootup.
+
+(6) Finally, "BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));" triggers
+    the BUG reported in the original post above (maybe only mips64 has
+    this problem).
+
+This patch fixes the problem with kmalloc_size(INDEX_NODE + 1) and
+removes the BUG by adding a 'size >= 256' check, which guarantees that
+all necessary small sized slabs are initialized regardless of the
+sequence of slab sizes in the mapping table.
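+
+For illustration only (not part of the fix), here is a standalone
+sketch of the arithmetic in the steps above; kmalloc_size() below is a
+simplified copy of the generic helper, and INDEX_NODE is hard-coded to
+the value 2 found in step (1):
+
+    #include <stdio.h>
+
+    /* Simplified kmalloc_size(): follows the post-3.10 mapping table. */
+    static int kmalloc_size(int n)
+    {
+        if (n == 1)
+            return 96;
+        if (n == 2)
+            return 192;
+        return n ? 1 << n : 0;
+    }
+
+    int main(void)
+    {
+        int index_node = 2;  /* kmalloc_index(150), as in step (1) */
+
+        printf("%d\n", kmalloc_size(index_node));      /* 192 */
+        printf("%d\n", kmalloc_size(index_node + 1));  /* 8, not 256 */
+        return 0;
+    }
+
+With the pre-3.10 table, index + 1 always meant the next larger size;
+with the 96/192 special cases it no longer does, which is why the
+condition in the hunk below adds the 'size >= 256' check.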
+
+Fixes: e33660165c90 ("slab: Use common kmalloc_index/kmalloc_size...")
+Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Reported-by: Liuhailong <liu.hailong6@zte.com.cn>
+Acked-by: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/slab.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *
+ size += BYTES_PER_WORD;
+ }
+ #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
+- if (size >= kmalloc_size(INDEX_NODE + 1)
+- && cachep->object_size > cache_line_size()
+- && ALIGN(size, cachep->align) < PAGE_SIZE) {
++ /*
++ * To activate debug pagealloc, off-slab management is necessary
++ * requirement. In early phase of initialization, small sized slab
++ * doesn't get initialized so it would not be possible. So, we need
++ * to check size >= 256. It guarantees that all necessary small
++ * sized slab is initialized in current slab initialization sequence.
++ */
++ if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
++ size >= 256 && cachep->object_size > cache_line_size() &&
++ ALIGN(size, cachep->align) < PAGE_SIZE) {
+ cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
+ size = PAGE_SIZE;
+ }