Fixes for 4.14
author    Sasha Levin <sashal@kernel.org>
          Fri, 22 May 2020 00:42:55 +0000 (20:42 -0400)
committer Sasha Levin <sashal@kernel.org>
          Fri, 22 May 2020 00:42:55 +0000 (20:42 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.14/arm64-fix-the-flush_icache_range-arguments-in-machin.patch [new file with mode: 0644]
queue-4.14/padata-initialize-pd-cpu-with-effective-cpumask.patch [new file with mode: 0644]
queue-4.14/padata-purge-get_cpu-and-reorder_via_wq-from-padata_.patch [new file with mode: 0644]
queue-4.14/padata-replace-delayed-timer-with-immediate-workqueu.patch [new file with mode: 0644]
queue-4.14/padata-set-cpu_index-of-unused-cpus-to-1.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/arm64-fix-the-flush_icache_range-arguments-in-machin.patch b/queue-4.14/arm64-fix-the-flush_icache_range-arguments-in-machin.patch
new file mode 100644
index 0000000..27ba2ac
--- /dev/null
+++ b/queue-4.14/arm64-fix-the-flush_icache_range-arguments-in-machin.patch
@@ -0,0 +1,37 @@
+From b3dd863e124e4389128762796940e99a41d9f6a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 May 2020 15:41:35 +0100
+Subject: arm64: fix the flush_icache_range arguments in machine_kexec
+
+From: Christoph Hellwig <hch@lst.de>
+
+Commit d51c214541c5154dda3037289ee895ea3ded5ebd upstream.
+
+The second argument is the end "pointer", not the length.
+
+Fixes: d28f6df1305a ("arm64/kexec: Add core kexec support")
+Cc: <stable@vger.kernel.org> # 4.8.x-
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/machine_kexec.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
+index 11121f608eb5..f7e593965c1d 100644
+--- a/arch/arm64/kernel/machine_kexec.c
++++ b/arch/arm64/kernel/machine_kexec.c
+@@ -184,7 +184,8 @@ void machine_kexec(struct kimage *kimage)
+       /* Flush the reboot_code_buffer in preparation for its execution. */
+       __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
+       flush_icache_range((uintptr_t)reboot_code_buffer,
+-              arm64_relocate_new_kernel_size);
++                         (uintptr_t)reboot_code_buffer +
++                         arm64_relocate_new_kernel_size);
+       /* Flush the kimage list and its buffers. */
+       kexec_list_flush(kimage);
+-- 
+2.25.1
+
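The contract behind this one-liner is simple: flush_icache_range() operates on the half-open virtual-address range [start, end), so a caller holding a buffer and a length must pass start and start + length. A hypothetical userspace stand-in (not the kernel implementation) makes the pre- and post-patch call patterns visible:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's flush_icache_range(start, end):
 * it acts on the half-open range [start, end), never on (start, length). */
static void flush_icache_range(uintptr_t start, uintptr_t end)
{
	printf("flush [%#lx, %#lx): %ld bytes\n",
	       (unsigned long)start, (unsigned long)end,
	       (long)(end - start));
}

int main(void)
{
	static char buf[4096];
	size_t len = sizeof(buf);

	/* Buggy pre-patch call: a length where an end address belongs;
	 * the resulting "range" ends before it starts. */
	flush_icache_range((uintptr_t)buf, len);

	/* Fixed call, as in the patch: end = start + length. */
	flush_icache_range((uintptr_t)buf, (uintptr_t)buf + len);
	return 0;
}

Note how __flush_dcache_area() in the hunk above takes a length while flush_icache_range() takes an end address; mixing those two conventions is exactly the bug being fixed.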
diff --git a/queue-4.14/padata-initialize-pd-cpu-with-effective-cpumask.patch b/queue-4.14/padata-initialize-pd-cpu-with-effective-cpumask.patch
new file mode 100644
index 0000000..d6ff5a8
--- /dev/null
+++ b/queue-4.14/padata-initialize-pd-cpu-with-effective-cpumask.patch
@@ -0,0 +1,76 @@
+From f9bb5de140208caa2584884b3af45a6507244b97 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 May 2020 16:46:57 -0400
+Subject: padata: initialize pd->cpu with effective cpumask
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+[ Upstream commit ec9c7d19336ee98ecba8de80128aa405c45feebb ]
+
+Exercising CPU hotplug on a 5.2 kernel with recent padata fixes from
+cryptodev-2.6.git in an 8-CPU kvm guest...
+
+    # modprobe tcrypt alg="pcrypt(rfc4106(gcm(aes)))" type=3
+    # echo 0 > /sys/devices/system/cpu/cpu1/online
+    # echo c > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
+    # modprobe tcrypt mode=215
+
+...caused the following crash:
+
+    BUG: kernel NULL pointer dereference, address: 0000000000000000
+    #PF: supervisor read access in kernel mode
+    #PF: error_code(0x0000) - not-present page
+    PGD 0 P4D 0
+    Oops: 0000 [#1] SMP PTI
+    CPU: 2 PID: 134 Comm: kworker/2:2 Not tainted 5.2.0-padata-base+ #7
+    Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-<snip>
+    Workqueue: pencrypt padata_parallel_worker
+    RIP: 0010:padata_reorder+0xcb/0x180
+    ...
+    Call Trace:
+     padata_do_serial+0x57/0x60
+     pcrypt_aead_enc+0x3a/0x50 [pcrypt]
+     padata_parallel_worker+0x9b/0xe0
+     process_one_work+0x1b5/0x3f0
+     worker_thread+0x4a/0x3c0
+     ...
+
+In padata_alloc_pd, pd->cpu is set using the user-supplied cpumask
+instead of the effective cpumask, and in this case cpumask_first picked
+an offline CPU.
+
+The offline CPU's reorder->list.next is NULL in padata_reorder because
+the list wasn't initialized in padata_init_pqueues, which only operates
+on CPUs in the effective mask.
+
+Fix by using the effective mask in padata_alloc_pd.
+
+Fixes: 6fc4dbcf0276 ("padata: Replace delayed timer with immediate workqueue in padata_reorder")
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/padata.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 66d96ed62286..6d0cdee9d321 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -450,7 +450,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+       atomic_set(&pd->refcnt, 1);
+       pd->pinst = pinst;
+       spin_lock_init(&pd->lock);
+-      pd->cpu = cpumask_first(pcpumask);
++      pd->cpu = cpumask_first(pd->cpumask.pcpu);
+       INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
+       return pd;
+-- 
+2.25.1
+
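The crash reduces to a mask-intersection rule: pd->cpu must be chosen from the parallel mask after it has been intersected with the online mask, never from the raw user-supplied mask. A hypothetical bitmask model of that rule (userspace sketch, not kernel code, with cpumask_first() modeled as the lowest set bit):

#include <stdio.h>

static int first_cpu(unsigned mask)
{
	int cpu;

	for (cpu = 0; cpu < 32; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return -1;
}

int main(void)
{
	unsigned user_mask   = 0x03;	/* CPUs 0 and 1 requested */
	unsigned online_mask = 0xfe;	/* CPU 0 is offline       */
	unsigned effective   = user_mask & online_mask;

	/* Pre-patch: picks CPU 0, whose reorder queue was never set up. */
	printf("buggy pd->cpu = %d\n", first_cpu(user_mask));
	/* Post-patch: picks CPU 1, guaranteed initialized and online.   */
	printf("fixed pd->cpu = %d\n", first_cpu(effective));
	return 0;
}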
diff --git a/queue-4.14/padata-purge-get_cpu-and-reorder_via_wq-from-padata_.patch b/queue-4.14/padata-purge-get_cpu-and-reorder_via_wq-from-padata_.patch
new file mode 100644
index 0000000..9aedf73
--- /dev/null
+++ b/queue-4.14/padata-purge-get_cpu-and-reorder_via_wq-from-padata_.patch
@@ -0,0 +1,68 @@
+From 77d14ec548612caf02001b8e7b3f512cd339a05d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 May 2020 16:46:58 -0400
+Subject: padata: purge get_cpu and reorder_via_wq from padata_do_serial
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+[ Upstream commit 065cf577135a4977931c7a1e1edf442bfd9773dd ]
+
+With the removal of the padata timer, padata_do_serial no longer
+needs special CPU handling, so remove it.
+
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/padata.c | 23 +++--------------------
+ 1 file changed, 3 insertions(+), 20 deletions(-)
+
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 6d0cdee9d321..f56ec63f60ba 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -323,24 +323,9 @@ static void padata_serial_worker(struct work_struct *serial_work)
+  */
+ void padata_do_serial(struct padata_priv *padata)
+ {
+-      int cpu;
+-      struct padata_parallel_queue *pqueue;
+-      struct parallel_data *pd;
+-      int reorder_via_wq = 0;
+-
+-      pd = padata->pd;
+-
+-      cpu = get_cpu();
+-
+-      /* We need to enqueue the padata object into the correct
+-       * per-cpu queue.
+-       */
+-      if (cpu != padata->cpu) {
+-              reorder_via_wq = 1;
+-              cpu = padata->cpu;
+-      }
+-
+-      pqueue = per_cpu_ptr(pd->pqueue, cpu);
++      struct parallel_data *pd = padata->pd;
++      struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
++                                                         padata->cpu);
+       spin_lock(&pqueue->reorder.lock);
+       list_add_tail(&padata->list, &pqueue->reorder.list);
+@@ -354,8 +339,6 @@ void padata_do_serial(struct padata_priv *padata)
+        */
+       smp_mb__after_atomic();
+-      put_cpu();
+-
+       padata_reorder(pd);
+ }
+ EXPORT_SYMBOL(padata_do_serial);
+-- 
+2.25.1
+
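What survives of padata_do_serial is the invariant that makes the deleted code unnecessary: the object carries padata->cpu, recorded when it was queued for parallel work, and the reorder queue is always selected from that field, never from whichever CPU happens to run the serial path. A hypothetical userspace sketch of that shape:

#include <stdio.h>

struct padata_priv {
	int cpu;	/* CPU recorded at padata_do_parallel() time */
	int seq;
};

/* Hypothetical model: enqueue on the queue of padata->cpu regardless of
 * the CPU this runs on, so the get_cpu()/reorder_via_wq dance is dead. */
static void do_serial(const struct padata_priv *padata, int running_cpu)
{
	printf("running on cpu %d, but seq %d goes to queue %d\n",
	       running_cpu, padata->seq, padata->cpu);
}

int main(void)
{
	struct padata_priv p = { .cpu = 3, .seq = 42 };

	do_serial(&p, 0);	/* same result wherever it runs */
	do_serial(&p, 3);
	return 0;
}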
diff --git a/queue-4.14/padata-replace-delayed-timer-with-immediate-workqueu.patch b/queue-4.14/padata-replace-delayed-timer-with-immediate-workqueu.patch
new file mode 100644
index 0000000..a78170d
--- /dev/null
+++ b/queue-4.14/padata-replace-delayed-timer-with-immediate-workqueu.patch
@@ -0,0 +1,308 @@
+From d4df6c147a2c4aeeedb3971a1cb4b7509382e4fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 May 2020 16:46:56 -0400
+Subject: padata: Replace delayed timer with immediate workqueue in
+ padata_reorder
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 6fc4dbcf0276279d488c5fbbfabe94734134f4fa ]
+
+The function padata_reorder will use a timer when it cannot progress
+while completed jobs are outstanding (pd->reorder_objects > 0).  This
+is suboptimal: if we do end up using the timer, it introduces a
+gratuitous delay of one second.
+
+In fact we can easily distinguish between whether completed jobs
+are outstanding and whether we can make progress.  All we have to
+do is look at the next pqueue list.
+
+This patch does that by replacing pd->processed with pd->cpu so
+that the next pqueue is more accessible.
+
+A work queue is used instead of the original try_again to avoid
+hogging the CPU.
+
+Note that we don't bother removing the work queue in
+padata_flush_queues because the whole premise is broken.  You
+cannot flush async crypto requests so it makes no sense to even
+try.  A subsequent patch will fix it by replacing it with a ref
+counting scheme.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[dj: - adjust context
+     - corrected setup_timer -> timer_setup to delete hunk
+     - skip padata_flush_queues() hunk, function already removed
+       in 4.14]
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/padata.h | 13 ++----
+ kernel/padata.c        | 95 ++++++++----------------------------------
+ 2 files changed, 22 insertions(+), 86 deletions(-)
+
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 5d13d25da2c8..d803397a28f7 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -24,7 +24,6 @@
+ #include <linux/workqueue.h>
+ #include <linux/spinlock.h>
+ #include <linux/list.h>
+-#include <linux/timer.h>
+ #include <linux/notifier.h>
+ #include <linux/kobject.h>
+@@ -85,18 +84,14 @@ struct padata_serial_queue {
+  * @serial: List to wait for serialization after reordering.
+  * @pwork: work struct for parallelization.
+  * @swork: work struct for serialization.
+- * @pd: Backpointer to the internal control structure.
+  * @work: work struct for parallelization.
+- * @reorder_work: work struct for reordering.
+  * @num_obj: Number of objects that are processed by this cpu.
+  * @cpu_index: Index of the cpu.
+  */
+ struct padata_parallel_queue {
+        struct padata_list    parallel;
+        struct padata_list    reorder;
+-       struct parallel_data *pd;
+        struct work_struct    work;
+-       struct work_struct    reorder_work;
+        atomic_t              num_obj;
+        int                   cpu_index;
+ };
+@@ -122,10 +117,10 @@ struct padata_cpumask {
+  * @reorder_objects: Number of objects waiting in the reorder queues.
+  * @refcnt: Number of objects holding a reference on this parallel_data.
+  * @max_seq_nr:  Maximal used sequence number.
++ * @cpu: Next CPU to be processed.
+  * @cpumask: The cpumasks in use for parallel and serial workers.
++ * @reorder_work: work struct for reordering.
+  * @lock: Reorder lock.
+- * @processed: Number of already processed objects.
+- * @timer: Reorder timer.
+  */
+ struct parallel_data {
+       struct padata_instance          *pinst;
+@@ -134,10 +129,10 @@ struct parallel_data {
+       atomic_t                        reorder_objects;
+       atomic_t                        refcnt;
+       atomic_t                        seq_nr;
++      int                             cpu;
+       struct padata_cpumask           cpumask;
++      struct work_struct              reorder_work;
+       spinlock_t                      lock ____cacheline_aligned;
+-      unsigned int                    processed;
+-      struct timer_list               timer;
+ };
+ /**
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 858e82179744..66d96ed62286 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -166,23 +166,12 @@ EXPORT_SYMBOL(padata_do_parallel);
+  */
+ static struct padata_priv *padata_get_next(struct parallel_data *pd)
+ {
+-      int cpu, num_cpus;
+-      unsigned int next_nr, next_index;
+       struct padata_parallel_queue *next_queue;
+       struct padata_priv *padata;
+       struct padata_list *reorder;
++      int cpu = pd->cpu;
+-      num_cpus = cpumask_weight(pd->cpumask.pcpu);
+-
+-      /*
+-       * Calculate the percpu reorder queue and the sequence
+-       * number of the next object.
+-       */
+-      next_nr = pd->processed;
+-      next_index = next_nr % num_cpus;
+-      cpu = padata_index_to_cpu(pd, next_index);
+       next_queue = per_cpu_ptr(pd->pqueue, cpu);
+-
+       reorder = &next_queue->reorder;
+       spin_lock(&reorder->lock);
+@@ -193,7 +182,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
+               list_del_init(&padata->list);
+               atomic_dec(&pd->reorder_objects);
+-              pd->processed++;
++              pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1,
++                                          false);
+               spin_unlock(&reorder->lock);
+               goto out;
+@@ -216,6 +206,7 @@ static void padata_reorder(struct parallel_data *pd)
+       struct padata_priv *padata;
+       struct padata_serial_queue *squeue;
+       struct padata_instance *pinst = pd->pinst;
++      struct padata_parallel_queue *next_queue;
+       /*
+        * We need to ensure that only one cpu can work on dequeueing of
+@@ -247,7 +238,6 @@ static void padata_reorder(struct parallel_data *pd)
+                * so exit immediately.
+                */
+               if (PTR_ERR(padata) == -ENODATA) {
+-                      del_timer(&pd->timer);
+                       spin_unlock_bh(&pd->lock);
+                       return;
+               }
+@@ -266,70 +256,29 @@ static void padata_reorder(struct parallel_data *pd)
+       /*
+        * The next object that needs serialization might have arrived to
+-       * the reorder queues in the meantime, we will be called again
+-       * from the timer function if no one else cares for it.
++       * the reorder queues in the meantime.
+        *
+-       * Ensure reorder_objects is read after pd->lock is dropped so we see
+-       * an increment from another task in padata_do_serial.  Pairs with
++       * Ensure reorder queue is read after pd->lock is dropped so we see
++       * new objects from another task in padata_do_serial.  Pairs with
+        * smp_mb__after_atomic in padata_do_serial.
+        */
+       smp_mb();
+-      if (atomic_read(&pd->reorder_objects)
+-                      && !(pinst->flags & PADATA_RESET))
+-              mod_timer(&pd->timer, jiffies + HZ);
+-      else
+-              del_timer(&pd->timer);
+-      return;
++      next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
++      if (!list_empty(&next_queue->reorder.list))
++              queue_work(pinst->wq, &pd->reorder_work);
+ }
+ static void invoke_padata_reorder(struct work_struct *work)
+ {
+-      struct padata_parallel_queue *pqueue;
+       struct parallel_data *pd;
+       local_bh_disable();
+-      pqueue = container_of(work, struct padata_parallel_queue, reorder_work);
+-      pd = pqueue->pd;
++      pd = container_of(work, struct parallel_data, reorder_work);
+       padata_reorder(pd);
+       local_bh_enable();
+ }
+-static void padata_reorder_timer(unsigned long arg)
+-{
+-      struct parallel_data *pd = (struct parallel_data *)arg;
+-      unsigned int weight;
+-      int target_cpu, cpu;
+-
+-      cpu = get_cpu();
+-
+-      /* We don't lock pd here to not interfere with parallel processing
+-       * padata_reorder() calls on other CPUs. We just need any CPU out of
+-       * the cpumask.pcpu set. It would be nice if it's the right one but
+-       * it doesn't matter if we're off to the next one by using an outdated
+-       * pd->processed value.
+-       */
+-      weight = cpumask_weight(pd->cpumask.pcpu);
+-      target_cpu = padata_index_to_cpu(pd, pd->processed % weight);
+-
+-      /* ensure to call the reorder callback on the correct CPU */
+-      if (cpu != target_cpu) {
+-              struct padata_parallel_queue *pqueue;
+-              struct padata_instance *pinst;
+-
+-              /* The timer function is serialized wrt itself -- no locking
+-               * needed.
+-               */
+-              pinst = pd->pinst;
+-              pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
+-              queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work);
+-      } else {
+-              padata_reorder(pd);
+-      }
+-
+-      put_cpu();
+-}
+-
+ static void padata_serial_worker(struct work_struct *serial_work)
+ {
+       struct padata_serial_queue *squeue;
+@@ -383,9 +332,8 @@ void padata_do_serial(struct padata_priv *padata)
+       cpu = get_cpu();
+-      /* We need to run on the same CPU padata_do_parallel(.., padata, ..)
+-       * was called on -- or, at least, enqueue the padata object into the
+-       * correct per-cpu queue.
++      /* We need to enqueue the padata object into the correct
++       * per-cpu queue.
+        */
+       if (cpu != padata->cpu) {
+               reorder_via_wq = 1;
+@@ -395,12 +343,12 @@ void padata_do_serial(struct padata_priv *padata)
+       pqueue = per_cpu_ptr(pd->pqueue, cpu);
+       spin_lock(&pqueue->reorder.lock);
+-      atomic_inc(&pd->reorder_objects);
+       list_add_tail(&padata->list, &pqueue->reorder.list);
++      atomic_inc(&pd->reorder_objects);
+       spin_unlock(&pqueue->reorder.lock);
+       /*
+-       * Ensure the atomic_inc of reorder_objects above is ordered correctly
++       * Ensure the addition to the reorder list is ordered correctly
+        * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
+        * in padata_reorder.
+        */
+@@ -408,13 +356,7 @@ void padata_do_serial(struct padata_priv *padata)
+       put_cpu();
+-      /* If we're running on the wrong CPU, call padata_reorder() via a
+-       * kernel worker.
+-       */
+-      if (reorder_via_wq)
+-              queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
+-      else
+-              padata_reorder(pd);
++      padata_reorder(pd);
+ }
+ EXPORT_SYMBOL(padata_do_serial);
+@@ -470,14 +412,12 @@ static void padata_init_pqueues(struct parallel_data *pd)
+                       continue;
+               }
+-              pqueue->pd = pd;
+               pqueue->cpu_index = cpu_index;
+               cpu_index++;
+               __padata_list_init(&pqueue->reorder);
+               __padata_list_init(&pqueue->parallel);
+               INIT_WORK(&pqueue->work, padata_parallel_worker);
+-              INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder);
+               atomic_set(&pqueue->num_obj, 0);
+       }
+ }
+@@ -505,12 +445,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+       padata_init_pqueues(pd);
+       padata_init_squeues(pd);
+-      setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+       atomic_set(&pd->seq_nr, -1);
+       atomic_set(&pd->reorder_objects, 0);
+       atomic_set(&pd->refcnt, 1);
+       pd->pinst = pinst;
+       spin_lock_init(&pd->lock);
++      pd->cpu = cpumask_first(pcpumask);
++      INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
+       return pd;
+-- 
+2.25.1
+
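The patch works because of how jobs are distributed: the sequence counter assigns object s to the reorder queue of the s-th CPU in the parallel mask, round robin, so pd->cpu always names the only queue that can hold the next object in order. Checking that one list distinguishes "next job still in flight" from "nothing left to do", which is exactly what the old timer could not. A hypothetical round-robin model (userspace sketch; the modulo advance stands in for cpumask_next_wrap()):

#include <stdio.h>

#define NR_CPUS 4
#define NR_OBJS 8

int main(void)
{
	int done[NR_OBJS] = { 1, 1, 0, 1, 1, 1, 1, 1 };	/* seq 2 pending */
	int cpu = 0;					/* pd->cpu        */
	int seq;

	for (seq = 0; seq < NR_OBJS; seq++) {
		if (!done[seq]) {
			/* That queue is empty: the next object is still in
			 * flight.  Stop here; the worker that completes seq
			 * queues the reorder work again.  No timer needed. */
			printf("seq %d pending on cpu %d, requeue work\n",
			       seq, cpu);
			break;
		}
		printf("serialize seq %d from cpu %d\n", seq, cpu);
		cpu = (cpu + 1) % NR_CPUS;
	}
	return 0;
}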
diff --git a/queue-4.14/padata-set-cpu_index-of-unused-cpus-to-1.patch b/queue-4.14/padata-set-cpu_index-of-unused-cpus-to-1.patch
new file mode 100644
index 0000000..ca74b6b
--- /dev/null
+++ b/queue-4.14/padata-set-cpu_index-of-unused-cpus-to-1.patch
@@ -0,0 +1,50 @@
+From 49b9d2c5beefa6ddbfbfa14c76a883aa1cf425ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 May 2020 16:46:55 -0400
+Subject: padata: set cpu_index of unused CPUs to -1
+
+From: Mathias Krause <minipli@googlemail.com>
+
+[ Upstream commit 1bd845bcb41d5b7f83745e0cb99273eb376f2ec5 ]
+
+The parallel queue per-cpu data structure gets initialized only for CPUs
+in the 'pcpu' CPU mask set. This is not sufficient: the reorder timer may
+run on a different CPU and wrongly decide it is the target CPU for the
+next reorder item, because per-cpu memory gets memset(0) and we might be
+waiting for the first CPU in cpumask.pcpu, i.e. cpu_index 0.
+
+Make the '__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index'
+compare in padata_get_next() fail in this case by initializing the
+cpu_index member of all per-cpu parallel queues. Use -1 for unused ones.
+
+Signed-off-by: Mathias Krause <minipli@googlemail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/padata.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 40a0ebb8ea51..858e82179744 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -462,8 +462,14 @@ static void padata_init_pqueues(struct parallel_data *pd)
+       struct padata_parallel_queue *pqueue;
+       cpu_index = 0;
+-      for_each_cpu(cpu, pd->cpumask.pcpu) {
++      for_each_possible_cpu(cpu) {
+               pqueue = per_cpu_ptr(pd->pqueue, cpu);
++
++              if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
++                      pqueue->cpu_index = -1;
++                      continue;
++              }
++
+               pqueue->pd = pd;
+               pqueue->cpu_index = cpu_index;
+               cpu_index++;
+-- 
+2.25.1
+
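The pattern behind this fix generalizes: per-cpu storage exists for every possible CPU, so any field that later feeds a comparison (here the cpu_index check in padata_get_next()) must be initialized for every possible CPU, with a sentinel for those outside the working mask. A hypothetical userspace model, using -1 as the sentinel:

#include <stdio.h>

#define NR_POSSIBLE 8

int main(void)
{
	int in_mask[NR_POSSIBLE] = { 1, 0, 1, 0, 1, 1, 0, 0 };
	int cpu_index[NR_POSSIBLE];
	int cpu, idx = 0;

	for (cpu = 0; cpu < NR_POSSIBLE; cpu++) {
		if (!in_mask[cpu]) {
			cpu_index[cpu] = -1;	/* unused: can never match */
			continue;
		}
		cpu_index[cpu] = idx++;		/* dense index within mask */
	}
	for (cpu = 0; cpu < NR_POSSIBLE; cpu++)
		printf("cpu %d -> cpu_index %d\n", cpu, cpu_index[cpu]);
	return 0;
}

With zero-initialized memory, an unused queue would otherwise carry cpu_index 0 and could be mistaken for the first CPU in the mask; the sentinel makes that comparison fail by construction.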
diff --git a/queue-4.14/series b/queue-4.14/series
index 9bc95f36d71fd9527465faf0969a95caf3e1f13d..c28ade9f459405ace5cf0428489dc8739c29af93 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -23,3 +23,8 @@ ceph-fix-double-unlock-in-handle_cap_export.patch
 usb-core-fix-misleading-driver-bug-report.patch
 platform-x86-asus-nb-wmi-do-not-load-on-asus-t100ta-.patch
 arm-futex-address-build-warning.patch
+padata-set-cpu_index-of-unused-cpus-to-1.patch
+padata-replace-delayed-timer-with-immediate-workqueu.patch
+padata-initialize-pd-cpu-with-effective-cpumask.patch
+padata-purge-get_cpu-and-reorder_via_wq-from-padata_.patch
+arm64-fix-the-flush_icache_range-arguments-in-machin.patch