git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 20 Jun 2019 14:33:31 +0000 (16:33 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 20 Jun 2019 14:33:31 +0000 (16:33 +0200)
added patches:
coredump-fix-race-condition-between-collapse_huge_page-and-core-dumping.patch
mm-mmu_gather-remove-__tlb_reset_range-for-force-flush.patch
nvme-tcp-fix-possible-null-deref-on-a-timed-out-io-queue-connect.patch
nvme-tcp-fix-queue-mapping-when-queue-count-is-limited.patch
nvme-tcp-rename-function-to-have-nvme_tcp-prefix.patch

queue-5.1/coredump-fix-race-condition-between-collapse_huge_page-and-core-dumping.patch [new file with mode: 0644]
queue-5.1/mm-mmu_gather-remove-__tlb_reset_range-for-force-flush.patch [new file with mode: 0644]
queue-5.1/nvme-tcp-fix-possible-null-deref-on-a-timed-out-io-queue-connect.patch [new file with mode: 0644]
queue-5.1/nvme-tcp-fix-queue-mapping-when-queue-count-is-limited.patch [new file with mode: 0644]
queue-5.1/nvme-tcp-rename-function-to-have-nvme_tcp-prefix.patch [new file with mode: 0644]
queue-5.1/series

diff --git a/queue-5.1/coredump-fix-race-condition-between-collapse_huge_page-and-core-dumping.patch b/queue-5.1/coredump-fix-race-condition-between-collapse_huge_page-and-core-dumping.patch
new file mode 100644 (file)
index 0000000..a22caed
--- /dev/null
@@ -0,0 +1,97 @@
+From 59ea6d06cfa9247b586a695c21f94afa7183af74 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Thu, 13 Jun 2019 15:56:11 -0700
+Subject: coredump: fix race condition between collapse_huge_page() and core dumping
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 59ea6d06cfa9247b586a695c21f94afa7183af74 upstream.
+
+When fixing the race conditions between the coredump and the mmap_sem
+holders outside the context of the process, we focused on
+mmget_not_zero()/get_task_mm() callers in 04f5866e41fb70 ("coredump: fix
+race condition between mmget_not_zero()/get_task_mm() and core
+dumping"), but those aren't the only cases where the mmap_sem can be
+taken outside of the context of the process as Michal Hocko noticed
+while backporting that commit to older -stable kernels.
+
+If mmgrab() is called in the context of the process, but then the
+mm_count reference is transferred outside the context of the process,
+that can also be a problem if the mmap_sem has to be taken for writing
+through that mm_count reference.
+
+khugepaged registration calls mmgrab() in the context of the process,
+but the mmap_sem for writing is taken later in the context of the
+khugepaged kernel thread.
+
+collapse_huge_page(), after taking the mmap_sem for writing, doesn't
+modify any vma, so it's not obvious that it could cause a problem for the
+coredump, but it happens to modify the pmd in a way that breaks an
+invariant that pmd_trans_huge_lock() relies upon.  collapse_huge_page()
+needs the mmap_sem for writing just to block concurrent page faults that
+call pmd_trans_huge_lock().
+
+Specifically the invariant that "!pmd_trans_huge()" cannot become a
+"pmd_trans_huge()" doesn't hold while collapse_huge_page() runs.
+
+The coredump will call __get_user_pages() without mmap_sem for reading,
+which eventually can invoke a lockless page fault which will need a
+functional pmd_trans_huge_lock().
+
+So collapse_huge_page() needs to use mmget_still_valid() to check it's
+not running concurrently with the coredump...  as long as the coredump
+can invoke page faults without holding the mmap_sem for reading.
+
+This has "Fixes: khugepaged" to facilitate backporting, but in my view
+it's more a bug in the coredump code that will eventually have to be
+rewritten to stop invoking page faults without the mmap_sem for reading.
+So the long term plan is still to drop all mmget_still_valid().
+
+Link: http://lkml.kernel.org/r/20190607161558.32104-1-aarcange@redhat.com
+Fixes: ba76149f47d8 ("thp: khugepaged")
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Reported-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Jason Gunthorpe <jgg@mellanox.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/sched/mm.h |    4 ++++
+ mm/khugepaged.c          |    3 +++
+ 2 files changed, 7 insertions(+)
+
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -54,6 +54,10 @@ static inline void mmdrop(struct mm_stru
+  * followed by taking the mmap_sem for writing before modifying the
+  * vmas or anything the coredump pretends not to change from under it.
+  *
++ * It also has to be called when mmgrab() is used in the context of
++ * the process, but then the mm_count refcount is transferred outside
++ * the context of the process to run down_write() on that pinned mm.
++ *
+  * NOTE: find_extend_vma() called from GUP context is the only place
+  * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+  * for reading and outside the context of the process, so it is also
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1004,6 +1004,9 @@ static void collapse_huge_page(struct mm
+        * handled by the anon_vma lock + PG_lock.
+        */
+       down_write(&mm->mmap_sem);
++      result = SCAN_ANY_PROCESS;
++      if (!mmget_still_valid(mm))
++              goto out;
+       result = hugepage_vma_revalidate(mm, address, &vma);
+       if (result)
+               goto out;
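
For readers tracing the locking logic, the hunk above boils down to: take the
mmap_sem for writing, then bail out before touching the pmd if a coredump has
already started.  Below is a minimal userspace analogy of that bail-out
pattern; struct fake_mm, mm_still_valid() and the dump_started flag are
made-up stand-ins for mm_struct, mmget_still_valid() and mm->core_state, so
this is a sketch of the pattern rather than kernel code.

---8<---
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for mm_struct: one rwlock playing the role of mmap_sem plus
 * a flag playing the role of mm->core_state ("a coredump has started"). */
struct fake_mm {
        pthread_rwlock_t mmap_sem;
        atomic_bool dump_started;
        int pmd_trans_huge;     /* the invariant the dumper relies on */
};

/* Analogue of mmget_still_valid(): false once the dumper may be walking the
 * address space without holding mmap_sem for reading. */
static bool mm_still_valid(struct fake_mm *mm)
{
        return !atomic_load(&mm->dump_started);
}

/* Analogue of collapse_huge_page(): even with the lock held for writing it
 * must not flip the pmd invariant once the dump has begun. */
static int collapse(struct fake_mm *mm)
{
        int ret = 0;

        pthread_rwlock_wrlock(&mm->mmap_sem);
        if (!mm_still_valid(mm)) {
                ret = -1;               /* SCAN_ANY_PROCESS in the real code */
                goto out;
        }
        mm->pmd_trans_huge = 1;         /* the change the dumper must not see */
out:
        pthread_rwlock_unlock(&mm->mmap_sem);
        return ret;
}

int main(void)
{
        struct fake_mm mm = { .pmd_trans_huge = 0 };

        pthread_rwlock_init(&mm.mmap_sem, NULL);
        printf("before dump starts: collapse -> %d\n", collapse(&mm));
        atomic_store(&mm.dump_started, true);
        printf("after dump starts:  collapse -> %d\n", collapse(&mm));
        return 0;
}
---8<---
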
diff --git a/queue-5.1/mm-mmu_gather-remove-__tlb_reset_range-for-force-flush.patch b/queue-5.1/mm-mmu_gather-remove-__tlb_reset_range-for-force-flush.patch
new file mode 100644 (file)
index 0000000..cb7eaf8
--- /dev/null
@@ -0,0 +1,198 @@
+From 7a30df49f63ad92318ddf1f7498d1129a77dd4bd Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@linux.alibaba.com>
+Date: Thu, 13 Jun 2019 15:56:05 -0700
+Subject: mm: mmu_gather: remove __tlb_reset_range() for force flush
+
+From: Yang Shi <yang.shi@linux.alibaba.com>
+
+commit 7a30df49f63ad92318ddf1f7498d1129a77dd4bd upstream.
+
+A few new fields were added to mmu_gather to make TLB flushing smarter for
+huge pages by telling what level of the page table has changed.
+
+__tlb_reset_range() is used to reset all this page table state to
+unchanged; it is called by the TLB flush path when parallel mapping
+changes happen for the same range under a non-exclusive lock (read mmap_sem).
+
+Before commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in
+munmap"), the syscalls (e.g. MADV_DONTNEED, MADV_FREE) which may update
+PTEs in parallel don't remove page tables.  But the aforementioned
+commit may do munmap() under read mmap_sem and free page tables.  This
+may result in a program hang on aarch64, as reported by Jan Stancek.
+The problem can be reproduced with his test program, slightly modified
+as shown below.
+
+---8<---
+
+static int map_size = 4096;
+static int num_iter = 500;
+static long threads_total;
+
+static void *distant_area;
+
+void *map_write_unmap(void *ptr)
+{
+       int *fd = ptr;
+       unsigned char *map_address;
+       int i, j = 0;
+
+       for (i = 0; i < num_iter; i++) {
+               map_address = mmap(distant_area, (size_t) map_size, PROT_WRITE | PROT_READ,
+                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+               if (map_address == MAP_FAILED) {
+                       perror("mmap");
+                       exit(1);
+               }
+
+               for (j = 0; j < map_size; j++)
+                       map_address[j] = 'b';
+
+               if (munmap(map_address, map_size) == -1) {
+                       perror("munmap");
+                       exit(1);
+               }
+       }
+
+       return NULL;
+}
+
+void *dummy(void *ptr)
+{
+       return NULL;
+}
+
+int main(void)
+{
+       pthread_t thid[2];
+
+       /* hint for mmap in map_write_unmap() */
+       distant_area = mmap(0, DISTANT_MMAP_SIZE, PROT_WRITE | PROT_READ,
+                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+       munmap(distant_area, (size_t)DISTANT_MMAP_SIZE);
+       distant_area += DISTANT_MMAP_SIZE / 2;
+
+       while (1) {
+               pthread_create(&thid[0], NULL, map_write_unmap, NULL);
+               pthread_create(&thid[1], NULL, dummy, NULL);
+
+               pthread_join(thid[0], NULL);
+               pthread_join(thid[1], NULL);
+       }
+}
+---8<---
+
+The program may result in a parallel execution like the one below:
+
+        t1                                        t2
+munmap(map_address)
+  downgrade_write(&mm->mmap_sem);
+  unmap_region()
+  tlb_gather_mmu()
+    inc_tlb_flush_pending(tlb->mm);
+  free_pgtables()
+    tlb->freed_tables = 1
+    tlb->cleared_pmds = 1
+
+                                        pthread_exit()
+                                        madvise(thread_stack, 8M, MADV_DONTNEED)
+                                          zap_page_range()
+                                            tlb_gather_mmu()
+                                              inc_tlb_flush_pending(tlb->mm);
+
+  tlb_finish_mmu()
+    if (mm_tlb_flush_nested(tlb->mm))
+      __tlb_reset_range()
+
+__tlb_reset_range() would reset the freed_tables and cleared_* bits, but
+this causes an inconsistency for munmap(), which does free page tables.
+Some architectures, e.g. aarch64, may then not flush the TLB completely
+as expected, leaving stale TLB entries behind.
+
+Use a fullmm flush since it yields much better performance on aarch64,
+and non-fullmm doesn't yield a significant difference on x86.
+
+The originally proposed fix came from Jan Stancek, who did most of the
+debugging of this issue; I just wrapped everything up together.
+
+Jan's testing results:
+
+v5.2-rc2-24-gbec7550cca10
+--------------------------
+         mean     stddev
+real    37.382   2.780
+user     1.420   0.078
+sys     54.658   1.855
+
+v5.2-rc2-24-gbec7550cca10 + "mm: mmu_gather: remove __tlb_reset_range() for force flush"
+---------------------------------------------------------------------------------------
+         mean     stddev
+real    37.119   2.105
+user     1.548   0.087
+sys     55.698   1.357
+
+[akpm@linux-foundation.org: coding-style fixes]
+Link: http://lkml.kernel.org/r/1558322252-113575-1-git-send-email-yang.shi@linux.alibaba.com
+Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap")
+Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
+Signed-off-by: Jan Stancek <jstancek@redhat.com>
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Tested-by: Jan Stancek <jstancek@redhat.com>
+Suggested-by: Will Deacon <will.deacon@arm.com>
+Tested-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Nick Piggin <npiggin@gmail.com>
+Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Cc: Nadav Amit <namit@vmware.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: <stable@vger.kernel.org>   [4.20+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mmu_gather.c |   24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+--- a/mm/mmu_gather.c
++++ b/mm/mmu_gather.c
+@@ -93,8 +93,17 @@ void arch_tlb_finish_mmu(struct mmu_gath
+       struct mmu_gather_batch *batch, *next;
+       if (force) {
++              /*
++               * The aarch64 yields better performance with fullmm by
++               * avoiding multiple CPUs spamming TLBI messages at the
++               * same time.
++               *
++               * On x86 non-fullmm doesn't yield significant difference
++               * against fullmm.
++               */
++              tlb->fullmm = 1;
+               __tlb_reset_range(tlb);
+-              __tlb_adjust_range(tlb, start, end - start);
++              tlb->freed_tables = 1;
+       }
+       tlb_flush_mmu(tlb);
+@@ -249,10 +258,15 @@ void tlb_finish_mmu(struct mmu_gather *t
+ {
+       /*
+        * If there are parallel threads are doing PTE changes on same range
+-       * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
+-       * flush by batching, a thread has stable TLB entry can fail to flush
+-       * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
+-       * forcefully if we detect parallel PTE batching threads.
++       * under non-exclusive lock (e.g., mmap_sem read-side) but defer TLB
++       * flush by batching, one thread may end up seeing inconsistent PTEs
++       * and result in having stale TLB entries.  So flush TLB forcefully
++       * if we detect parallel PTE batching threads.
++       *
++       * However, some syscalls, e.g. munmap(), may free page tables, this
++       * needs force flush everything in the given range. Otherwise this
++       * may result in having stale TLB entries for some architectures,
++       * e.g. aarch64, that could specify flush what level TLB.
+        */
+       bool force = mm_tlb_flush_nested(tlb->mm);
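
To see concretely what the first hunk above changes, here is a small
standalone C model of the mmu_gather bookkeeping; struct gather,
reset_range() and the finish_force_*() helpers are simplified stand-ins
rather than the real mm/mmu_gather.c API.  The old forced-flush path reset
the state and re-added only the address range, losing the freed_tables hint;
the new path keeps a fullmm flush with freed_tables set, which is what
architectures such as aarch64 need in order to also invalidate their
walk caches.

---8<---
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct mmu_gather: only the fields that matter
 * for the nested (forced) flush case discussed above. */
struct gather {
        unsigned long start, end;
        bool fullmm;
        bool freed_tables;
};

/* Analogue of __tlb_reset_range(): forgets both the range and the
 * "page tables were freed" hint. */
static void reset_range(struct gather *tlb)
{
        tlb->start = ~0UL;
        tlb->end = 0;
        tlb->freed_tables = false;
}

/* Old behaviour on a forced flush: reset, then re-add only the range.
 * freed_tables is now false even though munmap() freed page tables. */
static void finish_force_old(struct gather *tlb, unsigned long s, unsigned long e)
{
        reset_range(tlb);
        tlb->start = s;
        tlb->end = e;
}

/* New behaviour: flush the whole mm and keep the freed_tables hint. */
static void finish_force_new(struct gather *tlb)
{
        tlb->fullmm = true;
        reset_range(tlb);
        tlb->freed_tables = true;
}

int main(void)
{
        struct gather a = { 0x1000, 0x2000, false, true };
        struct gather b = a;

        finish_force_old(&a, 0x1000, 0x2000);
        finish_force_new(&b);
        printf("old: fullmm=%d freed_tables=%d\n", a.fullmm, a.freed_tables);
        printf("new: fullmm=%d freed_tables=%d\n", b.fullmm, b.freed_tables);
        return 0;
}
---8<---
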
diff --git a/queue-5.1/nvme-tcp-fix-possible-null-deref-on-a-timed-out-io-queue-connect.patch b/queue-5.1/nvme-tcp-fix-possible-null-deref-on-a-timed-out-io-queue-connect.patch
new file mode 100644 (file)
index 0000000..82e02b3
--- /dev/null
@@ -0,0 +1,32 @@
+From f34e25898a608380a60135288019c4cb6013bec8 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagi@grimberg.me>
+Date: Mon, 29 Apr 2019 16:25:48 -0700
+Subject: nvme-tcp: fix possible null deref on a timed out io queue connect
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+commit f34e25898a608380a60135288019c4cb6013bec8 upstream.
+
+If an I/O queue connect times out, we might have freed the queue socket
+already, so check for that on the error path in nvme_tcp_start_queue.
+
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/tcp.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1423,7 +1423,8 @@ static int nvme_tcp_start_queue(struct n
+       if (!ret) {
+               set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+       } else {
+-              __nvme_tcp_stop_queue(&ctrl->queues[idx]);
++              if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
++                      __nvme_tcp_stop_queue(&ctrl->queues[idx]);
+               dev_err(nctrl->device,
+                       "failed to connect queue: %d ret=%d\n", idx, ret);
+       }
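
The guard added above follows a common error-path rule: only tear down state
that is known to still exist.  A hedged userspace sketch of that rule is
below; struct toy_queue, its allocated flag and start_queue()/stop_queue()
are illustrative stand-ins, not the driver's real types or functions.

---8<---
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for an nvme-tcp queue: the socket may already be
 * gone if the connect attempt timed out. */
struct toy_queue {
        bool allocated;         /* plays the role of NVME_TCP_Q_ALLOCATED */
        int *sock;              /* NULL once the socket has been freed */
};

static void stop_queue(struct toy_queue *q)
{
        /* The real __nvme_tcp_stop_queue() touches the socket, so calling
         * it after the socket is gone dereferences freed state. */
        printf("stopping queue, sock=%p\n", (void *)q->sock);
}

static int start_queue(struct toy_queue *q, int connect_ret)
{
        if (!connect_ret)
                return 0;       /* connected: mark the queue live, etc. */

        /* Error path: only stop the queue if it is still allocated. */
        if (q->allocated)
                stop_queue(q);
        return connect_ret;
}

int main(void)
{
        struct toy_queue timed_out = { .allocated = false, .sock = NULL };

        /* Simulate a timed-out connect: the guard skips stop_queue(). */
        if (start_queue(&timed_out, -1))
                printf("failed to connect queue\n");
        return 0;
}
---8<---
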
diff --git a/queue-5.1/nvme-tcp-fix-queue-mapping-when-queue-count-is-limited.patch b/queue-5.1/nvme-tcp-fix-queue-mapping-when-queue-count-is-limited.patch
new file mode 100644 (file)
index 0000000..b71915d
--- /dev/null
@@ -0,0 +1,137 @@
+From 6486199378a505c58fddc47459631235c9fb7638 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagi@grimberg.me>
+Date: Tue, 28 May 2019 22:49:05 -0700
+Subject: nvme-tcp: fix queue mapping when queue count is limited
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+commit 6486199378a505c58fddc47459631235c9fb7638 upstream.
+
+When the controller supports fewer queues than requested, we
+should make sure that queue mapping does the right thing and
+not assume that all queues are available. This fixes a crash
+when the controller supports fewer queues than requested.
+
+The rules are:
+1. if no write queues are requested, we assign the available queues
+   to the default queue map. The default and read queue maps share the
+   existing queues.
+2. if write queues are requested:
+  - first make sure that the read queue map gets the requested
+    nr_io_queues count
+  - then grant the default queue map the minimum between the requested
+    nr_write_queues and the remaining queues. If there are no available
+    queues to dedicate to the default queue map, fall back to (1) and
+    share all the queues in the existing queue map.
+
+Also, provide a log indication of how we constructed the different
+queue maps.
+
+Reported-by: Harris, James R <james.r.harris@intel.com>
+Tested-by: Jim Harris <james.r.harris@intel.com>
+Cc: <stable@vger.kernel.org> # v5.0+
+Suggested-by: Roy Shterman <roys@lightbitslabs.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/tcp.c |   57 ++++++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 50 insertions(+), 7 deletions(-)
+
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -111,6 +111,7 @@ struct nvme_tcp_ctrl {
+       struct work_struct      err_work;
+       struct delayed_work     connect_work;
+       struct nvme_tcp_request async_req;
++      u32                     io_queues[HCTX_MAX_TYPES];
+ };
+ static LIST_HEAD(nvme_tcp_ctrl_list);
+@@ -1564,6 +1565,35 @@ static unsigned int nvme_tcp_nr_io_queue
+       return nr_io_queues;
+ }
++static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
++              unsigned int nr_io_queues)
++{
++      struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
++      struct nvmf_ctrl_options *opts = nctrl->opts;
++
++      if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
++              /*
++               * separate read/write queues
++               * hand out dedicated default queues only after we have
++               * sufficient read queues.
++               */
++              ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
++              nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
++              ctrl->io_queues[HCTX_TYPE_DEFAULT] =
++                      min(opts->nr_write_queues, nr_io_queues);
++              nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
++      } else {
++              /*
++               * shared read/write queues
++               * either no write queues were requested, or we don't have
++               * sufficient queue count to have dedicated default queues.
++               */
++              ctrl->io_queues[HCTX_TYPE_DEFAULT] =
++                      min(opts->nr_io_queues, nr_io_queues);
++              nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
++      }
++}
++
+ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+ {
+       unsigned int nr_io_queues;
+@@ -1581,6 +1611,8 @@ static int nvme_tcp_alloc_io_queues(stru
+       dev_info(ctrl->device,
+               "creating %d I/O queues.\n", nr_io_queues);
++      nvme_tcp_set_io_queues(ctrl, nr_io_queues);
++
+       return __nvme_tcp_alloc_io_queues(ctrl);
+ }
+@@ -2089,23 +2121,34 @@ static blk_status_t nvme_tcp_queue_rq(st
+ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
+ {
+       struct nvme_tcp_ctrl *ctrl = set->driver_data;
++      struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+-      set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+-      set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
+-      if (ctrl->ctrl.opts->nr_write_queues) {
++      if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
+               /* separate read/write queues */
+               set->map[HCTX_TYPE_DEFAULT].nr_queues =
+-                              ctrl->ctrl.opts->nr_write_queues;
++                      ctrl->io_queues[HCTX_TYPE_DEFAULT];
++              set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
++              set->map[HCTX_TYPE_READ].nr_queues =
++                      ctrl->io_queues[HCTX_TYPE_READ];
+               set->map[HCTX_TYPE_READ].queue_offset =
+-                              ctrl->ctrl.opts->nr_write_queues;
++                      ctrl->io_queues[HCTX_TYPE_DEFAULT];
+       } else {
+-              /* mixed read/write queues */
++              /* shared read/write queues */
+               set->map[HCTX_TYPE_DEFAULT].nr_queues =
+-                              ctrl->ctrl.opts->nr_io_queues;
++                      ctrl->io_queues[HCTX_TYPE_DEFAULT];
++              set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
++              set->map[HCTX_TYPE_READ].nr_queues =
++                      ctrl->io_queues[HCTX_TYPE_DEFAULT];
+               set->map[HCTX_TYPE_READ].queue_offset = 0;
+       }
+       blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+       blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
++
++      dev_info(ctrl->ctrl.device,
++              "mapped %d/%d default/read queues.\n",
++              ctrl->io_queues[HCTX_TYPE_DEFAULT],
++              ctrl->io_queues[HCTX_TYPE_READ]);
++
+       return 0;
+ }
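
The arithmetic in nvme_tcp_set_io_queues() above is compact, so the following
standalone program replays the two rules from the commit message for a
controller that grants fewer queues than requested; split_queues() and
min_u() are made-up helpers that mirror, but are not, the driver code.

---8<---
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/*
 * granted:  queue count the controller actually gave us
 * nr_read:  requested nr_io_queues (the read map's share)
 * nr_write: requested nr_write_queues
 * On return, *def and *read mirror io_queues[HCTX_TYPE_DEFAULT] and
 * io_queues[HCTX_TYPE_READ] in the patch above.
 */
static void split_queues(unsigned int granted, unsigned int nr_read,
                         unsigned int nr_write,
                         unsigned int *def, unsigned int *read)
{
        if (nr_write && nr_read < granted) {
                /* Rule 2: the read map gets its full request first, then
                 * the default map gets at most what is left over. */
                *read = nr_read;
                granted -= *read;
                *def = min_u(nr_write, granted);
        } else {
                /* Rule 1 (or not enough queues): default and read maps
                 * share the same queues, so only the default count is set. */
                *def = min_u(nr_read, granted);
                *read = 0;
        }
}

int main(void)
{
        unsigned int def, read;

        /* 4 read + 4 write queues requested, controller granted only 6. */
        split_queues(6, 4, 4, &def, &read);
        printf("mapped %u/%u default/read queues\n", def, read);   /* 2/4 */

        /* Controller granted only 3: not enough for dedicated defaults. */
        split_queues(3, 4, 4, &def, &read);
        printf("mapped %u/%u default/read queues\n", def, read);   /* 3/0 */
        return 0;
}
---8<---

With 4 read and 4 write queues requested but only 6 granted, the read map
keeps its 4 queues and the default map gets the remaining 2, matching the
"mapped 2/4 default/read queues." line the patch now logs.
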
diff --git a/queue-5.1/nvme-tcp-rename-function-to-have-nvme_tcp-prefix.patch b/queue-5.1/nvme-tcp-rename-function-to-have-nvme_tcp-prefix.patch
new file mode 100644 (file)
index 0000000..04dfd35
--- /dev/null
@@ -0,0 +1,75 @@
+From efb973b19b88642bb7e08b8ce8e03b0bbd2a7e2a Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagi@grimberg.me>
+Date: Wed, 24 Apr 2019 11:53:19 -0700
+Subject: nvme-tcp: rename function to have nvme_tcp prefix
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+commit efb973b19b88642bb7e08b8ce8e03b0bbd2a7e2a upstream.
+
+Usually the nvme_ prefix is for core functions.
+While we're cleaning up, also remove redundant empty lines.
+
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Minwoo Im <minwoo.im@samsung.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/tcp.c |   10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -473,7 +473,6 @@ static int nvme_tcp_handle_c2h_data(stru
+       }
+       return 0;
+-
+ }
+ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
+@@ -634,7 +633,6 @@ static inline void nvme_tcp_end_request(
+       nvme_end_request(rq, cpu_to_le16(status << 1), res);
+ }
+-
+ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+                             unsigned int *offset, size_t *len)
+ {
+@@ -1535,7 +1533,7 @@ out_free_queue:
+       return ret;
+ }
+-static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
++static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+ {
+       int i, ret;
+@@ -1565,7 +1563,7 @@ static unsigned int nvme_tcp_nr_io_queue
+       return nr_io_queues;
+ }
+-static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
++static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+ {
+       unsigned int nr_io_queues;
+       int ret;
+@@ -1582,7 +1580,7 @@ static int nvme_alloc_io_queues(struct n
+       dev_info(ctrl->device,
+               "creating %d I/O queues.\n", nr_io_queues);
+-      return nvme_tcp_alloc_io_queues(ctrl);
++      return __nvme_tcp_alloc_io_queues(ctrl);
+ }
+ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
+@@ -1599,7 +1597,7 @@ static int nvme_tcp_configure_io_queues(
+ {
+       int ret;
+-      ret = nvme_alloc_io_queues(ctrl);
++      ret = nvme_tcp_alloc_io_queues(ctrl);
+       if (ret)
+               return ret;
diff --git a/queue-5.1/series b/queue-5.1/series
index a6a02543f3982b6b7c0725f45ac92e3731605b53..47a7d1619edbfaeb86c080bcff56f681b1ceb43b 100644 (file)
@@ -92,3 +92,8 @@ scsi-scsi_dh_alua-fix-possible-null-ptr-deref.patch
 scsi-libsas-delete-sas-port-if-expander-discover-fai.patch
 mlxsw-spectrum-prevent-force-of-56g.patch
 ocfs2-fix-error-path-kobject-memory-leak.patch
+mm-mmu_gather-remove-__tlb_reset_range-for-force-flush.patch
+nvme-tcp-rename-function-to-have-nvme_tcp-prefix.patch
+nvme-tcp-fix-possible-null-deref-on-a-timed-out-io-queue-connect.patch
+nvme-tcp-fix-queue-mapping-when-queue-count-is-limited.patch
+coredump-fix-race-condition-between-collapse_huge_page-and-core-dumping.patch