git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.18-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 5 Jun 2017 15:07:21 +0000 (17:07 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 5 Jun 2017 15:07:21 +0000 (17:07 +0200)
added patches:
xfs-bad-assertion-for-delalloc-an-extent-that-start-at-i_size.patch
xfs-fix-indlen-accounting-error-on-partial-delalloc-conversion.patch
xfs-fix-over-copying-of-getbmap-parameters-from-userspace.patch
xfs-fix-unaligned-access-in-xfs_btree_visit_blocks.patch
xfs-fix-up-quotacheck-buffer-list-error-handling.patch
xfs-handle-array-index-overrun-in-xfs_dir2_leaf_readbuf.patch
xfs-prevent-multi-fsb-dir-readahead-from-reading-random-blocks.patch
xfs-xfs_trans_alloc_empty.patch

queue-3.18/series
queue-3.18/xfs-bad-assertion-for-delalloc-an-extent-that-start-at-i_size.patch [new file with mode: 0644]
queue-3.18/xfs-fix-indlen-accounting-error-on-partial-delalloc-conversion.patch [new file with mode: 0644]
queue-3.18/xfs-fix-over-copying-of-getbmap-parameters-from-userspace.patch [new file with mode: 0644]
queue-3.18/xfs-fix-unaligned-access-in-xfs_btree_visit_blocks.patch [new file with mode: 0644]
queue-3.18/xfs-fix-up-quotacheck-buffer-list-error-handling.patch [new file with mode: 0644]
queue-3.18/xfs-handle-array-index-overrun-in-xfs_dir2_leaf_readbuf.patch [new file with mode: 0644]
queue-3.18/xfs-prevent-multi-fsb-dir-readahead-from-reading-random-blocks.patch [new file with mode: 0644]
queue-3.18/xfs-xfs_trans_alloc_empty.patch [new file with mode: 0644]

diff --git a/queue-3.18/series b/queue-3.18/series
index 0aade4e29578775f1f8616868190f2fe575dbcf8..a298432337bed0d97c36765d7a1fbf4d1d244527 100644 (file)
@@ -24,3 +24,11 @@ mm-migrate-fix-refcount-handling-when-hugepage_migration_supported.patch
 mlock-fix-mlock-count-can-not-decrease-in-race-condition.patch
 xfs-fix-missed-holes-in-seek_hole-implementation.patch
 xfs-fix-off-by-one-on-max-nr_pages-in-xfs_find_get_desired_pgoff.patch
+xfs-fix-over-copying-of-getbmap-parameters-from-userspace.patch
+xfs-handle-array-index-overrun-in-xfs_dir2_leaf_readbuf.patch
+xfs-prevent-multi-fsb-dir-readahead-from-reading-random-blocks.patch
+xfs-fix-up-quotacheck-buffer-list-error-handling.patch
+xfs-fix-indlen-accounting-error-on-partial-delalloc-conversion.patch
+xfs-bad-assertion-for-delalloc-an-extent-that-start-at-i_size.patch
+xfs-fix-unaligned-access-in-xfs_btree_visit_blocks.patch
+xfs-xfs_trans_alloc_empty.patch
diff --git a/queue-3.18/xfs-bad-assertion-for-delalloc-an-extent-that-start-at-i_size.patch b/queue-3.18/xfs-bad-assertion-for-delalloc-an-extent-that-start-at-i_size.patch
new file mode 100644 (file)
index 0000000..8aaaa23
--- /dev/null
@@ -0,0 +1,46 @@
+From 892d2a5f705723b2cb488bfb38bcbdcf83273184 Mon Sep 17 00:00:00 2001
+From: Zorro Lang <zlang@redhat.com>
+Date: Mon, 15 May 2017 08:40:02 -0700
+Subject: xfs: bad assertion for delalloc an extent that start at i_size
+
+From: Zorro Lang <zlang@redhat.com>
+
+commit 892d2a5f705723b2cb488bfb38bcbdcf83273184 upstream.
+
+By running fsstress for long enough on RHEL-7, I found an
+assertion failure (harder to reproduce on linux-4.11, but the problem
+is still there):
+
+  XFS: Assertion failed: (iflags & BMV_IF_DELALLOC) != 0, file: fs/xfs/xfs_bmap_util.c
+
+The assertion is in the xfs_getbmap() function:
+
+  if (map[i].br_startblock == DELAYSTARTBLOCK &&
+-->   map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+          ASSERT((iflags & BMV_IF_DELALLOC) != 0);
+
+When map[i].br_startoff == XFS_B_TO_FSB(mp, XFS_ISIZE(ip)), the
+startoff is exactly at EOF. But we only need to assert for delalloc
+extents that lie strictly within EOF, not ones that start at EOF.
+
+Signed-off-by: Zorro Lang <zlang@redhat.com>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_bmap_util.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -659,7 +659,7 @@ xfs_getbmap(
+                        * extents.
+                        */
+                       if (map[i].br_startblock == DELAYSTARTBLOCK &&
+-                          map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
++                          map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+                               ASSERT((iflags & BMV_IF_DELALLOC) != 0);
+                         if (map[i].br_startblock == HOLESTARTBLOCK &&
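
For illustration only (not part of the queued patch), a small user-space sketch of the boundary condition: with hypothetical 4k blocks and an 8192-byte i_size, an extent whose br_startoff equals XFS_B_TO_FSB(mp, XFS_ISIZE(ip)) starts exactly at EOF and should no longer satisfy the check.

/* Hypothetical numbers; XFS_B_TO_FSB() rounds bytes up to filesystem blocks. */
#include <stdio.h>

int main(void)
{
	unsigned long long isize_bytes = 8192;
	unsigned long long blocksize = 4096;
	unsigned long long eof_fsb = (isize_bytes + blocksize - 1) / blocksize;  /* 2 */
	unsigned long long br_startoff = 2;     /* delalloc extent starting right at EOF */

	printf("old check (<=): %d\n", br_startoff <= eof_fsb);  /* 1: assert could fire */
	printf("new check (<):  %d\n", br_startoff <  eof_fsb);  /* 0: extent is ignored */
	return 0;
}
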
diff --git a/queue-3.18/xfs-fix-indlen-accounting-error-on-partial-delalloc-conversion.patch b/queue-3.18/xfs-fix-indlen-accounting-error-on-partial-delalloc-conversion.patch
new file mode 100644 (file)
index 0000000..3c58388
--- /dev/null
@@ -0,0 +1,71 @@
+From 0daaecacb83bc6b656a56393ab77a31c28139bc7 Mon Sep 17 00:00:00 2001
+From: Brian Foster <bfoster@redhat.com>
+Date: Fri, 12 May 2017 10:44:08 -0700
+Subject: xfs: fix indlen accounting error on partial delalloc conversion
+
+From: Brian Foster <bfoster@redhat.com>
+
+commit 0daaecacb83bc6b656a56393ab77a31c28139bc7 upstream.
+
+The delalloc -> real block conversion path uses an incorrect
+calculation in the case where the middle part of a delalloc extent
+is being converted. This is documented as a rare situation because
+XFS generally attempts to maximize contiguity by converting as much
+of a delalloc extent as possible.
+
+If this situation does occur, the indlen reservation for the two new
+delalloc extents left behind by the conversion of the middle range
+is calculated and compared with the original reservation. If more
+blocks are required, the delta is allocated from the global block
+pool. This delta value can be characterized as the difference
+between the new total requirement (temp + temp2) and the currently
+available reservation minus those blocks that have already been
+allocated (startblockval(PREV.br_startblock) - allocated).
+
+The problem is that the current code does not account for previously
+allocated blocks correctly. It subtracts the current allocation
+count from the (new - old) delta rather than the old indlen
+reservation. This means that more indlen blocks than have been
+allocated end up stashed in the remaining extents and free space
+accounting is broken as a result.
+
+Fix up the calculation to subtract the allocated block count from
+the original extent indlen and thus correctly allocate the
+reservation delta based on the difference between the new total
+requirement and the unused blocks from the original reservation.
+Also remove a bogus assert that contradicts the fact that the new
+indlen reservation can be larger than the original indlen
+reservation.
+
+Signed-off-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/libxfs/xfs_bmap.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -2214,8 +2214,10 @@ xfs_bmap_add_extent_delay_real(
+               }
+               temp = xfs_bmap_worst_indlen(bma->ip, temp);
+               temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
+-              diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
+-                      (bma->cur ? bma->cur->bc_private.b.allocated : 0));
++              diff = (int)(temp + temp2 -
++                           (startblockval(PREV.br_startblock) -
++                            (bma->cur ?
++                             bma->cur->bc_private.b.allocated : 0)));
+               if (diff > 0) {
+                       error = xfs_icsb_modify_counters(bma->ip->i_mount,
+                                       XFS_SBS_FDBLOCKS,
+@@ -2268,7 +2270,6 @@ xfs_bmap_add_extent_delay_real(
+               temp = da_new;
+               if (bma->cur)
+                       temp += bma->cur->bc_private.b.allocated;
+-              ASSERT(temp <= da_old);
+               if (temp < da_old)
+                       xfs_icsb_modify_counters(bma->ip->i_mount,
+                                       XFS_SBS_FDBLOCKS,
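
As a side note (not from the queued patch), a hedged arithmetic sketch of the delta calculation described above, using made-up block counts: the old expression subtracts the already-allocated blocks from the whole delta, while the fixed expression subtracts them from the unused part of the original reservation.

#include <stdio.h>

int main(void)
{
	int temp = 5, temp2 = 4;   /* worst-case indlen for the two leftover extents */
	int orig_indlen = 10;      /* startblockval(PREV.br_startblock)              */
	int allocated = 3;         /* blocks already allocated to the btree cursor   */

	int old_diff = temp + temp2 - orig_indlen - allocated;    /* -4: allocates nothing     */
	int new_diff = temp + temp2 - (orig_indlen - allocated);  /*  2: tops up the shortfall */

	printf("old_diff=%d new_diff=%d\n", old_diff, new_diff);
	return 0;
}
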
diff --git a/queue-3.18/xfs-fix-over-copying-of-getbmap-parameters-from-userspace.patch b/queue-3.18/xfs-fix-over-copying-of-getbmap-parameters-from-userspace.patch
new file mode 100644 (file)
index 0000000..a0729bb
--- /dev/null
@@ -0,0 +1,38 @@
+From be6324c00c4d1e0e665f03ed1fc18863a88da119 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Mon, 3 Apr 2017 15:17:57 -0700
+Subject: xfs: fix over-copying of getbmap parameters from userspace
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+commit be6324c00c4d1e0e665f03ed1fc18863a88da119 upstream.
+
+In xfs_ioc_getbmap, we should only copy the fields of struct getbmap
+from userspace, or else we end up copying random stack contents into the
+kernel.  struct getbmap is a strict subset of getbmapx, so a partial
+structure copy should work fine.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_ioctl.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1378,10 +1378,11 @@ xfs_ioc_getbmap(
+       unsigned int            cmd,
+       void                    __user *arg)
+ {
+-      struct getbmapx         bmx;
++      struct getbmapx         bmx = { 0 };
+       int                     error;
+-      if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
++      /* struct getbmap is a strict subset of struct getbmapx. */
++      if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
+               return -EFAULT;
+       if (bmx.bmv_count < 2)
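
For context (again, not part of the queued patch), a user-space sketch of the "copy only the common prefix" idea behind the offsetof() bound; the two structures are simplified stand-ins, not the real getbmap/getbmapx layouts.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct small { long a, b, c; };                      /* stand-in for struct getbmap  */
struct big   { long a, b, c; long iflags, pad[4]; }; /* stand-in for struct getbmapx */

int main(void)
{
	struct small user = { 1, 2, 3 };
	struct big kernel = { 0 };               /* zero the fields we will not copy */

	/* Copy only up to the first field that exists solely in the larger struct,
	 * the same bound the patch takes via offsetof(struct getbmapx, bmv_iflags). */
	memcpy(&kernel, &user, offsetof(struct big, iflags));

	printf("%ld %ld %ld iflags=%ld\n", kernel.a, kernel.b, kernel.c, kernel.iflags);
	return 0;
}
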
diff --git a/queue-3.18/xfs-fix-unaligned-access-in-xfs_btree_visit_blocks.patch b/queue-3.18/xfs-fix-unaligned-access-in-xfs_btree_visit_blocks.patch
new file mode 100644 (file)
index 0000000..35d26cd
--- /dev/null
@@ -0,0 +1,35 @@
+From a4d768e702de224cc85e0c8eac9311763403b368 Mon Sep 17 00:00:00 2001
+From: Eric Sandeen <sandeen@sandeen.net>
+Date: Mon, 22 May 2017 19:54:10 -0700
+Subject: xfs: fix unaligned access in xfs_btree_visit_blocks
+
+From: Eric Sandeen <sandeen@sandeen.net>
+
+commit a4d768e702de224cc85e0c8eac9311763403b368 upstream.
+
+This structure copy was throwing unaligned access warnings on sparc64:
+
+Kernel unaligned access at TPC[1043c088] xfs_btree_visit_blocks+0x88/0xe0 [xfs]
+
+xfs_btree_copy_ptrs does a memcpy, which avoids it.
+
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/libxfs/xfs_btree.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/xfs/libxfs/xfs_btree.c
++++ b/fs/xfs/libxfs/xfs_btree.c
+@@ -4051,7 +4051,7 @@ xfs_btree_change_owner(
+                       xfs_btree_readahead_ptr(cur, ptr, 1);
+                       /* save for the next iteration of the loop */
+-                      lptr = *ptr;
++                      xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
+               }
+               /* for each buffer in the level */
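
For background (not part of the queued patch), a minimal sketch of why a plain pointer dereference can trap on strict-alignment CPUs such as sparc64 while memcpy of the same bytes is safe; the buffer offset is contrived.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[16] = { 0 };
	uint64_t val = 0x1122334455667788ULL;

	memcpy(buf + 4, &val, sizeof(val));      /* an 8-byte value at a 4-byte offset */

	uint64_t *p = (uint64_t *)(void *)(buf + 4);  /* misaligned pointer */
	/* uint64_t bad = *p;    a direct load like 'lptr = *ptr' may fault here */

	uint64_t good;
	memcpy(&good, p, sizeof(good));          /* byte-wise copy never faults */
	printf("0x%llx\n", (unsigned long long)good);
	return 0;
}
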
diff --git a/queue-3.18/xfs-fix-up-quotacheck-buffer-list-error-handling.patch b/queue-3.18/xfs-fix-up-quotacheck-buffer-list-error-handling.patch
new file mode 100644 (file)
index 0000000..c754983
--- /dev/null
@@ -0,0 +1,96 @@
+From 20e8a063786050083fe05b4f45be338c60b49126 Mon Sep 17 00:00:00 2001
+From: Brian Foster <bfoster@redhat.com>
+Date: Fri, 21 Apr 2017 12:40:44 -0700
+Subject: xfs: fix up quotacheck buffer list error handling
+
+From: Brian Foster <bfoster@redhat.com>
+
+commit 20e8a063786050083fe05b4f45be338c60b49126 upstream.
+
+The quotacheck error handling of the delwri buffer list assumes the
+resident buffers are locked and doesn't clear the _XBF_DELWRI_Q flag
+on the buffers that are dequeued. This can lead to assert failures
+on buffer release and possibly other locking problems.
+
+Move this code to a delwri queue cancel helper function to
+encapsulate the logic required to properly release buffers from a
+delwri queue. Update the helper to clear the delwri queue flag and
+call it from quotacheck.
+
+Signed-off-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_buf.c |   24 ++++++++++++++++++++++++
+ fs/xfs/xfs_buf.h |    1 +
+ fs/xfs/xfs_qm.c  |    7 +------
+ 3 files changed, 26 insertions(+), 6 deletions(-)
+
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -982,6 +982,8 @@ void
+ xfs_buf_unlock(
+       struct xfs_buf          *bp)
+ {
++      ASSERT(xfs_buf_islocked(bp));
++
+       XB_CLEAR_OWNER(bp);
+       up(&bp->b_sema);
+@@ -1698,6 +1700,28 @@ error:
+ }
+ /*
++ * Cancel a delayed write list.
++ *
++ * Remove each buffer from the list, clear the delwri queue flag and drop the
++ * associated buffer reference.
++ */
++void
++xfs_buf_delwri_cancel(
++      struct list_head        *list)
++{
++      struct xfs_buf          *bp;
++
++      while (!list_empty(list)) {
++              bp = list_first_entry(list, struct xfs_buf, b_list);
++
++              xfs_buf_lock(bp);
++              bp->b_flags &= ~_XBF_DELWRI_Q;
++              list_del_init(&bp->b_list);
++              xfs_buf_relse(bp);
++      }
++}
++
++/*
+  * Add a buffer to the delayed write list.
+  *
+  * This queues a buffer for writeout if it hasn't already been.  Note that
+--- a/fs/xfs/xfs_buf.h
++++ b/fs/xfs/xfs_buf.h
+@@ -302,6 +302,7 @@ extern void xfs_buf_iomove(xfs_buf_t *,
+ extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
+ /* Delayed Write Buffer Routines */
++extern void xfs_buf_delwri_cancel(struct list_head *);
+ extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
+ extern int xfs_buf_delwri_submit(struct list_head *);
+ extern int xfs_buf_delwri_submit_nowait(struct list_head *);
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -1359,12 +1359,7 @@ xfs_qm_quotacheck(
+       mp->m_qflags |= flags;
+  error_return:
+-      while (!list_empty(&buffer_list)) {
+-              struct xfs_buf *bp =
+-                      list_first_entry(&buffer_list, struct xfs_buf, b_list);
+-              list_del_init(&bp->b_list);
+-              xfs_buf_relse(bp);
+-      }
++      xfs_buf_delwri_cancel(&buffer_list);
+       if (error) {
+               xfs_warn(mp,
diff --git a/queue-3.18/xfs-handle-array-index-overrun-in-xfs_dir2_leaf_readbuf.patch b/queue-3.18/xfs-handle-array-index-overrun-in-xfs_dir2_leaf_readbuf.patch
new file mode 100644 (file)
index 0000000..7f3aefe
--- /dev/null
@@ -0,0 +1,103 @@
+From 023cc840b40fad95c6fe26fff1d380a8c9d45939 Mon Sep 17 00:00:00 2001
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Thu, 13 Apr 2017 15:15:47 -0700
+Subject: xfs: handle array index overrun in xfs_dir2_leaf_readbuf()
+
+From: Eric Sandeen <sandeen@redhat.com>
+
+commit 023cc840b40fad95c6fe26fff1d380a8c9d45939 upstream.
+
+Carlos had a case where "find" seemed to start spinning
+forever and never return.
+
+This was on a filesystem with non-default multi-fsb (8k)
+directory blocks, and a fragmented directory with extents
+like this:
+
+0:[0,133646,2,0]
+1:[2,195888,1,0]
+2:[3,195890,1,0]
+3:[4,195892,1,0]
+4:[5,195894,1,0]
+5:[6,195896,1,0]
+6:[7,195898,1,0]
+7:[8,195900,1,0]
+8:[9,195902,1,0]
+9:[10,195908,1,0]
+10:[11,195910,1,0]
+11:[12,195912,1,0]
+12:[13,195914,1,0]
+...
+
+i.e. the first extent is a contiguous 2-fsb dir block, but
+after that it is fragmented into 1 block extents.
+
+At the top of the readdir path, we allocate a mapping array
+which (for this filesystem geometry) can hold 10 extents; see
+the assignment to map_info->map_size.  During readdir, we are
+therefore able to map extents 0 through 9 above into the array
+for readahead purposes.  If we count by 2, we see that the last
+mapped index (9) is the first block of a 2-fsb directory block.
+
+At the end of xfs_dir2_leaf_readbuf() we have 2 loops to fill
+more readahead; the outer loop assumes one full dir block is
+processed each loop iteration, and an inner loop that ensures
+that this is so by advancing to the next extent until a full
+directory block is mapped.
+
+The problem is that this inner loop may step past the last
+extent in the mapping array as it tries to reach the end of
+the directory block.  This will read garbage for the extent
+length, and as a result the loop control variable 'j' may
+become corrupted and never fail the loop conditional.
+
+The number of valid mappings we have in our array is stored
+in map->map_valid, so stop this inner loop based on that limit.
+
+There is an ASSERT at the top of the outer loop for this
+same condition, but we never made it out of the inner loop,
+so the ASSERT never fired.
+
+Huge appreciation to Carlos for debugging and isolating
+the problem.
+
+Debugged-and-analyzed-by: Carlos Maiolino <cmaiolino@redhat.com>
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Tested-by: Carlos Maiolino <cmaiolino@redhat.com>
+Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
+Reviewed-by: Bill O'Donnell <billodo@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_dir2_readdir.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/fs/xfs/xfs_dir2_readdir.c
++++ b/fs/xfs/xfs_dir2_readdir.c
+@@ -422,6 +422,7 @@ xfs_dir2_leaf_readbuf(
+       /*
+        * Do we need more readahead?
++       * Each loop tries to process 1 full dir blk; last may be partial.
+        */
+       blk_start_plug(&plug);
+       for (mip->ra_index = mip->ra_offset = i = 0;
+@@ -453,9 +454,14 @@ xfs_dir2_leaf_readbuf(
+               }
+               /*
+-               * Advance offset through the mapping table.
++               * Advance offset through the mapping table, processing a full
++               * dir block even if it is fragmented into several extents.
++               * But stop if we have consumed all valid mappings, even if
++               * it's not yet a full directory block.
+                */
+-              for (j = 0; j < geo->fsbcount; j += length ) {
++              for (j = 0;
++                   j < geo->fsbcount && mip->ra_index < mip->map_valid;
++                   j += length ) {
+                       /*
+                        * The rest of this extent but not more than a dir
+                        * block.
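
To make the new loop bound concrete (illustration only, with hypothetical geometry), here is a stand-alone sketch of the inner loop stopping once all valid mappings are consumed, even before a full directory block has been assembled:

#include <stdio.h>

struct map { int blockcount; };   /* stand-in for the br_blockcount of each mapping */

int main(void)
{
	struct map map[10] = { {2}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1} };
	int map_valid = 10;       /* mip->map_valid: entries actually filled in      */
	int fsbcount = 2;         /* geo->fsbcount: filesystem blocks per dir block  */
	int ra_index = 9;         /* readahead sits on the last valid mapping        */
	int ra_offset = 0;
	int j, length;

	/* Without the ra_index < map_valid guard this would read map[10] garbage. */
	for (j = 0; j < fsbcount && ra_index < map_valid; j += length) {
		length = map[ra_index].blockcount - ra_offset;
		if (length > fsbcount - j)
			length = fsbcount - j;
		ra_offset += length;
		if (ra_offset == map[ra_index].blockcount) {  /* extent consumed */
			ra_index++;
			ra_offset = 0;
		}
	}
	printf("stopped safely at j=%d, ra_index=%d\n", j, ra_index);
	return 0;
}
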
diff --git a/queue-3.18/xfs-prevent-multi-fsb-dir-readahead-from-reading-random-blocks.patch b/queue-3.18/xfs-prevent-multi-fsb-dir-readahead-from-reading-random-blocks.patch
new file mode 100644 (file)
index 0000000..1ed8657
--- /dev/null
@@ -0,0 +1,80 @@
+From cb52ee334a45ae6c78a3999e4b473c43ddc528f4 Mon Sep 17 00:00:00 2001
+From: Brian Foster <bfoster@redhat.com>
+Date: Thu, 20 Apr 2017 08:06:47 -0700
+Subject: xfs: prevent multi-fsb dir readahead from reading random blocks
+
+From: Brian Foster <bfoster@redhat.com>
+
+commit cb52ee334a45ae6c78a3999e4b473c43ddc528f4 upstream.
+
+Directory block readahead uses a complex iteration mechanism to map
+between high-level directory blocks and underlying physical extents.
+This mechanism attempts to traverse the higher-level dir blocks in a
+manner that handles multi-fsb directory blocks and simultaneously
+maintains a reference to the corresponding physical blocks.
+
+This logic doesn't handle certain (discontiguous) physical extent
+layouts correctly with multi-fsb directory blocks. For example,
+consider the case of a 4k FSB filesystem with a 2 FSB (8k) directory
+block size and a directory with the following extent layout:
+
+ EXT: FILE-OFFSET      BLOCK-RANGE      AG AG-OFFSET        TOTAL
+   0: [0..7]:          88..95            0 (88..95)             8
+   1: [8..15]:         80..87            0 (80..87)             8
+   2: [16..39]:        168..191          0 (168..191)          24
+   3: [40..63]:        5242952..5242975  1 (72..95)            24
+
+Directory block 0 spans physical extents 0 and 1, dirblk 1 lies
+entirely within extent 2 and dirblk 2 spans extents 2 and 3. Because
+extent 2 is larger than the directory block size, the readahead code
+erroneously assumes the block is contiguous and issues a readahead
+based on the physical mapping of the first fsb of the dirblk. This
+results in read verifier failure and a spurious corruption or crc
+failure, depending on the filesystem format.
+
+Further, the subsequent readahead code responsible for walking
+through the physical table doesn't correctly advance the physical
+block reference for dirblk 2. Instead of advancing two physical
+filesystem blocks, the first iteration of the loop advances 1 block
+(correctly), but the subsequent iteration advances 2 more physical
+blocks because the next physical extent (extent 3, above) happens to
+cover more than dirblk 2. At this point, the higher-level directory
+block walking is completely off the rails of the actual physical
+layout of the directory for the respective mapping table.
+
+Update the contiguous dirblock logic to consider the current offset
+in the physical extent to avoid issuing directory readahead to
+unrelated blocks. Also, update the mapping table advancing code to
+consider the current offset within the current dirblock to avoid
+advancing the mapping reference too far beyond the dirblock.
+
+Signed-off-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_dir2_readdir.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/xfs/xfs_dir2_readdir.c
++++ b/fs/xfs/xfs_dir2_readdir.c
+@@ -433,7 +433,8 @@ xfs_dir2_leaf_readbuf(
+                * Read-ahead a contiguous directory block.
+                */
+               if (i > mip->ra_current &&
+-                  map[mip->ra_index].br_blockcount >= geo->fsbcount) {
++                  (map[mip->ra_index].br_blockcount - mip->ra_offset) >=
++                  geo->fsbcount) {
+                       xfs_dir3_data_readahead(dp,
+                               map[mip->ra_index].br_startoff + mip->ra_offset,
+                               XFS_FSB_TO_DADDR(dp->i_mount,
+@@ -466,7 +467,7 @@ xfs_dir2_leaf_readbuf(
+                        * The rest of this extent but not more than a dir
+                        * block.
+                        */
+-                      length = min_t(int, geo->fsbcount,
++                      length = min_t(int, geo->fsbcount - j,
+                                       map[mip->ra_index].br_blockcount -
+                                                       mip->ra_offset);
+                       mip->ra_offset += length;
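
For illustration (not part of the queued patch), the contiguity check with numbers matching the scenario above, assuming 2-fsb directory blocks and a 3-fsb extent whose last block is where the next directory block begins:

#include <stdio.h>

int main(void)
{
	int fsbcount = 2;      /* geo->fsbcount: filesystem blocks per directory block */
	int blockcount = 3;    /* br_blockcount of the current extent                  */
	int ra_offset = 2;     /* the directory block starts at the extent's last fsb  */

	/* Old check: the extent looks "long enough", so a contiguous readahead is
	 * issued even though only one of its blocks belongs to this dir block. */
	printf("old: %d\n", blockcount >= fsbcount);                /* 1 */
	/* New check: account for how far into the extent we already are. */
	printf("new: %d\n", (blockcount - ra_offset) >= fsbcount);  /* 0 */
	return 0;
}
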
diff --git a/queue-3.18/xfs-xfs_trans_alloc_empty.patch b/queue-3.18/xfs-xfs_trans_alloc_empty.patch
new file mode 100644 (file)
index 0000000..7b8bcdd
--- /dev/null
@@ -0,0 +1,72 @@
+From hch@lst.de  Mon Jun  5 17:05:12 2017
+From: Christoph Hellwig <hch@lst.de>
+Date: Sat,  3 Jun 2017 15:18:31 +0200
+Subject: xfs: xfs_trans_alloc_empty
+To: stable@vger.kernel.org
+Cc: linux-xfs@vger.kernel.org, "Darrick J . Wong" <darrick.wong@oracle.com>
+Message-ID: <20170603131836.26661-21-hch@lst.de>
+
+From: Christoph Hellwig <hch@lst.de>
+
+This is a partial cherry-pick of commit e89c041338
+("xfs: implement the GETFSMAP ioctl"), which also adds this helper, and
+is a great example of why feature patches should be properly split into
+their constituent parts.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+[hch: split from the larger patch for -stable]
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+---
+ fs/xfs/xfs_trans.c | 22 ++++++++++++++++++++++
+ fs/xfs/xfs_trans.h |  2 ++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index 70f42ea86dfb..a280e126491f 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -263,6 +263,28 @@ xfs_trans_alloc(
+ }
+ /*
++ * Create an empty transaction with no reservation.  This is a defensive
++ * mechanism for routines that query metadata without actually modifying
++ * them -- if the metadata being queried is somehow cross-linked (think a
++ * btree block pointer that points higher in the tree), we risk deadlock.
++ * However, blocks grabbed as part of a transaction can be re-grabbed.
++ * The verifiers will notice the corrupt block and the operation will fail
++ * back to userspace without deadlocking.
++ *
++ * Note the zero-length reservation; this transaction MUST be cancelled
++ * without any dirty data.
++ */
++int
++xfs_trans_alloc_empty(
++      struct xfs_mount                *mp,
++      struct xfs_trans                **tpp)
++{
++      struct xfs_trans_res            resv = {0};
++
++      return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
++}
++
++/*
+  * Record the indicated change to the given field for application
+  * to the file system's superblock when the transaction commits.
+  * For now, just store the change in the transaction structure.
+diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
+index 61b7fbdd3ebd..98024cb933ef 100644
+--- a/fs/xfs/xfs_trans.h
++++ b/fs/xfs/xfs_trans.h
+@@ -159,6 +159,8 @@ typedef struct xfs_trans {
+ int           xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
+                       uint blocks, uint rtextents, uint flags,
+                       struct xfs_trans **tpp);
++int           xfs_trans_alloc_empty(struct xfs_mount *mp,
++                      struct xfs_trans **tpp);
+ void          xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
+ struct xfs_buf        *xfs_trans_get_buf_map(struct xfs_trans *tp,
+-- 
+2.11.0
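
As a hypothetical usage sketch (not part of this series), an empty, zero-reservation transaction wrapped around a read-only metadata query; the function name is made up, and the cancel call is left as a comment because its signature differs between kernel versions:

int
xfs_example_metadata_query(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	/* ... walk metadata through tp here; nothing may be dirtied ... */

	/* xfs_trans_cancel(tp, ...);  tear the empty transaction back down */
	return 0;
}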
+