4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 2 May 2016 23:31:17 +0000 (16:31 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 2 May 2016 23:31:17 +0000 (16:31 -0700)
added patches:
ext4-fix-races-between-buffered-io-and-collapse-insert-range.patch
ext4-fix-races-between-page-faults-and-hole-punching.patch
ext4-fix-races-of-writeback-with-punch-hole-and-zero-range.patch
ext4-move-unlocked-dio-protection-from-ext4_alloc_file_blocks.patch
mtd-brcmnand-fix-v7.1-register-offsets.patch
mtd-nand-drop-mtd.owner-requirement-in-nand_scan.patch
mtd-spi-nor-remove-micron_quad_enable.patch
perf-hists-browser-only-offer-symbol-scripting-when-a-symbol-is-under-the-cursor.patch
perf-stat-document-detailed-option.patch
perf-tools-handle-spaces-in-file-names-obtained-from-proc-pid-maps.patch

queue-4.4/ext4-fix-races-between-buffered-io-and-collapse-insert-range.patch [new file with mode: 0644]
queue-4.4/ext4-fix-races-between-page-faults-and-hole-punching.patch [new file with mode: 0644]
queue-4.4/ext4-fix-races-of-writeback-with-punch-hole-and-zero-range.patch [new file with mode: 0644]
queue-4.4/ext4-move-unlocked-dio-protection-from-ext4_alloc_file_blocks.patch [new file with mode: 0644]
queue-4.4/mtd-brcmnand-fix-v7.1-register-offsets.patch [new file with mode: 0644]
queue-4.4/mtd-nand-drop-mtd.owner-requirement-in-nand_scan.patch [new file with mode: 0644]
queue-4.4/mtd-spi-nor-remove-micron_quad_enable.patch [new file with mode: 0644]
queue-4.4/perf-hists-browser-only-offer-symbol-scripting-when-a-symbol-is-under-the-cursor.patch [new file with mode: 0644]
queue-4.4/perf-stat-document-detailed-option.patch [new file with mode: 0644]
queue-4.4/perf-tools-handle-spaces-in-file-names-obtained-from-proc-pid-maps.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/ext4-fix-races-between-buffered-io-and-collapse-insert-range.patch b/queue-4.4/ext4-fix-races-between-buffered-io-and-collapse-insert-range.patch
new file mode 100644 (file)
index 0000000..f673117
--- /dev/null
@@ -0,0 +1,119 @@
+From 32ebffd3bbb4162da5ff88f9a35dd32d0a28ea70 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.com>
+Date: Mon, 7 Dec 2015 14:31:11 -0500
+Subject: ext4: fix races between buffered IO and collapse / insert range
+
+From: Jan Kara <jack@suse.com>
+
+commit 32ebffd3bbb4162da5ff88f9a35dd32d0a28ea70 upstream.
+
+The current code implementing FALLOC_FL_COLLAPSE_RANGE and
+FALLOC_FL_INSERT_RANGE is prone to races with buffered writes and page
+faults. If a buffered write or a write via mmap manages to squeeze between
+filemap_write_and_wait_range() and truncate_pagecache() in the fallocate
+implementations, the written data is simply discarded by
+truncate_pagecache(), although it should have been shifted.
+
+Fix the problem by moving the filemap_write_and_wait_range() call inside
+i_mutex and i_mmap_sem. That way we are protected against races with
+both buffered writes and page faults.
+
+Signed-off-by: Jan Kara <jack@suse.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/extents.c |   59 ++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 31 insertions(+), 28 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5487,21 +5487,7 @@ int ext4_collapse_range(struct inode *in
+                       return ret;
+       }
+-      /*
+-       * Need to round down offset to be aligned with page size boundary
+-       * for page size > block size.
+-       */
+-      ioffset = round_down(offset, PAGE_SIZE);
+-
+-      /* Write out all dirty pages */
+-      ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+-                                         LLONG_MAX);
+-      if (ret)
+-              return ret;
+-
+-      /* Take mutex lock */
+       mutex_lock(&inode->i_mutex);
+-
+       /*
+        * There is no need to overlap collapse range with EOF, in which case
+        * it is effectively a truncate operation
+@@ -5526,6 +5512,27 @@ int ext4_collapse_range(struct inode *in
+        * page cache.
+        */
+       down_write(&EXT4_I(inode)->i_mmap_sem);
++      /*
++       * Need to round down offset to be aligned with page size boundary
++       * for page size > block size.
++       */
++      ioffset = round_down(offset, PAGE_SIZE);
++      /*
++       * Write tail of the last page before removed range since it will get
++       * removed from the page cache below.
++       */
++      ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
++      if (ret)
++              goto out_mmap;
++      /*
++       * Write data that will be shifted to preserve it when discarding
++       * page cache below. We are also protected from pages becoming dirty
++       * by i_mmap_sem.
++       */
++      ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
++                                         LLONG_MAX);
++      if (ret)
++              goto out_mmap;
+       truncate_pagecache(inode, ioffset);
+       credits = ext4_writepage_trans_blocks(inode);
+@@ -5626,21 +5633,7 @@ int ext4_insert_range(struct inode *inod
+                       return ret;
+       }
+-      /*
+-       * Need to round down to align start offset to page size boundary
+-       * for page size > block size.
+-       */
+-      ioffset = round_down(offset, PAGE_SIZE);
+-
+-      /* Write out all dirty pages */
+-      ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+-                      LLONG_MAX);
+-      if (ret)
+-              return ret;
+-
+-      /* Take mutex lock */
+       mutex_lock(&inode->i_mutex);
+-
+       /* Currently just for extent based files */
+       if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+               ret = -EOPNOTSUPP;
+@@ -5668,6 +5661,16 @@ int ext4_insert_range(struct inode *inod
+        * page cache.
+        */
+       down_write(&EXT4_I(inode)->i_mmap_sem);
++      /*
++       * Need to round down to align start offset to page size boundary
++       * for page size > block size.
++       */
++      ioffset = round_down(offset, PAGE_SIZE);
++      /* Write out all dirty pages */
++      ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
++                      LLONG_MAX);
++      if (ret)
++              goto out_mmap;
+       truncate_pagecache(inode, ioffset);
+       credits = ext4_writepage_trans_blocks(inode);
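[ Editor's note: for context, a minimal userspace sketch (not part of the
  patch) of the operation being protected. FALLOC_FL_COLLAPSE_RANGE removes
  a block-aligned range and shifts the following data down; a concurrent
  buffered or mmap write into the shifted region is exactly what the fix
  now flushes under i_mutex/i_mmap_sem rather than before taking them. ]

    /* collapse.c - hedged demo; offset/len must be fs-block aligned */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #ifndef FALLOC_FL_COLLAPSE_RANGE
    #define FALLOC_FL_COLLAPSE_RANGE 0x08   /* from <linux/falloc.h> */
    #endif

    int main(int argc, char **argv)
    {
            if (argc != 4) {
                    fprintf(stderr, "usage: %s <file> <offset> <len>\n", argv[0]);
                    return 1;
            }
            int fd = open(argv[1], O_RDWR);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* removes [offset, offset+len) and shifts the tail down */
            if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE,
                          atoll(argv[2]), atoll(argv[3])) < 0) {
                    perror("fallocate");
                    return 1;
            }
            return close(fd);
    }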
diff --git a/queue-4.4/ext4-fix-races-between-page-faults-and-hole-punching.patch b/queue-4.4/ext4-fix-races-between-page-faults-and-hole-punching.patch
new file mode 100644 (file)
index 0000000..9ac3eec
--- /dev/null
@@ -0,0 +1,433 @@
+From ea3d7209ca01da209cda6f0dea8be9cc4b7a933b Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.com>
+Date: Mon, 7 Dec 2015 14:28:03 -0500
+Subject: ext4: fix races between page faults and hole punching
+
+From: Jan Kara <jack@suse.com>
+
+commit ea3d7209ca01da209cda6f0dea8be9cc4b7a933b upstream.
+
+Currently, page faults and hole punching are completely unsynchronized.
+This can result in a page fault faulting a page into a range that we
+are punching, after truncate_pagecache_range() has been called, and thus
+we can end up with a page mapped to disk blocks that will shortly be
+freed. Filesystem corruption will shortly follow. Note that the same
+race is avoided for truncate by checking the page fault offset against
+i_size, but there is no similar mechanism available for punching holes.
+
+Fix the problem by creating a new rw semaphore, i_mmap_sem, in the inode,
+grabbing it for writing over truncate, hole punching, and other functions
+removing blocks from the extent tree, and for reading over page faults.
+We cannot easily use i_data_sem for this, since it ranks below
+transaction start and we need something ranking above it so that it can
+be held over the whole truncate / hole punching operation. Also remove
+various workarounds we had in the code to reduce the race window when a
+page fault could have created pages with stale mapping information.
+
+Signed-off-by: Jan Kara <jack@suse.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/ext4.h     |   10 ++++++++
+ fs/ext4/extents.c  |   54 ++++++++++++++++++++++++-------------------
+ fs/ext4/file.c     |   66 +++++++++++++++++++++++++++++++++++++++++++++--------
+ fs/ext4/inode.c    |   36 +++++++++++++++++++++-------
+ fs/ext4/super.c    |    1 
+ fs/ext4/truncate.h |    2 +
+ 6 files changed, 127 insertions(+), 42 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -933,6 +933,15 @@ struct ext4_inode_info {
+        * by other means, so we have i_data_sem.
+        */
+       struct rw_semaphore i_data_sem;
++      /*
++       * i_mmap_sem is for serializing page faults with truncate / punch hole
++       * operations. We have to make sure that a new page cannot be faulted
++       * into a section of the inode that is being punched. We cannot easily
++       * use i_data_sem for this since we need protection for the whole punch
++       * operation and i_data_sem ranks below transaction start so we have
++       * to occasionally drop it.
++       */
++      struct rw_semaphore i_mmap_sem;
+       struct inode vfs_inode;
+       struct jbd2_inode *jinode;
+@@ -2507,6 +2516,7 @@ extern int ext4_chunk_trans_blocks(struc
+ extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
+                            loff_t lstart, loff_t lend);
+ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
++extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ extern qsize_t *ext4_get_reserved_space(struct inode *inode);
+ extern void ext4_da_update_reserve_space(struct inode *inode,
+                                       int used, int quota_claim);
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4770,7 +4770,6 @@ static long ext4_zero_range(struct file
+       int partial_begin, partial_end;
+       loff_t start, end;
+       ext4_lblk_t lblk;
+-      struct address_space *mapping = inode->i_mapping;
+       unsigned int blkbits = inode->i_blkbits;
+       trace_ext4_zero_range(inode, offset, len, mode);
+@@ -4786,17 +4785,6 @@ static long ext4_zero_range(struct file
+       }
+       /*
+-       * Write out all dirty pages to avoid race conditions
+-       * Then release them.
+-       */
+-      if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+-              ret = filemap_write_and_wait_range(mapping, offset,
+-                                                 offset + len - 1);
+-              if (ret)
+-                      return ret;
+-      }
+-
+-      /*
+       * Round up offset. This is not fallocate, we need to zero out
+        * blocks, so convert interior block aligned part of the range to
+        * unwritten and possibly manually zero out unaligned parts of the
+@@ -4856,16 +4844,22 @@ static long ext4_zero_range(struct file
+               flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+                         EXT4_EX_NOCACHE);
+-              /* Now release the pages and zero block aligned part of pages*/
+-              truncate_pagecache_range(inode, start, end - 1);
+-              inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+-
+               /* Wait all existing dio workers, newcomers will block on i_mutex */
+               ext4_inode_block_unlocked_dio(inode);
+               inode_dio_wait(inode);
++              /*
++               * Prevent page faults from reinstantiating pages we have
++               * released from page cache.
++               */
++              down_write(&EXT4_I(inode)->i_mmap_sem);
++              /* Now release the pages and zero block aligned part of pages */
++              truncate_pagecache_range(inode, start, end - 1);
++              inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
++
+               ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+                                            flags, mode);
++              up_write(&EXT4_I(inode)->i_mmap_sem);
+               if (ret)
+                       goto out_dio;
+       }
+@@ -5524,17 +5518,22 @@ int ext4_collapse_range(struct inode *in
+               goto out_mutex;
+       }
+-      truncate_pagecache(inode, ioffset);
+-
+       /* Wait for existing dio to complete */
+       ext4_inode_block_unlocked_dio(inode);
+       inode_dio_wait(inode);
++      /*
++       * Prevent page faults from reinstantiating pages we have released from
++       * page cache.
++       */
++      down_write(&EXT4_I(inode)->i_mmap_sem);
++      truncate_pagecache(inode, ioffset);
++
+       credits = ext4_writepage_trans_blocks(inode);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+-              goto out_dio;
++              goto out_mmap;
+       }
+       down_write(&EXT4_I(inode)->i_data_sem);
+@@ -5573,7 +5572,8 @@ int ext4_collapse_range(struct inode *in
+ out_stop:
+       ext4_journal_stop(handle);
+-out_dio:
++out_mmap:
++      up_write(&EXT4_I(inode)->i_mmap_sem);
+       ext4_inode_resume_unlocked_dio(inode);
+ out_mutex:
+       mutex_unlock(&inode->i_mutex);
+@@ -5660,17 +5660,22 @@ int ext4_insert_range(struct inode *inod
+               goto out_mutex;
+       }
+-      truncate_pagecache(inode, ioffset);
+-
+       /* Wait for existing dio to complete */
+       ext4_inode_block_unlocked_dio(inode);
+       inode_dio_wait(inode);
++      /*
++       * Prevent page faults from reinstantiating pages we have released from
++       * page cache.
++       */
++      down_write(&EXT4_I(inode)->i_mmap_sem);
++      truncate_pagecache(inode, ioffset);
++
+       credits = ext4_writepage_trans_blocks(inode);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+-              goto out_dio;
++              goto out_mmap;
+       }
+       /* Expand file to avoid data loss if there is error while shifting */
+@@ -5741,7 +5746,8 @@ int ext4_insert_range(struct inode *inod
+ out_stop:
+       ext4_journal_stop(handle);
+-out_dio:
++out_mmap:
++      up_write(&EXT4_I(inode)->i_mmap_sem);
+       ext4_inode_resume_unlocked_dio(inode);
+ out_mutex:
+       mutex_unlock(&inode->i_mutex);
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -209,15 +209,18 @@ static int ext4_dax_fault(struct vm_area
+ {
+       int result;
+       handle_t *handle = NULL;
+-      struct super_block *sb = file_inode(vma->vm_file)->i_sb;
++      struct inode *inode = file_inode(vma->vm_file);
++      struct super_block *sb = inode->i_sb;
+       bool write = vmf->flags & FAULT_FLAG_WRITE;
+       if (write) {
+               sb_start_pagefault(sb);
+               file_update_time(vma->vm_file);
++              down_read(&EXT4_I(inode)->i_mmap_sem);
+               handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
+                                               EXT4_DATA_TRANS_BLOCKS(sb));
+-      }
++      } else
++              down_read(&EXT4_I(inode)->i_mmap_sem);
+       if (IS_ERR(handle))
+               result = VM_FAULT_SIGBUS;
+@@ -228,8 +231,10 @@ static int ext4_dax_fault(struct vm_area
+       if (write) {
+               if (!IS_ERR(handle))
+                       ext4_journal_stop(handle);
++              up_read(&EXT4_I(inode)->i_mmap_sem);
+               sb_end_pagefault(sb);
+-      }
++      } else
++              up_read(&EXT4_I(inode)->i_mmap_sem);
+       return result;
+ }
+@@ -246,10 +251,12 @@ static int ext4_dax_pmd_fault(struct vm_
+       if (write) {
+               sb_start_pagefault(sb);
+               file_update_time(vma->vm_file);
++              down_read(&EXT4_I(inode)->i_mmap_sem);
+               handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
+                               ext4_chunk_trans_blocks(inode,
+                                                       PMD_SIZE / PAGE_SIZE));
+-      }
++      } else
++              down_read(&EXT4_I(inode)->i_mmap_sem);
+       if (IS_ERR(handle))
+               result = VM_FAULT_SIGBUS;
+@@ -260,30 +267,71 @@ static int ext4_dax_pmd_fault(struct vm_
+       if (write) {
+               if (!IS_ERR(handle))
+                       ext4_journal_stop(handle);
++              up_read(&EXT4_I(inode)->i_mmap_sem);
+               sb_end_pagefault(sb);
+-      }
++      } else
++              up_read(&EXT4_I(inode)->i_mmap_sem);
+       return result;
+ }
+ static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+-      return dax_mkwrite(vma, vmf, ext4_get_block_dax,
+-                              ext4_end_io_unwritten);
++      int err;
++      struct inode *inode = file_inode(vma->vm_file);
++
++      sb_start_pagefault(inode->i_sb);
++      file_update_time(vma->vm_file);
++      down_read(&EXT4_I(inode)->i_mmap_sem);
++      err = __dax_mkwrite(vma, vmf, ext4_get_block_dax,
++                          ext4_end_io_unwritten);
++      up_read(&EXT4_I(inode)->i_mmap_sem);
++      sb_end_pagefault(inode->i_sb);
++
++      return err;
++}
++
++/*
++ * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite()
++ * handler we check for races against truncate. Note that since we cycle through
++ * i_mmap_sem, we are sure that also any hole punching that began before we
++ * were called is finished by now and so if it included part of the file we
++ * are working on, our pte will get unmapped and the check for pte_same() in
++ * wp_pfn_shared() fails. Thus fault gets retried and things work out as
++ * desired.
++ */
++static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
++                              struct vm_fault *vmf)
++{
++      struct inode *inode = file_inode(vma->vm_file);
++      struct super_block *sb = inode->i_sb;
++      int ret = VM_FAULT_NOPAGE;
++      loff_t size;
++
++      sb_start_pagefault(sb);
++      file_update_time(vma->vm_file);
++      down_read(&EXT4_I(inode)->i_mmap_sem);
++      size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++      if (vmf->pgoff >= size)
++              ret = VM_FAULT_SIGBUS;
++      up_read(&EXT4_I(inode)->i_mmap_sem);
++      sb_end_pagefault(sb);
++
++      return ret;
+ }
+ static const struct vm_operations_struct ext4_dax_vm_ops = {
+       .fault          = ext4_dax_fault,
+       .pmd_fault      = ext4_dax_pmd_fault,
+       .page_mkwrite   = ext4_dax_mkwrite,
+-      .pfn_mkwrite    = dax_pfn_mkwrite,
++      .pfn_mkwrite    = ext4_dax_pfn_mkwrite,
+ };
+ #else
+ #define ext4_dax_vm_ops       ext4_file_vm_ops
+ #endif
+ static const struct vm_operations_struct ext4_file_vm_ops = {
+-      .fault          = filemap_fault,
++      .fault          = ext4_filemap_fault,
+       .map_pages      = filemap_map_pages,
+       .page_mkwrite   = ext4_page_mkwrite,
+ };
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3651,6 +3651,15 @@ int ext4_punch_hole(struct inode *inode,
+       }
++      /* Wait all existing dio workers, newcomers will block on i_mutex */
++      ext4_inode_block_unlocked_dio(inode);
++      inode_dio_wait(inode);
++
++      /*
++       * Prevent page faults from reinstantiating pages we have released from
++       * page cache.
++       */
++      down_write(&EXT4_I(inode)->i_mmap_sem);
+       first_block_offset = round_up(offset, sb->s_blocksize);
+       last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
+@@ -3659,10 +3668,6 @@ int ext4_punch_hole(struct inode *inode,
+               truncate_pagecache_range(inode, first_block_offset,
+                                        last_block_offset);
+-      /* Wait all existing dio workers, newcomers will block on i_mutex */
+-      ext4_inode_block_unlocked_dio(inode);
+-      inode_dio_wait(inode);
+-
+       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+               credits = ext4_writepage_trans_blocks(inode);
+       else
+@@ -3708,16 +3713,12 @@ int ext4_punch_hole(struct inode *inode,
+       if (IS_SYNC(inode))
+               ext4_handle_sync(handle);
+-      /* Now release the pages again to reduce race window */
+-      if (last_block_offset > first_block_offset)
+-              truncate_pagecache_range(inode, first_block_offset,
+-                                       last_block_offset);
+-
+       inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+       ext4_mark_inode_dirty(handle, inode);
+ out_stop:
+       ext4_journal_stop(handle);
+ out_dio:
++      up_write(&EXT4_I(inode)->i_mmap_sem);
+       ext4_inode_resume_unlocked_dio(inode);
+ out_mutex:
+       mutex_unlock(&inode->i_mutex);
+@@ -4851,6 +4852,7 @@ int ext4_setattr(struct dentry *dentry,
+                       } else
+                               ext4_wait_for_tail_page_commit(inode);
+               }
++              down_write(&EXT4_I(inode)->i_mmap_sem);
+               /*
+                * Truncate pagecache after we've waited for commit
+                * in data=journal mode to make pages freeable.
+@@ -4858,6 +4860,7 @@ int ext4_setattr(struct dentry *dentry,
+               truncate_pagecache(inode, inode->i_size);
+               if (shrink)
+                       ext4_truncate(inode);
++              up_write(&EXT4_I(inode)->i_mmap_sem);
+       }
+       if (!rc) {
+@@ -5306,6 +5309,8 @@ int ext4_page_mkwrite(struct vm_area_str
+       sb_start_pagefault(inode->i_sb);
+       file_update_time(vma->vm_file);
++
++      down_read(&EXT4_I(inode)->i_mmap_sem);
+       /* Delalloc case is easy... */
+       if (test_opt(inode->i_sb, DELALLOC) &&
+           !ext4_should_journal_data(inode) &&
+@@ -5375,6 +5380,19 @@ retry_alloc:
+ out_ret:
+       ret = block_page_mkwrite_return(ret);
+ out:
++      up_read(&EXT4_I(inode)->i_mmap_sem);
+       sb_end_pagefault(inode->i_sb);
+       return ret;
+ }
++
++int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      struct inode *inode = file_inode(vma->vm_file);
++      int err;
++
++      down_read(&EXT4_I(inode)->i_mmap_sem);
++      err = filemap_fault(vma, vmf);
++      up_read(&EXT4_I(inode)->i_mmap_sem);
++
++      return err;
++}
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -958,6 +958,7 @@ static void init_once(void *foo)
+       INIT_LIST_HEAD(&ei->i_orphan);
+       init_rwsem(&ei->xattr_sem);
+       init_rwsem(&ei->i_data_sem);
++      init_rwsem(&ei->i_mmap_sem);
+       inode_init_once(&ei->vfs_inode);
+ }
+--- a/fs/ext4/truncate.h
++++ b/fs/ext4/truncate.h
+@@ -10,8 +10,10 @@
+  */
+ static inline void ext4_truncate_failed_write(struct inode *inode)
+ {
++      down_write(&EXT4_I(inode)->i_mmap_sem);
+       truncate_inode_pages(inode->i_mapping, inode->i_size);
+       ext4_truncate(inode);
++      up_write(&EXT4_I(inode)->i_mmap_sem);
+ }
+ /*
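[ Editor's note: a simplified userspace analogue of the serialization
  pattern this patch introduces, with a pthread rwlock standing in for the
  kernel rw_semaphore (an assumption for the sake of a standalone sketch).
  Faults take i_mmap_sem shared, truncate/punch take it exclusive, so a
  fault can never reinstantiate a page over blocks that are being freed. ]

    /* i_mmap_sem pattern, userspace sketch (pthread rwlock as stand-in) */
    #include <pthread.h>

    static pthread_rwlock_t i_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

    /* analogue of ext4_filemap_fault(): reader side */
    void fault_path(void (*filemap_fault)(void))
    {
            pthread_rwlock_rdlock(&i_mmap_sem);
            filemap_fault();             /* may instantiate page cache pages */
            pthread_rwlock_unlock(&i_mmap_sem);
    }

    /* analogue of ext4_punch_hole(): writer side */
    void punch_hole_path(void (*truncate_range)(void), void (*free_blocks)(void))
    {
            pthread_rwlock_wrlock(&i_mmap_sem);
            truncate_range();            /* drop pages over the hole */
            free_blocks();               /* safe: no fault can map them back */
            pthread_rwlock_unlock(&i_mmap_sem);
    }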
diff --git a/queue-4.4/ext4-fix-races-of-writeback-with-punch-hole-and-zero-range.patch b/queue-4.4/ext4-fix-races-of-writeback-with-punch-hole-and-zero-range.patch
new file mode 100644 (file)
index 0000000..8326139
--- /dev/null
@@ -0,0 +1,106 @@
+From 011278485ecc3cd2a3954b5d4c73101d919bf1fa Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.com>
+Date: Mon, 7 Dec 2015 14:34:49 -0500
+Subject: ext4: fix races of writeback with punch hole and zero range
+
+From: Jan Kara <jack@suse.com>
+
+commit 011278485ecc3cd2a3954b5d4c73101d919bf1fa upstream.
+
+When doing delayed allocation, the update of the on-disk inode size is
+postponed until IO submission time. However, hole punch or zero range
+fallocate calls can end up discarding the tail page cache page, and thus
+the on-disk inode size would never be properly updated.
+
+Make sure the on-disk inode size is updated before truncating page
+cache.
+
+Signed-off-by: Jan Kara <jack@suse.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/ext4.h    |    3 +++
+ fs/ext4/extents.c |    5 +++++
+ fs/ext4/inode.c   |   35 ++++++++++++++++++++++++++++++++++-
+ 3 files changed, 42 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2881,6 +2881,9 @@ static inline int ext4_update_inode_size
+       return changed;
+ }
++int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
++                                    loff_t len);
++
+ struct ext4_group_info {
+       unsigned long   bb_state;
+       struct rb_root  bb_free_root;
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4847,6 +4847,11 @@ static long ext4_zero_range(struct file
+                * released from page cache.
+                */
+               down_write(&EXT4_I(inode)->i_mmap_sem);
++              ret = ext4_update_disksize_before_punch(inode, offset, len);
++              if (ret) {
++                      up_write(&EXT4_I(inode)->i_mmap_sem);
++                      goto out_dio;
++              }
+               /* Now release the pages and zero block aligned part of pages */
+               truncate_pagecache_range(inode, start, end - 1);
+               inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3587,6 +3587,35 @@ int ext4_can_truncate(struct inode *inod
+ }
+ /*
++ * We have to make sure i_disksize gets properly updated before we truncate
++ * page cache due to hole punching or zero range. Otherwise i_disksize update
++ * can get lost as it may have been postponed to submission of writeback but
++ * that will never happen after we truncate page cache.
++ */
++int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
++                                    loff_t len)
++{
++      handle_t *handle;
++      loff_t size = i_size_read(inode);
++
++      WARN_ON(!mutex_is_locked(&inode->i_mutex));
++      if (offset > size || offset + len < size)
++              return 0;
++
++      if (EXT4_I(inode)->i_disksize >= size)
++              return 0;
++
++      handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
++      if (IS_ERR(handle))
++              return PTR_ERR(handle);
++      ext4_update_i_disksize(inode, size);
++      ext4_mark_inode_dirty(handle, inode);
++      ext4_journal_stop(handle);
++
++      return 0;
++}
++
++/*
+  * ext4_punch_hole: punches a hole in a file by releaseing the blocks
+  * associated with the given offset and length
+  *
+@@ -3664,9 +3693,13 @@ int ext4_punch_hole(struct inode *inode,
+       last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
+       /* Now release the pages and zero block aligned part of pages*/
+-      if (last_block_offset > first_block_offset)
++      if (last_block_offset > first_block_offset) {
++              ret = ext4_update_disksize_before_punch(inode, offset, length);
++              if (ret)
++                      goto out_dio;
+               truncate_pagecache_range(inode, first_block_offset,
+                                        last_block_offset);
++      }
+       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+               credits = ext4_writepage_trans_blocks(inode);
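[ Editor's illustration (standalone sketch, not kernel code): the guard in
  ext4_update_disksize_before_punch() only syncs i_disksize when the range
  being punched or zeroed straddles i_size — that is, when the tail page
  carrying the postponed size update is about to be dropped from the page
  cache. ]

    #include <stdbool.h>
    #include <stdio.h>

    static bool must_sync_disksize(long long offset, long long len,
                                   long long i_size)
    {
            /* mirrors: if (offset > size || offset + len < size) return 0; */
            return offset <= i_size && offset + len >= i_size;
    }

    int main(void)
    {
            printf("punch [4096,8192) size 5000 -> %d\n",
                   must_sync_disksize(4096, 4096, 5000)); /* 1: sync needed */
            printf("punch [0,4096)    size 9000 -> %d\n",
                   must_sync_disksize(0, 4096, 9000));    /* 0: tail intact */
            return 0;
    }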
diff --git a/queue-4.4/ext4-move-unlocked-dio-protection-from-ext4_alloc_file_blocks.patch b/queue-4.4/ext4-move-unlocked-dio-protection-from-ext4_alloc_file_blocks.patch
new file mode 100644 (file)
index 0000000..aec5532
--- /dev/null
@@ -0,0 +1,93 @@
+From 17048e8a083fec7ad841d88ef0812707fbc7e39f Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.com>
+Date: Mon, 7 Dec 2015 14:29:17 -0500
+Subject: ext4: move unlocked dio protection from ext4_alloc_file_blocks()
+
+From: Jan Kara <jack@suse.com>
+
+commit 17048e8a083fec7ad841d88ef0812707fbc7e39f upstream.
+
+Currently, ext4_alloc_file_blocks() handles protection against
+unlocked DIO. However, we now need to call it sometimes under i_mmap_sem
+and sometimes not, and DIO protection ranks above i_mmap_sem (although
+strictly speaking this cannot currently create any deadlocks). Also,
+ext4_zero_range() was actually getting & releasing unlocked DIO
+protection twice in some cases. Luckily, it didn't introduce any real
+bug, but it was a land mine waiting to be stepped on. So move DIO
+protection out of ext4_alloc_file_blocks() into its two call sites.
+
+Signed-off-by: Jan Kara <jack@suse.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/extents.c |   21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4685,10 +4685,6 @@ static int ext4_alloc_file_blocks(struct
+       if (len <= EXT_UNWRITTEN_MAX_LEN)
+               flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
+-      /* Wait all existing dio workers, newcomers will block on i_mutex */
+-      ext4_inode_block_unlocked_dio(inode);
+-      inode_dio_wait(inode);
+-
+       /*
+        * credits to insert 1 extent into extent tree
+        */
+@@ -4752,8 +4748,6 @@ retry:
+               goto retry;
+       }
+-      ext4_inode_resume_unlocked_dio(inode);
+-
+       return ret > 0 ? ret2 : ret;
+ }
+@@ -4827,6 +4821,10 @@ static long ext4_zero_range(struct file
+       if (mode & FALLOC_FL_KEEP_SIZE)
+               flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
++      /* Wait all existing dio workers, newcomers will block on i_mutex */
++      ext4_inode_block_unlocked_dio(inode);
++      inode_dio_wait(inode);
++
+       /* Preallocate the range including the unaligned edges */
+       if (partial_begin || partial_end) {
+               ret = ext4_alloc_file_blocks(file,
+@@ -4835,7 +4833,7 @@ static long ext4_zero_range(struct file
+                                round_down(offset, 1 << blkbits)) >> blkbits,
+                               new_size, flags, mode);
+               if (ret)
+-                      goto out_mutex;
++                      goto out_dio;
+       }
+@@ -4844,10 +4842,6 @@ static long ext4_zero_range(struct file
+               flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+                         EXT4_EX_NOCACHE);
+-              /* Wait all existing dio workers, newcomers will block on i_mutex */
+-              ext4_inode_block_unlocked_dio(inode);
+-              inode_dio_wait(inode);
+-
+               /*
+                * Prevent page faults from reinstantiating pages we have
+                * released from page cache.
+@@ -4992,8 +4986,13 @@ long ext4_fallocate(struct file *file, i
+                       goto out;
+       }
++      /* Wait all existing dio workers, newcomers will block on i_mutex */
++      ext4_inode_block_unlocked_dio(inode);
++      inode_dio_wait(inode);
++
+       ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+                                    flags, mode);
++      ext4_inode_resume_unlocked_dio(inode);
+       if (ret)
+               goto out;
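[ Editor's summary: the net ordering in the fallocate paths after this
  series, reconstructed from the hunks above (a sketch, not verbatim
  kernel text). ]

    /*
     * Lock/waiting order in the ext4 fallocate paths after this series:
     *
     *   mutex_lock(&inode->i_mutex);             // VFS-level serialization
     *   ext4_inode_block_unlocked_dio(inode);    // stop new unlocked DIO
     *   inode_dio_wait(inode);                   // drain DIO in flight
     *   down_write(&EXT4_I(inode)->i_mmap_sem);  // fence page faults
     *   ... truncate page cache, start journal handle, shift/alloc blocks ...
     *
     * released in reverse order on the out_mmap/out_dio/out_mutex paths.
     */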
diff --git a/queue-4.4/mtd-brcmnand-fix-v7.1-register-offsets.patch b/queue-4.4/mtd-brcmnand-fix-v7.1-register-offsets.patch
new file mode 100644 (file)
index 0000000..b4d6aea
--- /dev/null
@@ -0,0 +1,73 @@
+From d267aefc54a28efc5bda7f009598dc83b5f98734 Mon Sep 17 00:00:00 2001
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Wed, 24 Feb 2016 16:07:23 -0800
+Subject: mtd: brcmnand: Fix v7.1 register offsets
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+commit d267aefc54a28efc5bda7f009598dc83b5f98734 upstream.
+
+The BRCMNAND controller revision 7.1 is almost 100% compatible with the
+previous v6.0 register offset layout, except for the Correctable Error
+Reporting Threshold registers. Fix this by adding another table with the
+correct offsets for CORR_THRESHOLD and CORR_THRESHOLD_EXT.
+
+Fixes: 27c5b17cd1b1 ("mtd: nand: add NAND driver "library" for Broadcom STB NAND controller")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/brcmnand/brcmnand.c |   34 +++++++++++++++++++++++++++++++++-
+ 1 file changed, 33 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/brcmnand/brcmnand.c
+@@ -309,6 +309,36 @@ static const u16 brcmnand_regs_v60[] = {
+       [BRCMNAND_FC_BASE]              = 0x400,
+ };
++/* BRCMNAND v7.1 */
++static const u16 brcmnand_regs_v71[] = {
++      [BRCMNAND_CMD_START]            =  0x04,
++      [BRCMNAND_CMD_EXT_ADDRESS]      =  0x08,
++      [BRCMNAND_CMD_ADDRESS]          =  0x0c,
++      [BRCMNAND_INTFC_STATUS]         =  0x14,
++      [BRCMNAND_CS_SELECT]            =  0x18,
++      [BRCMNAND_CS_XOR]               =  0x1c,
++      [BRCMNAND_LL_OP]                =  0x20,
++      [BRCMNAND_CS0_BASE]             =  0x50,
++      [BRCMNAND_CS1_BASE]             =     0,
++      [BRCMNAND_CORR_THRESHOLD]       =  0xdc,
++      [BRCMNAND_CORR_THRESHOLD_EXT]   =  0xe0,
++      [BRCMNAND_UNCORR_COUNT]         =  0xfc,
++      [BRCMNAND_CORR_COUNT]           = 0x100,
++      [BRCMNAND_CORR_EXT_ADDR]        = 0x10c,
++      [BRCMNAND_CORR_ADDR]            = 0x110,
++      [BRCMNAND_UNCORR_EXT_ADDR]      = 0x114,
++      [BRCMNAND_UNCORR_ADDR]          = 0x118,
++      [BRCMNAND_SEMAPHORE]            = 0x150,
++      [BRCMNAND_ID]                   = 0x194,
++      [BRCMNAND_ID_EXT]               = 0x198,
++      [BRCMNAND_LL_RDATA]             = 0x19c,
++      [BRCMNAND_OOB_READ_BASE]        = 0x200,
++      [BRCMNAND_OOB_READ_10_BASE]     =     0,
++      [BRCMNAND_OOB_WRITE_BASE]       = 0x280,
++      [BRCMNAND_OOB_WRITE_10_BASE]    =     0,
++      [BRCMNAND_FC_BASE]              = 0x400,
++};
++
+ enum brcmnand_cs_reg {
+       BRCMNAND_CS_CFG_EXT = 0,
+       BRCMNAND_CS_CFG,
+@@ -404,7 +434,9 @@ static int brcmnand_revision_init(struct
+       }
+       /* Register offsets */
+-      if (ctrl->nand_version >= 0x0600)
++      if (ctrl->nand_version >= 0x0701)
++              ctrl->reg_offsets = brcmnand_regs_v71;
++      else if (ctrl->nand_version >= 0x0600)
+               ctrl->reg_offsets = brcmnand_regs_v60;
+       else if (ctrl->nand_version >= 0x0500)
+               ctrl->reg_offsets = brcmnand_regs_v50;
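[ Editor's illustration (hypothetical names, standalone C): the table
  pattern this fix extends. Each controller revision gets an array mapping
  logical register IDs to MMIO offsets, 0 marks a register the revision
  lacks, and probe picks the newest table the hardware version supports.
  The v7.1 CORR_THRESHOLD offset 0xdc is from the patch; the v6.0 value
  here is illustrative only. ]

    #include <stdint.h>
    #include <stdio.h>

    enum demo_reg { DEMO_CMD_START, DEMO_CORR_THRESHOLD, DEMO_REG_MAX };

    static const uint16_t demo_regs_v60[DEMO_REG_MAX] = {
            [DEMO_CMD_START]      = 0x04,
            [DEMO_CORR_THRESHOLD] = 0xc0,   /* illustrative pre-7.1 offset */
    };

    static const uint16_t demo_regs_v71[DEMO_REG_MAX] = {
            [DEMO_CMD_START]      = 0x04,
            [DEMO_CORR_THRESHOLD] = 0xdc,   /* the v7.1 relocation */
    };

    static const uint16_t *demo_pick_regs(unsigned int version)
    {
            if (version >= 0x0701)
                    return demo_regs_v71;
            return demo_regs_v60;
    }

    int main(void)
    {
            printf("v7.1 CORR_THRESHOLD at 0x%x\n",
                   demo_pick_regs(0x0701)[DEMO_CORR_THRESHOLD]);
            return 0;
    }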
diff --git a/queue-4.4/mtd-nand-drop-mtd.owner-requirement-in-nand_scan.patch b/queue-4.4/mtd-nand-drop-mtd.owner-requirement-in-nand_scan.patch
new file mode 100644 (file)
index 0000000..20a7034
--- /dev/null
@@ -0,0 +1,61 @@
+From 20c07a5bf094198ff2382aa5e7c930b3c9807792 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ezequiel=20Garc=C3=ADa?= <ezequiel@vanguardiasur.com.ar>
+Date: Fri, 1 Apr 2016 18:29:23 -0300
+Subject: mtd: nand: Drop mtd.owner requirement in nand_scan
+
+From: Ezequiel García <ezequiel@vanguardiasur.com.ar>
+
+commit 20c07a5bf094198ff2382aa5e7c930b3c9807792 upstream.
+
+Since commit 807f16d4db95 ("mtd: core: set some defaults
+when dev.parent is set"), it's now legal for drivers
+to call nand_scan and nand_scan_ident without setting
+mtd.owner.
+
+Drop the check and, while at it, remove the BUG() abuse.
+
+Fixes: 807f16d4db95 ("mtd: core: set some defaults when dev.parent is set")
+Signed-off-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+[Brian: editorial note - while commit 807f16d4db95 wasn't explicitly
+    broken, some follow-up commits in the v4.4 release broke a few
+    drivers, since they would hit this BUG() if they used nand_scan()
+    and were built as modules]
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/nand_base.c |   10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -3979,7 +3979,6 @@ static int nand_dt_init(struct mtd_info
+  * This is the first phase of the normal nand_scan() function. It reads the
+  * flash ID and sets up MTD fields accordingly.
+  *
+- * The mtd->owner field must be set to the module of the caller.
+  */
+ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+                   struct nand_flash_dev *table)
+@@ -4403,19 +4402,12 @@ EXPORT_SYMBOL(nand_scan_tail);
+  *
+  * This fills out all the uninitialized function pointers with the defaults.
+  * The flash ID is read and the mtd/chip structures are filled with the
+- * appropriate values. The mtd->owner field must be set to the module of the
+- * caller.
++ * appropriate values.
+  */
+ int nand_scan(struct mtd_info *mtd, int maxchips)
+ {
+       int ret;
+-      /* Many callers got this wrong, so check for it for a while... */
+-      if (!mtd->owner && caller_is_module()) {
+-              pr_crit("%s called with NULL mtd->owner!\n", __func__);
+-              BUG();
+-      }
+-
+       ret = nand_scan_ident(mtd, maxchips, NULL);
+       if (!ret)
+               ret = nand_scan_tail(mtd);
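[ Editor's illustration (hypothetical names, standalone C): the shape of
  the check being deleted. Before 807f16d4db95 modular callers had to set
  mtd->owner or hit BUG(); with core-provided defaults the check is
  unnecessary, and even where such validation is wanted, an error return
  beats crashing the kernel. ]

    #include <errno.h>

    struct demo_mtd { const void *owner; };

    int demo_scan(struct demo_mtd *mtd, int caller_is_module)
    {
            /* the old nand_scan() called BUG() here instead of returning */
            if (!mtd->owner && caller_is_module)
                    return -EINVAL;
            return 0;
    }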
diff --git a/queue-4.4/mtd-spi-nor-remove-micron_quad_enable.patch b/queue-4.4/mtd-spi-nor-remove-micron_quad_enable.patch
new file mode 100644 (file)
index 0000000..8348a95
--- /dev/null
@@ -0,0 +1,112 @@
+From 3b5394a3ccffbfa1d1d448d48742853a862822c4 Mon Sep 17 00:00:00 2001
+From: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+Date: Wed, 3 Feb 2016 14:26:46 +0100
+Subject: mtd: spi-nor: remove micron_quad_enable()
+
+From: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+
+commit 3b5394a3ccffbfa1d1d448d48742853a862822c4 upstream.
+
+This patch removes the micron_quad_enable() function, which forced the
+Quad SPI mode. Once this mode is enabled, the Micron memory expects ALL
+commands to use the SPI 4-4-4 protocol. Hence a failure occurs when
+calling spi_nor_wait_till_ready() right after the update of the Enhanced
+Volatile Configuration Register (EVCR) in micron_quad_enable(), as
+the SPI controller driver is not aware of the protocol change.
+
+Since there is almost no performance increase using Fast Read 4-4-4
+commands instead of Fast Read 1-1-4 commands, we keep using the
+Extended SPI mode rather than enabling the Quad SPI mode.
+
+Let's take the example of the pretty standard use of 8 dummy cycles during
+Fast Read operations on 64KB erase sectors:
+
+Fast Read 1-1-4 requires 8 cycles for the command, then 24 cycles for the
+3-byte address, followed by 8 dummy clock cycles and finally 65536*2 cycles
+for the read data; so 131112 clock cycles in total.
+
+On the other hand, Fast Read 4-4-4 would require 2 cycles for the
+command, then 6 cycles for the 3-byte address, followed by 8 dummy clock
+cycles and finally 65536*2 cycles for the read data; so 131088 clock
+cycles. The theoretical bandwidth increase is 0.0%.
+
+Now using Fast Read operations on 512-byte pages:
+Fast Read 1-1-4 needs 8+24+8+(512*2) = 1064 clock cycles, whereas Fast
+Read 4-4-4 would require 2+6+8+(512*2) = 1040 clock cycles. Hence the
+theoretical bandwidth increase is 2.3%.
+Consecutive reads of non-sequential pages are not a relevant use case, so
+the Quad SPI mode is not worth it.
+
+mtd_speedtest seems to confirm these figures.
+
+Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+Fixes: 548cd3ab54da ("mtd: spi-nor: Add quad I/O support for Micron SPI NOR")
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/spi-nor/spi-nor.c |   46 ------------------------------------------
+ 1 file changed, 1 insertion(+), 45 deletions(-)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1067,45 +1067,6 @@ static int spansion_quad_enable(struct s
+       return 0;
+ }
+-static int micron_quad_enable(struct spi_nor *nor)
+-{
+-      int ret;
+-      u8 val;
+-
+-      ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+-      if (ret < 0) {
+-              dev_err(nor->dev, "error %d reading EVCR\n", ret);
+-              return ret;
+-      }
+-
+-      write_enable(nor);
+-
+-      /* set EVCR, enable quad I/O */
+-      nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
+-      ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
+-      if (ret < 0) {
+-              dev_err(nor->dev, "error while writing EVCR register\n");
+-              return ret;
+-      }
+-
+-      ret = spi_nor_wait_till_ready(nor);
+-      if (ret)
+-              return ret;
+-
+-      /* read EVCR and check it */
+-      ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+-      if (ret < 0) {
+-              dev_err(nor->dev, "error %d reading EVCR\n", ret);
+-              return ret;
+-      }
+-      if (val & EVCR_QUAD_EN_MICRON) {
+-              dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
+-              return -EINVAL;
+-      }
+-
+-      return 0;
+-}
+-
+ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
+ {
+       int status;
+@@ -1119,12 +1080,7 @@ static int set_quad_mode(struct spi_nor
+               }
+               return status;
+       case SNOR_MFR_MICRON:
+-              status = micron_quad_enable(nor);
+-              if (status) {
+-                      dev_err(nor->dev, "Micron quad-read not enabled\n");
+-                      return -EINVAL;
+-              }
+-              return status;
++              return 0;
+       default:
+               status = spansion_quad_enable(nor);
+               if (status) {
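[ Editor's check of the cycle arithmetic in the commit message above — a
  standalone demo, not driver code: command + address + dummy + data
  cycles for Fast Read 1-1-4 versus 4-4-4 with a 3-byte address and 8
  dummy cycles. ]

    #include <stdio.h>

    static long cycles(long cmd, long addr, long dummy, long data_bytes)
    {
            return cmd + addr + dummy + data_bytes * 2; /* 2 clocks/byte on 4 lines */
    }

    int main(void)
    {
            /* 64 KiB read: 131112 vs 131088 cycles, ~0.0% difference */
            printf("64KiB: 1-1-4=%ld 4-4-4=%ld\n",
                   cycles(8, 24, 8, 65536), cycles(2, 6, 8, 65536));
            /* 512-byte page: 1064 vs 1040 cycles, ~2.3% difference */
            printf("512B:  1-1-4=%ld 4-4-4=%ld\n",
                   cycles(8, 24, 8, 512), cycles(2, 6, 8, 512));
            return 0;
    }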
diff --git a/queue-4.4/perf-hists-browser-only-offer-symbol-scripting-when-a-symbol-is-under-the-cursor.patch b/queue-4.4/perf-hists-browser-only-offer-symbol-scripting-when-a-symbol-is-under-the-cursor.patch
new file mode 100644 (file)
index 0000000..b7e9ed8
--- /dev/null
@@ -0,0 +1,50 @@
+From c221acb0f970d3b80d72c812cda19c121acf5d52 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung@kernel.org>
+Date: Thu, 21 Jan 2016 19:50:09 -0300
+Subject: perf hists browser: Only offer symbol scripting when a symbol is under the cursor
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+commit c221acb0f970d3b80d72c812cda19c121acf5d52 upstream.
+
+When this feature was introduced, a check was made that there was a
+resolved symbol under the cursor. That check got lost in commit
+ea7cd5923309 ("perf hists browser: Split popup menu actions - part 2"),
+so reinstate it.
+
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Wang Nan <wangnan0@huawei.com>
+Fixes: ea7cd5923309 ("perf hists browser: Split popup menu actions - part 2")
+Link: http://lkml.kernel.org/r/1452960197-5323-9-git-send-email-namhyung@kernel.org
+[ Carved out from a larger patch ]
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/ui/browsers/hists.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -2059,10 +2059,12 @@ skip_annotation:
+                        *
+                        * See hist_browser__show_entry.
+                        */
+-                      nr_options += add_script_opt(browser,
+-                                                   &actions[nr_options],
+-                                                   &options[nr_options],
+-                                                   NULL, browser->selection->sym);
++                      if (sort__has_sym && browser->selection->sym) {
++                              nr_options += add_script_opt(browser,
++                                                           &actions[nr_options],
++                                                           &options[nr_options],
++                                                           NULL, browser->selection->sym);
++                      }
+               }
+               nr_options += add_script_opt(browser, &actions[nr_options],
+                                            &options[nr_options], NULL, NULL);
diff --git a/queue-4.4/perf-stat-document-detailed-option.patch b/queue-4.4/perf-stat-document-detailed-option.patch
new file mode 100644 (file)
index 0000000..81f53ac
--- /dev/null
@@ -0,0 +1,57 @@
+From f594bae08183fb6b57db55387794ece3e1edf6f6 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Mon, 7 Mar 2016 16:44:44 -0300
+Subject: perf stat: Document --detailed option
+
+From: Borislav Petkov <bp@suse.de>
+
+commit f594bae08183fb6b57db55387794ece3e1edf6f6 upstream.
+
+I'm surprised this has remained undocumented since at least 2011. And it
+is actually a very useful switch, as Steve and I came to realize recently.
+
+Add the text from
+
+  2cba3ffb9a9d ("perf stat: Add -d -d and -d -d -d options to show more CPU events")
+
+which added the incrementing aspect to -d.
+
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: Davidlohr Bueso <dbueso@suse.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Mel Gorman <mgorman@suse.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 2cba3ffb9a9d ("perf stat: Add -d -d and -d -d -d options to show more CPU events")
+Link: http://lkml.kernel.org/r/1457347294-32546-1-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/Documentation/perf-stat.txt |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/tools/perf/Documentation/perf-stat.txt
++++ b/tools/perf/Documentation/perf-stat.txt
+@@ -62,6 +62,14 @@ OPTIONS
+ --scale::
+       scale/normalize counter values
++-d::
++--detailed::
++      print more detailed statistics, can be specified up to 3 times
++
++         -d:          detailed events, L1 and LLC data cache
++        -d -d:     more detailed events, dTLB and iTLB events
++     -d -d -d:     very detailed events, adding prefetch events
++
+ -r::
+ --repeat=<n>::
+       repeat command and print average + stddev (max: 100). 0 means forever.
diff --git a/queue-4.4/perf-tools-handle-spaces-in-file-names-obtained-from-proc-pid-maps.patch b/queue-4.4/perf-tools-handle-spaces-in-file-names-obtained-from-proc-pid-maps.patch
new file mode 100644 (file)
index 0000000..ef41953
--- /dev/null
@@ -0,0 +1,38 @@
+From 89fee59b504f86925894fcc9ba79d5c933842f93 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marcin=20=C5=9Alusarz?= <marcin.slusarz@gmail.com>
+Date: Tue, 19 Jan 2016 20:03:03 +0100
+Subject: perf tools: handle spaces in file names obtained from /proc/pid/maps
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marcin Ślusarz <marcin.slusarz@gmail.com>
+
+commit 89fee59b504f86925894fcc9ba79d5c933842f93 upstream.
+
+Steam frequently puts game binaries in folders with spaces.
+
+Note: "(deleted)" markers are now treated as part of the file name.
+
+Signed-off-by: Marcin Ślusarz <marcin.slusarz@gmail.com>
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Fixes: 6064803313ba ("perf tools: Use sscanf for parsing /proc/pid/maps")
+Link: http://lkml.kernel.org/r/20160119190303.GA17579@marcin-Inspiron-7720
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/event.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -274,7 +274,7 @@ int perf_event__synthesize_mmap_events(s
+               strcpy(execname, "");
+               /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
+-              n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
++              n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
+                      &event->mmap2.start, &event->mmap2.len, prot,
+                      &event->mmap2.pgoff, &event->mmap2.maj,
+                      &event->mmap2.min,
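[ Editor's demo (standalone, not perf code): why the format switch matters.
  "%s" stops at the first whitespace, so a mapped file such as
  "/home/user/My Game/bin" loses everything after "My"; "%[^\n]" keeps the
  whole remainder of the line, spaces (and "(deleted)" markers) included. ]

    #include <stdio.h>

    int main(void)
    {
            const char *line = "00400000-0040c000 r-xp /home/user/My Game/bin";
            char range[32], prot[8], with_s[256], with_set[256];

            sscanf(line, "%31s %7s %255s", range, prot, with_s);
            sscanf(line, "%31s %7s %255[^\n]", range, prot, with_set);
            printf("%%s     -> '%s'\n", with_s);    /* '/home/user/My' */
            printf("%%[^\\n] -> '%s'\n", with_set); /* full path with spaces */
            return 0;
    }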
diff --git a/queue-4.4/series b/queue-4.4/series
index 14cb561b5f2ee4f3b0b2a1d5c8e1337fac2fecb5..ca46827d2f107043ddda65cec5a6cf49257f8c69 100644 (file)
@@ -137,3 +137,13 @@ spi-rockchip-modify-dma-max-burst-to-1.patch
 x86-mm-kmmio-fix-mmiotrace-for-hugepages.patch
 ext4-fix-null-pointer-dereference-in-ext4_mark_inode_dirty.patch
 serial-sh-sci-remove-cpufreq-notifier-to-fix-crash-deadlock.patch
+mtd-spi-nor-remove-micron_quad_enable.patch
+mtd-brcmnand-fix-v7.1-register-offsets.patch
+mtd-nand-drop-mtd.owner-requirement-in-nand_scan.patch
+perf-hists-browser-only-offer-symbol-scripting-when-a-symbol-is-under-the-cursor.patch
+perf-tools-handle-spaces-in-file-names-obtained-from-proc-pid-maps.patch
+perf-stat-document-detailed-option.patch
+ext4-fix-races-between-page-faults-and-hole-punching.patch
+ext4-move-unlocked-dio-protection-from-ext4_alloc_file_blocks.patch
+ext4-fix-races-between-buffered-io-and-collapse-insert-range.patch
+ext4-fix-races-of-writeback-with-punch-hole-and-zero-range.patch