From: Greg Kroah-Hartman
Date: Mon, 12 Jun 2017 11:38:20 +0000 (+0200)
Subject: 4.9-stable patches
X-Git-Tag: v3.18.57~17
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=921148ed9960fa025b92e677565593d816dfe667;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
	fs-add-i_blocksize.patch
---

diff --git a/queue-4.9/fs-add-i_blocksize.patch b/queue-4.9/fs-add-i_blocksize.patch
new file mode 100644
index 00000000000..fe85c144c9c
--- /dev/null
+++ b/queue-4.9/fs-add-i_blocksize.patch
@@ -0,0 +1,545 @@
+From 93407472a21b82f39c955ea7787e5bc7da100642 Mon Sep 17 00:00:00 2001
+From: Fabian Frederick
+Date: Mon, 27 Feb 2017 14:28:32 -0800
+Subject: fs: add i_blocksize()
+
+From: Fabian Frederick
+
+commit 93407472a21b82f39c955ea7787e5bc7da100642 upstream.
+
+Replace all 1 << inode->i_blkbits and (1 << inode->i_blkbits) in fs
+branch.
+
+This patch also fixes multiple checkpatch warnings: WARNING: Prefer
+'unsigned int' to bare use of 'unsigned'
+
+Thanks to Andrew Morton for suggesting more appropriate function instead
+of macro.
+
+[geliangtang@gmail.com: truncate: use i_blocksize()]
+  Link: http://lkml.kernel.org/r/9c8b2cd83c8f5653805d43debde9fa8817e02fc4.1484895804.git.geliangtang@gmail.com
+Link: http://lkml.kernel.org/r/1481319905-10126-1-git-send-email-fabf@skynet.be
+Signed-off-by: Fabian Frederick
+Signed-off-by: Geliang Tang
+Cc: Alexander Viro
+Cc: Ross Zwisler
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/block_dev.c | 2 +-
+ fs/btrfs/file.c | 2 +-
+ fs/buffer.c | 12 ++++++------
+ fs/ceph/addr.c | 2 +-
+ fs/direct-io.c | 2 +-
+ fs/ext4/inode.c | 8 ++++----
+ fs/ext4/mballoc.c | 2 +-
+ fs/ext4/move_extent.c | 2 +-
+ fs/iomap.c | 4 ++--
+ fs/jfs/super.c | 4 ++--
+ fs/mpage.c | 2 +-
+ fs/nfsd/blocklayout.c | 6 +++---
+ fs/nilfs2/btnode.c | 2 +-
+ fs/nilfs2/inode.c | 4 ++--
+ fs/nilfs2/mdt.c | 4 ++--
+ fs/nilfs2/segment.c | 2 +-
+ fs/ocfs2/aops.c | 2 +-
+ fs/ocfs2/file.c | 2 +-
+ fs/orangefs/orangefs-utils.c | 4 ++--
+ fs/reiserfs/file.c | 2 +-
+ fs/reiserfs/inode.c | 2 +-
+ fs/stat.c | 2 +-
+ fs/udf/inode.c | 2 +-
+ fs/xfs/xfs_aops.c | 16 ++++++++--------
+ fs/xfs/xfs_file.c | 4 ++--
+ include/linux/fs.h | 5 +++++
+ mm/truncate.c | 2 +-
+ 27 files changed, 54 insertions(+), 49 deletions(-)
+
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -713,7 +713,7 @@ struct block_device *bdget(dev_t dev)
+ bdev->bd_contains = NULL;
+ bdev->bd_super = NULL;
+ bdev->bd_inode = inode;
+- bdev->bd_block_size = (1 << inode->i_blkbits);
++ bdev->bd_block_size = i_blocksize(inode);
+ bdev->bd_part_count = 0;
+ bdev->bd_invalidated = 0;
+ inode->i_mode = S_IFBLK;
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2842,7 +2842,7 @@ static long btrfs_fallocate(struct file
+ if (!ret)
+ ret = btrfs_prealloc_file_range(inode, mode,
+ range->start,
+- range->len, 1 << inode->i_blkbits,
++ range->len, i_blocksize(inode),
+ offset + len, &alloc_hint);
+ else
+ btrfs_free_reserved_data_space(inode, range->start,
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2353,7 +2353,7 @@ static int cont_expand_zero(struct file
+ loff_t pos, loff_t *bytes)
+ {
+ struct inode *inode = mapping->host;
+- unsigned blocksize = 1 << inode->i_blkbits;
++ unsigned int blocksize = i_blocksize(inode);
+ struct page *page;
+ void *fsdata;
+ pgoff_t index, curidx;
+@@ -2433,8 +2433,8 @@ int cont_write_begin(struct file *file,
+ get_block_t *get_block, loff_t *bytes)
+ {
+ struct inode *inode = mapping->host;
+- unsigned blocksize = 1 << inode->i_blkbits;
+- unsigned zerofrom;
++ unsigned int blocksize = i_blocksize(inode);
++ unsigned int zerofrom;
+ int err;
+
+ err = cont_expand_zero(file, mapping, pos, bytes);
+@@ -2796,7 +2796,7 @@ int nobh_truncate_page(struct address_sp
+ struct buffer_head map_bh;
+ int err;
+
+- blocksize = 1 << inode->i_blkbits;
++ blocksize = i_blocksize(inode);
+ length = offset & (blocksize - 1);
+
+ /* Block boundary? Nothing to do */
+@@ -2874,7 +2874,7 @@ int block_truncate_page(struct address_s
+ struct buffer_head *bh;
+ int err;
+
+- blocksize = 1 << inode->i_blkbits;
++ blocksize = i_blocksize(inode);
+ length = offset & (blocksize - 1);
+
+ /* Block boundary? Nothing to do */
+@@ -2986,7 +2986,7 @@ sector_t generic_block_bmap(struct addre
+ struct inode *inode = mapping->host;
+ tmp.b_state = 0;
+ tmp.b_blocknr = 0;
+- tmp.b_size = 1 << inode->i_blkbits;
++ tmp.b_size = i_blocksize(inode);
+ get_block(inode, block, &tmp, 0);
+ return tmp.b_blocknr;
+ }
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -745,7 +745,7 @@ static int ceph_writepages_start(struct
+ struct pagevec pvec;
+ int done = 0;
+ int rc = 0;
+- unsigned wsize = 1 << inode->i_blkbits;
++ unsigned int wsize = i_blocksize(inode);
+ struct ceph_osd_request *req = NULL;
+ int do_sync = 0;
+ loff_t snap_size, i_size;
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -587,7 +587,7 @@ static int dio_set_defer_completion(stru
+ /*
+ * Call into the fs to map some more disk blocks. We record the current number
+ * of available blocks at sdio->blocks_available. These are in units of the
+- * fs blocksize, (1 << inode->i_blkbits).
++ * fs blocksize, i_blocksize(inode).
+ *
+ * The fs is allowed to map lots of blocks at once. If it wants to do that,
+ * it uses the passed inode-relative block number as the file offset, as usual.
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2205,7 +2205,7 @@ static int mpage_process_page_bufs(struc
+ {
+ struct inode *inode = mpd->inode;
+ int err;
+- ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
++ ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
+ >> inode->i_blkbits;
+
+ do {
+@@ -3454,14 +3454,14 @@ static ssize_t ext4_direct_IO_write(stru
+ * writes need zeroing either because they can race with page
+ * faults or because they use partial blocks.
+ */
+- if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
++ if (round_down(offset, i_blocksize(inode)) >= inode->i_size &&
+ ext4_aligned_io(inode, offset, count))
+ get_block_func = ext4_dio_get_block;
+ else
+ get_block_func = ext4_dax_get_block;
+ dio_flags = DIO_LOCKING;
+ } else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
+- round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
++ round_down(offset, i_blocksize(inode)) >= inode->i_size) {
+ get_block_func = ext4_dio_get_block;
+ dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
+ } else if (is_sync_kiocb(iocb)) {
+@@ -5048,7 +5048,7 @@ static void ext4_wait_for_tail_page_comm
+ * do. We do the check mainly to optimize the common PAGE_SIZE ==
+ * blocksize case
+ */
+- if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
++ if (offset > PAGE_SIZE - i_blocksize(inode))
+ return;
+ while (1) {
+ page = find_lock_page(inode->i_mapping,
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -838,7 +838,7 @@ static int ext4_mb_init_cache(struct pag
+ inode = page->mapping->host;
+ sb = inode->i_sb;
+ ngroups = ext4_get_groups_count(sb);
+- blocksize = 1 << inode->i_blkbits;
++ blocksize = i_blocksize(inode);
+ blocks_per_page = PAGE_SIZE / blocksize;
+
+ groups_per_page = blocks_per_page >> 1;
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -187,7 +187,7 @@ mext_page_mkuptodate(struct page *page,
+ if (PageUptodate(page))
+ return 0;
+
+- blocksize = 1 << inode->i_blkbits;
++ blocksize = i_blocksize(inode);
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize, 0);
+
+--- a/fs/iomap.c
++++ b/fs/iomap.c
+@@ -419,8 +419,8 @@ int
+ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ struct iomap_ops *ops)
+ {
+- unsigned blocksize = (1 << inode->i_blkbits);
+- unsigned off = pos & (blocksize - 1);
++ unsigned int blocksize = i_blocksize(inode);
++ unsigned int off = pos & (blocksize - 1);
+
+ /* Block boundary? Nothing to do */
+ if (!off)
+--- a/fs/jfs/super.c
++++ b/fs/jfs/super.c
+@@ -758,7 +758,7 @@ static ssize_t jfs_quota_read(struct sup
+ sb->s_blocksize - offset : toread;
+
+ tmp_bh.b_state = 0;
+- tmp_bh.b_size = 1 << inode->i_blkbits;
++ tmp_bh.b_size = i_blocksize(inode);
+ err = jfs_get_block(inode, blk, &tmp_bh, 0);
+ if (err)
+ return err;
+@@ -798,7 +798,7 @@ static ssize_t jfs_quota_write(struct su
+ sb->s_blocksize - offset : towrite;
+
+ tmp_bh.b_state = 0;
+- tmp_bh.b_size = 1 << inode->i_blkbits;
++ tmp_bh.b_size = i_blocksize(inode);
+ err = jfs_get_block(inode, blk, &tmp_bh, 1);
+ if (err)
+ goto out;
+--- a/fs/mpage.c
++++ b/fs/mpage.c
+@@ -115,7 +115,7 @@ map_buffer_to_page(struct page *page, st
+ SetPageUptodate(page);
+ return;
+ }
+- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
++ create_empty_buffers(page, i_blocksize(inode), 0);
+ }
+ head = page_buffers(page);
+ page_bh = head;
+--- a/fs/nfsd/blocklayout.c
++++ b/fs/nfsd/blocklayout.c
+@@ -23,7 +23,7 @@ nfsd4_block_proc_layoutget(struct inode
+ {
+ struct nfsd4_layout_seg *seg = &args->lg_seg;
+ struct super_block *sb = inode->i_sb;
+- u32 block_size = (1 << inode->i_blkbits);
++ u32 block_size = i_blocksize(inode);
+ struct pnfs_block_extent *bex;
+ struct iomap iomap;
+ u32 device_generation = 0;
+@@ -180,7 +180,7 @@ nfsd4_block_proc_layoutcommit(struct ino
+ int nr_iomaps;
+
+ nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
+- lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
++ lcp->lc_up_len, &iomaps, i_blocksize(inode));
+ if (nr_iomaps < 0)
+ return nfserrno(nr_iomaps);
+
+@@ -372,7 +372,7 @@ nfsd4_scsi_proc_layoutcommit(struct inod
+ int nr_iomaps;
+
+ nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout,
+- lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
++ lcp->lc_up_len, &iomaps, i_blocksize(inode));
+ if (nr_iomaps < 0)
+ return nfserrno(nr_iomaps);
+
+--- a/fs/nilfs2/btnode.c
++++ b/fs/nilfs2/btnode.c
+@@ -50,7 +50,7 @@ nilfs_btnode_create_block(struct address
+ brelse(bh);
+ BUG();
+ }
+- memset(bh->b_data, 0, 1 << inode->i_blkbits);
++ memset(bh->b_data, 0, i_blocksize(inode));
+ bh->b_bdev = inode->i_sb->s_bdev;
+ bh->b_blocknr = blocknr;
+ set_buffer_mapped(bh);
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -51,7 +51,7 @@ void nilfs_inode_add_blocks(struct inode
+ {
+ struct nilfs_root *root = NILFS_I(inode)->i_root;
+
+- inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
++ inode_add_bytes(inode, i_blocksize(inode) * n);
+ if (root)
+ atomic64_add(n, &root->blocks_count);
+ }
+@@ -60,7 +60,7 @@ void nilfs_inode_sub_blocks(struct inode
+ {
+ struct nilfs_root *root = NILFS_I(inode)->i_root;
+
+- inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
++ inode_sub_bytes(inode, i_blocksize(inode) * n);
+ if (root)
+ atomic64_sub(n, &root->blocks_count);
+ }
+--- a/fs/nilfs2/mdt.c
++++ b/fs/nilfs2/mdt.c
+@@ -57,7 +57,7 @@ nilfs_mdt_insert_new_block(struct inode
+ set_buffer_mapped(bh);
+
+ kaddr = kmap_atomic(bh->b_page);
+- memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
++ memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
+ if (init_block)
+ init_block(inode, bh, kaddr);
+ flush_dcache_page(bh->b_page);
+@@ -501,7 +501,7 @@ void nilfs_mdt_set_entry_size(struct ino
+ struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+
+ mi->mi_entry_size = entry_size;
+- mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
++ mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
+ mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
+ }
+
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -723,7 +723,7 @@ static size_t nilfs_lookup_dirty_data_bu
+
+ lock_page(page);
+ if (!page_has_buffers(page))
+- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
++ create_empty_buffers(page, i_blocksize(inode), 0);
+ unlock_page(page);
+
+ bh = head = page_buffers(page);
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -599,7 +599,7 @@ int ocfs2_map_page_blocks(struct page *p
+ int ret = 0;
+ struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
+ unsigned int block_end, block_start;
+- unsigned int bsize = 1 << inode->i_blkbits;
++ unsigned int bsize = i_blocksize(inode);
+
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, bsize, 0);
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -808,7 +808,7 @@ static int ocfs2_write_zero_page(struct
+ /* We know that zero_from is block aligned */
+ for (block_start = zero_from; block_start < zero_to;
+ block_start = block_end) {
+- block_end = block_start + (1 << inode->i_blkbits);
++ block_end = block_start + i_blocksize(inode);
+
+ /*
+ * block_start is block-aligned. Bump it by one to force
+--- a/fs/orangefs/orangefs-utils.c
++++ b/fs/orangefs/orangefs-utils.c
+@@ -306,7 +306,7 @@ int orangefs_inode_getattr(struct inode
+ break;
+ case S_IFDIR:
+ inode->i_size = PAGE_SIZE;
+- orangefs_inode->blksize = (1 << inode->i_blkbits);
++ orangefs_inode->blksize = i_blocksize(inode);
+ spin_lock(&inode->i_lock);
+ inode_set_bytes(inode, inode->i_size);
+ spin_unlock(&inode->i_lock);
+@@ -316,7 +316,7 @@ int orangefs_inode_getattr(struct inode
+ if (new) {
+ inode->i_size = (loff_t)strlen(new_op->
+ downcall.resp.getattr.link_target);
+- orangefs_inode->blksize = (1 << inode->i_blkbits);
++ orangefs_inode->blksize = i_blocksize(inode);
+ ret = strscpy(orangefs_inode->link_target,
+ new_op->downcall.resp.getattr.link_target,
+ ORANGEFS_NAME_MAX);
+--- a/fs/reiserfs/file.c
++++ b/fs/reiserfs/file.c
+@@ -189,7 +189,7 @@ int reiserfs_commit_page(struct inode *i
+ int ret = 0;
+
+ th.t_trans_id = 0;
+- blocksize = 1 << inode->i_blkbits;
++ blocksize = i_blocksize(inode);
+
+ if (logit) {
+ reiserfs_write_lock(s);
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -524,7 +524,7 @@ static int reiserfs_get_blocks_direct_io
+ * referenced in convert_tail_for_hole() that may be called from
+ * reiserfs_get_block()
+ */
+- bh_result->b_size = (1 << inode->i_blkbits);
++ bh_result->b_size = i_blocksize(inode);
+
+ ret = reiserfs_get_block(inode, iblock, bh_result,
+ create | GET_BLOCK_NO_DANGLE);
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -31,7 +31,7 @@ void generic_fillattr(struct inode *inod
+ stat->atime = inode->i_atime;
+ stat->mtime = inode->i_mtime;
+ stat->ctime = inode->i_ctime;
+- stat->blksize = (1 << inode->i_blkbits);
++ stat->blksize = i_blocksize(inode);
+ stat->blocks = inode->i_blocks;
+ }
+
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1214,7 +1214,7 @@ int udf_setsize(struct inode *inode, lof
+ {
+ int err;
+ struct udf_inode_info *iinfo;
+- int bsize = 1 << inode->i_blkbits;
++ int bsize = i_blocksize(inode);
+
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -108,9 +108,9 @@ xfs_finish_page_writeback(
+ unsigned int bsize;
+
+ ASSERT(bvec->bv_offset < PAGE_SIZE);
+- ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
++ ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
+ ASSERT(end < PAGE_SIZE);
+- ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);
++ ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
+
+ bh = head = page_buffers(bvec->bv_page);
+
+@@ -349,7 +349,7 @@ xfs_map_blocks(
+ {
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+- ssize_t count = 1 << inode->i_blkbits;
++ ssize_t count = i_blocksize(inode);
+ xfs_fileoff_t offset_fsb, end_fsb;
+ int error = 0;
+ int bmapi_flags = XFS_BMAPI_ENTIRE;
+@@ -759,7 +759,7 @@ xfs_aops_discard_page(
+ break;
+ }
+ next_buffer:
+- offset += 1 << inode->i_blkbits;
++ offset += i_blocksize(inode);
+
+ } while ((bh = bh->b_this_page) != head);
+
+@@ -847,7 +847,7 @@ xfs_writepage_map(
+ LIST_HEAD(submit_list);
+ struct xfs_ioend *ioend, *next;
+ struct buffer_head *bh, *head;
+- ssize_t len = 1 << inode->i_blkbits;
++ ssize_t len = i_blocksize(inode);
+ int error = 0;
+ int count = 0;
+ int uptodate = 1;
+@@ -1250,7 +1250,7 @@ xfs_map_trim_size(
+ offset + mapping_size >= i_size_read(inode)) {
+ /* limit mapping to block that spans EOF */
+ mapping_size = roundup_64(i_size_read(inode) - offset,
+- 1 << inode->i_blkbits);
++ i_blocksize(inode));
+ }
+ if (mapping_size > LONG_MAX)
+ mapping_size = LONG_MAX;
+@@ -1286,7 +1286,7 @@ __xfs_get_blocks(
+ return -EIO;
+
+ offset = (xfs_off_t)iblock << inode->i_blkbits;
+- ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
++ ASSERT(bh_result->b_size >= i_blocksize(inode));
+ size = bh_result->b_size;
+
+ if (!create && offset >= i_size_read(inode))
+@@ -1634,7 +1634,7 @@ xfs_vm_set_page_dirty(
+ if (offset < end_offset)
+ set_buffer_dirty(bh);
+ bh = bh->b_this_page;
+- offset += 1 << inode->i_blkbits;
++ offset += i_blocksize(inode);
+ } while (bh != head);
+ }
+ /*
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -823,7 +823,7 @@ xfs_file_fallocate(
+ if (error)
+ goto out_unlock;
+ } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+- unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
++ unsigned int blksize_mask = i_blocksize(inode) - 1;
+
+ if (offset & blksize_mask || len & blksize_mask) {
+ error = -EINVAL;
+@@ -845,7 +845,7 @@ xfs_file_fallocate(
+ if (error)
+ goto out_unlock;
+ } else if (mode & FALLOC_FL_INSERT_RANGE) {
+- unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
++ unsigned int blksize_mask = i_blocksize(inode) - 1;
+
+ new_size = i_size_read(inode) + len;
+ if (offset & blksize_mask || len & blksize_mask) {
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -705,6 +705,11 @@ struct inode {
+ void *i_private; /* fs or device private pointer */
+ };
+
++static inline unsigned int i_blocksize(const struct inode *node)
++{
++ return (1 << node->i_blkbits);
++}
++
+ static inline int inode_unhashed(struct inode *inode)
+ {
+ return hlist_unhashed(&inode->i_hash);
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -753,7 +753,7 @@ EXPORT_SYMBOL(truncate_setsize);
+ */
+ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
+ {
+- int bsize = 1 << inode->i_blkbits;
++ int bsize = i_blocksize(inode);
+ loff_t rounded_from;
+ struct page *page;
+ pgoff_t index;
diff --git a/queue-4.9/series b/queue-4.9/series
index edda98703a8..e4cac76125e 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -73,6 +73,7 @@ ahci-acer-sa5-271-ssd-not-detected-fix.patch
 cgroup-prevent-kill_css-from-being-called-more-than-once.patch
 input-elantech-add-fujitsu-lifebook-e546-e557-to-force-crc_enabled.patch
 cpuset-consider-dying-css-as-offline.patch
+fs-add-i_blocksize.patch
 ufs-restore-proper-tail-allocation.patch
 fix-ufs_isblockset.patch
 ufs-restore-maintaining-i_blocks.patch
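
For readers unfamiliar with the helper: the queued patch adds i_blocksize() to include/linux/fs.h and converts open-coded "1 << inode->i_blkbits" shifts at each call site. Below is a minimal, stand-alone user-space sketch of the same pattern; the "struct inode" here is a simplified stand-in holding only i_blkbits and is not the kernel's definition, while the helper body mirrors the one the patch adds.

	/*
	 * Sketch of the conversion done by fs-add-i_blocksize.patch.
	 * Only the helper matches the kernel; the rest is illustrative.
	 */
	#include <stdio.h>

	struct inode {
		unsigned char i_blkbits;	/* log2 of the filesystem block size */
	};

	/* Helper introduced by the patch: block size derived from i_blkbits. */
	static inline unsigned int i_blocksize(const struct inode *node)
	{
		return (1 << node->i_blkbits);
	}

	int main(void)
	{
		struct inode inode = { .i_blkbits = 12 };	/* 4096-byte blocks */

		/* Old style: each call site open-codes the shift. */
		unsigned int old_size = 1 << inode.i_blkbits;

		/* New style: call the helper instead. */
		unsigned int new_size = i_blocksize(&inode);

		printf("old=%u new=%u\n", old_size, new_size);
		return old_size == new_size ? 0 : 1;
	}

The two expressions are equivalent; the helper only centralizes the shift so call sites read uniformly and checkpatch's "prefer 'unsigned int'" warnings could be fixed in one pass.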