// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/**
 * fsflag_gfs2flag
 *
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};

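/*
 * Translate the on-disk GFS2_DIF_* flags of an inode into the FS_*_FL flags
 * reported to user space, masking out the JDATA/INHERIT_JDATA variant that
 * does not apply to this inode type.
 */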
static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
{
	int i;
	u32 fsflags = 0;

	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;
	return fsflags;
}

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);

	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 * @fsflags: The FS_* inode flags passed in
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask,
			     const u32 fsflags)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags, oldflags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	oldflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);
	error = vfs_ioc_setflags_prepare(inode, oldflags, fsflags);
	if (error)
		goto out;

	error = -EACCES;
	if (!inode_owner_or_capable(&init_user_ns, inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(&init_user_ns, inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode->i_ctime = current_time(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

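/**
 * gfs2_set_flags - FS_IOC_SETFLAGS: translate and apply FS_*_FL flags
 * @filp: file pointer
 * @ptr: user pointer to the requested FS_*_FL flag value
 *
 * Converts the FS_*_FL flags into their GFS2_DIF_* equivalents, rejects
 * combinations that user space is not allowed to set, and hands the result
 * to do_gfs2_set_flags().
 */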
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags = 0;
	u32 mask;
	int i;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(filp, gfsflags, mask, fsflags);
}

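/**
 * gfs2_getlabel - FS_IOC_GETFSLABEL: copy the lock table name to user space
 * @filp: file pointer
 * @label: user buffer of at least GFS2_LOCKNAME_LEN bytes
 *
 * GFS2 reports the lock table name from the superblock as its label.
 */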
static int gfs2_getlabel(struct file *filp, char __user *label)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
		return -EFAULT;

	return 0;
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	case FS_IOC_GETFSLABEL:
		return gfs2_getlabel(filp, (char __user *)arg);
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	/* These are just misnamed, they actually get/put from/to user an int */
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	/* Keep this list in sync with gfs2_ioctl */
	case FITRIM:
	case FS_IOC_GETFSLABEL:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define gfs2_compat_ioctl NULL
#endif

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_sizehint))
		atomic_set(&ip->i_sizehint, hint);
}

/**
 * gfs2_allocate_page_backing - Allocate blocks for a write fault
 * @page: The (locked) page to allocate backing for
 * @length: Size of the allocation
 *
 * We try to allocate all the blocks required for the page in one go. This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this page are allocated. If some of the blocks are already allocated,
 * that is ok too.
 */
static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
{
	u64 pos = page_offset(page);

	do {
		struct iomap iomap = { };

		if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap))
			return -EIO;

		if (length < iomap.length)
			iomap.length = length;
		length -= iomap.length;
		pos += iomap.length;
	} while (length > 0);

	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	u64 offset = page_offset(page);
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	unsigned int length;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Check page index against inode size */
	size = i_size_read(inode);
	if (offset >= size) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	/* page is wholly or partially inside EOF */
	if (offset > size - PAGE_SIZE)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	gfs2_size_hint(vmf->vma->vm_file, offset, length);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/*
	 * iomap_writepage / iomap_writepages currently don't support inline
	 * files, so always unstuff here.
	 */

	if (!gfs2_is_stuffed(ip) &&
	    !gfs2_write_alloc_required(ip, offset, length)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page, length);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

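/*
 * Read fault handler: hold the inode glock in shared mode across
 * filemap_fault() so the pages brought in are valid cluster-wide.
 */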
static vm_fault_t gfs2_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	vm_fault_t ret;
	int err;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_uninit;
	}
	ret = filemap_fault(vmf);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = gfs2_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	if (file->f_mode & FMODE_WRITE) {
		ret = gfs2_qa_get(GFS2_I(inode));
		if (ret)
			goto fail;
	}
	return 0;

fail:
	kfree(file->private_data);
	file->private_data = NULL;
	return ret;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_delete(ip, &inode->i_writecount);
	if (file->f_mode & FMODE_WRITE)
		gfs2_qa_put(ip);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}

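/*
 * Direct I/O read: a deferred glock is held around iomap_dio_rw(); see the
 * comment in gfs2_file_direct_write() below for why the deferred lock mode
 * is used.
 */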
static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
				     struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t count = iov_iter_count(to);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL, 0);
	gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	return ret;
}

static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
				      struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t len = iov_iter_count(from);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (offset + len > i_size_read(&ip->i_inode))
		goto out;

	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL, 0);
	if (ret == -ENOTBLK)
		ret = 0;
out:
	gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	return ret;
}

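/*
 * Try a direct read first when IOCB_DIRECT is set, then fall back to the
 * page cache.  An initial IOCB_NOIO pass avoids taking the glock when the
 * pages are already cached; otherwise the read is retried under a shared
 * glock.
 */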
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct gfs2_inode *ip;
	struct gfs2_holder gh;
	size_t written = 0;
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = gfs2_file_direct_read(iocb, to, &gh);
		if (likely(ret != -ENOTBLK))
			return ret;
		iocb->ki_flags &= ~IOCB_DIRECT;
	}
	iocb->ki_flags |= IOCB_NOIO;
	ret = generic_file_read_iter(iocb, to);
	iocb->ki_flags &= ~IOCB_NOIO;
	if (ret >= 0) {
		if (!iov_iter_count(to))
			return ret;
		written = ret;
	} else {
		if (ret != -EAGAIN)
			return ret;
		if (iocb->ki_flags & IOCB_NOWAIT)
			return ret;
	}
	ip = GFS2_I(iocb->ki_filp->f_mapping->host);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;
	ret = generic_file_read_iter(iocb, to);
	if (ret > 0)
		written += ret;
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return written ? written : ret;
}

56aa616a 884/**
da56e45b 885 * gfs2_file_write_iter - Perform a write to a file
56aa616a 886 * @iocb: The io context
64bc06bb 887 * @from: The data to write
56aa616a
SW
888 *
889 * We have to do a lock/unlock here to refresh the inode size for
890 * O_APPEND writes, otherwise we can land up writing at the wrong
891 * offset. There is still a race, but provided the app is using its
892 * own file locking, this will make O_APPEND work as expected.
893 *
894 */
895
da56e45b 896static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
56aa616a
SW
897{
898 struct file *file = iocb->ki_filp;
64bc06bb
AG
899 struct inode *inode = file_inode(file);
900 struct gfs2_inode *ip = GFS2_I(inode);
4c5c3010 901 struct gfs2_holder gh;
6e5e41e2 902 ssize_t ret;
0a305e49 903
da56e45b 904 gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
da1dfb6a 905
2ba48ce5 906 if (iocb->ki_flags & IOCB_APPEND) {
56aa616a
SW
907 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
908 if (ret)
4bd684bc 909 return ret;
56aa616a
SW
910 gfs2_glock_dq_uninit(&gh);
911 }
912
64bc06bb
AG
913 inode_lock(inode);
914 ret = generic_write_checks(iocb, from);
915 if (ret <= 0)
4c0e8dda 916 goto out_unlock;
64bc06bb
AG
917
918 ret = file_remove_privs(file);
919 if (ret)
4c0e8dda 920 goto out_unlock;
64bc06bb
AG
921
922 ret = file_update_time(file);
923 if (ret)
4c0e8dda 924 goto out_unlock;
64bc06bb 925
967bcc91
AG
926 if (iocb->ki_flags & IOCB_DIRECT) {
927 struct address_space *mapping = file->f_mapping;
6e5e41e2 928 ssize_t buffered, ret2;
967bcc91 929
4c5c3010 930 ret = gfs2_file_direct_write(iocb, from, &gh);
6e5e41e2 931 if (ret < 0 || !iov_iter_count(from))
4c0e8dda 932 goto out_unlock;
967bcc91 933
6e5e41e2 934 iocb->ki_flags |= IOCB_DSYNC;
4c0e8dda 935 current->backing_dev_info = inode_to_bdi(inode);
6e5e41e2 936 buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
4c0e8dda 937 current->backing_dev_info = NULL;
6e5e41e2 938 if (unlikely(buffered <= 0))
4c0e8dda 939 goto out_unlock;
967bcc91
AG
940
941 /*
942 * We need to ensure that the page cache pages are written to
943 * disk and invalidated to preserve the expected O_DIRECT
6e5e41e2
AG
944 * semantics. If the writeback or invalidate fails, only report
945 * the direct I/O range as we don't know if the buffered pages
946 * made it to disk.
967bcc91 947 */
6e5e41e2
AG
948 iocb->ki_pos += buffered;
949 ret2 = generic_write_sync(iocb, buffered);
950 invalidate_mapping_pages(mapping,
951 (iocb->ki_pos - buffered) >> PAGE_SHIFT,
952 (iocb->ki_pos - 1) >> PAGE_SHIFT);
953 if (!ret || ret2 > 0)
954 ret += ret2;
967bcc91 955 } else {
4c0e8dda 956 current->backing_dev_info = inode_to_bdi(inode);
967bcc91 957 ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
4c0e8dda 958 current->backing_dev_info = NULL;
6e5e41e2 959 if (likely(ret > 0)) {
967bcc91 960 iocb->ki_pos += ret;
6e5e41e2
AG
961 ret = generic_write_sync(iocb, ret);
962 }
967bcc91 963 }
64bc06bb 964
4c0e8dda 965out_unlock:
64bc06bb 966 inode_unlock(inode);
6e5e41e2 967 return ret;
56aa616a
SW
968}
969
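/*
 * Allocate and zero out the blocks backing [offset, offset + len) for
 * fallocate.  The caller has already started a transaction and reserved the
 * necessary blocks.
 */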
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t end = offset + len;
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (offset < end) {
		struct iomap iomap = { };

		error = gfs2_iomap_get_alloc(inode, offset, end - offset,
					     &iomap);
		if (error)
			goto out;
		offset = iomap.offset + iomap.length;
		if (!(iomap.flags & IOMAP_F_NEW))
			continue;
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

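/*
 * Worker for gfs2_fallocate(): walk the requested range in chunks, taking
 * quota and resource group reservations for each chunk before allocating it
 * with fallocate_chunk().
 */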
9c9f1159 1047static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
2fe17c10 1048{
496ad9aa 1049 struct inode *inode = file_inode(file);
2fe17c10
CH
1050 struct gfs2_sbd *sdp = GFS2_SB(inode);
1051 struct gfs2_inode *ip = GFS2_I(inode);
7b9cff46 1052 struct gfs2_alloc_parms ap = { .aflags = 0, };
2fe17c10 1053 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
174d1232 1054 loff_t bytes, max_bytes, max_blks;
2fe17c10 1055 int error;
4442f2e0
SW
1056 const loff_t pos = offset;
1057 const loff_t count = len;
6905d9e4 1058 loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
2fe17c10 1059 loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
64dd153c 1060 loff_t max_chunk_size = UINT_MAX & bsize_mask;
a0846a53 1061
2fe17c10
CH
1062 next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
1063
6905d9e4 1064 offset &= bsize_mask;
2fe17c10
CH
1065
1066 len = next - offset;
1067 bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
1068 if (!bytes)
1069 bytes = UINT_MAX;
6905d9e4
BM
1070 bytes &= bsize_mask;
1071 if (bytes == 0)
1072 bytes = sdp->sd_sb.sb_bsize;
2fe17c10 1073
da1dfb6a 1074 gfs2_size_hint(file, offset, len);
8e2e0047 1075
d9be0cda
AD
1076 gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
1077 ap.min_target = data_blocks + ind_blocks;
1078
2fe17c10
CH
1079 while (len > 0) {
1080 if (len < bytes)
1081 bytes = len;
58a7d5fb
BM
1082 if (!gfs2_write_alloc_required(ip, offset, bytes)) {
1083 len -= bytes;
1084 offset += bytes;
1085 continue;
1086 }
d9be0cda
AD
1087
1088 /* We need to determine how many bytes we can actually
1089 * fallocate without exceeding quota or going over the
1090 * end of the fs. We start off optimistically by assuming
1091 * we can write max_bytes */
1092 max_bytes = (len > max_chunk_size) ? max_chunk_size : len;
1093
1094 /* Since max_bytes is most likely a theoretical max, we
1095 * calculate a more realistic 'bytes' to serve as a good
1096 * starting point for the number of bytes we may be able
1097 * to write */
2fe17c10 1098 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
7b9cff46 1099 ap.target = data_blocks + ind_blocks;
b8fbf471
AD
1100
1101 error = gfs2_quota_lock_check(ip, &ap);
2fe17c10 1102 if (error)
9c9f1159 1103 return error;
d9be0cda
AD
1104 /* ap.allowed tells us how many blocks quota will allow
1105 * us to write. Check if this reduces max_blks */
174d1232
AG
1106 max_blks = UINT_MAX;
1107 if (ap.allowed)
d9be0cda 1108 max_blks = ap.allowed;
2fe17c10 1109
7b9cff46 1110 error = gfs2_inplace_reserve(ip, &ap);
d9be0cda 1111 if (error)
2fe17c10 1112 goto out_qunlock;
d9be0cda
AD
1113
1114 /* check if the selected rgrp limits our max_blks further */
725d0e9d
AG
1115 if (ip->i_res.rs_reserved < max_blks)
1116 max_blks = ip->i_res.rs_reserved;
d9be0cda
AD
1117
1118 /* Almost done. Calculate bytes that can be written using
1119 * max_blks. We also recompute max_bytes, data_blocks and
1120 * ind_blocks */
1121 calc_max_reserv(ip, &max_bytes, &data_blocks,
1122 &ind_blocks, max_blks);
2fe17c10
CH
1123
1124 rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
71f890f7 1125 RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
2fe17c10
CH
1126 if (gfs2_is_jdata(ip))
1127 rblocks += data_blocks ? data_blocks : 1;
1128
1129 error = gfs2_trans_begin(sdp, rblocks,
45eb0504 1130 PAGE_SIZE >> inode->i_blkbits);
2fe17c10
CH
1131 if (error)
1132 goto out_trans_fail;
1133
1134 error = fallocate_chunk(inode, offset, max_bytes, mode);
1135 gfs2_trans_end(sdp);
1136
1137 if (error)
1138 goto out_trans_fail;
1139
1140 len -= max_bytes;
1141 offset += max_bytes;
1142 gfs2_inplace_release(ip);
1143 gfs2_quota_unlock(ip);
2fe17c10 1144 }
4442f2e0 1145
0a6a4abc 1146 if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
1885867b 1147 i_size_write(inode, pos + count);
0a6a4abc
AG
1148 file_update_time(file);
1149 mark_inode_dirty(inode);
1885867b 1150
dde0c2e7
CH
1151 if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
1152 return vfs_fsync_range(file, pos, pos + count - 1,
1153 (file->f_flags & __O_SYNC) ? 0 : 1);
1154 return 0;
2fe17c10
CH
1155
1156out_trans_fail:
1157 gfs2_inplace_release(ip);
1158out_qunlock:
1159 gfs2_quota_unlock(ip);
9c9f1159
AP
1160 return error;
1161}
1162
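/*
 * fallocate entry point: validate the mode, take the inode lock and an
 * exclusive glock, then dispatch to __gfs2_punch_hole() or
 * __gfs2_fallocate().
 */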
static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = __gfs2_punch_hole(file, offset, len);
	} else {
		ret = __gfs2_fallocate(file, mode, offset, len);
		if (ret)
			gfs2_rs_deltree(&ip->i_res);
	}

	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}

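/*
 * splice_write wrapper: record a size hint for the upcoming write before
 * handing off to iter_file_splice_write().
 */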
static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t ret;

	gfs2_size_hint(out, *ppos, len);

	ret = iter_file_splice_write(pipe, out, ppos, len, flags);
	return ret;
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(gfs2_withdrawn(sdp))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

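/*
 * Acquire or convert the flock glock for this open file.  Non-blocking
 * requests use LM_FLAG_TRY_1CB and are retried a few times with increasing
 * sleeps before returning -EAGAIN.
 */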
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		struct file_lock request;
		if (fl_gh->gh_state == state)
			goto out;
		locks_init_lock(&request);
		request.fl_type = F_UNLCK;
		request.fl_flags = FL_FLOCK;
		locks_lock_file_wait(file, &request);
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

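/*
 * Release the flock and, if one is held, drop the associated flock glock.
 */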
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};