// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>
#include <linux/fileattr.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/*
 * struct fsflag_gfs2flag
 *
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};

static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
{
	int i;
	u32 fsflags = 0;

	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;
	return fsflags;
}

int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	if (d_is_special(dentry))
		return -ENOTTY;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);

	fileattr_fill_flags(fa, fsflags);

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @inode: The inode
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(&init_user_ns, inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode->i_ctime = current_time(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

int gfs2_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 fsflags = fa->flags, gfsflags = 0;
	u32 mask;
	int i;

	if (d_is_special(dentry))
		return -ENOTTY;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(inode, gfsflags, mask);
}

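/*
 * gfs2_getlabel - Return the lock table name, which is what the
 * FS_IOC_GETFSLABEL ioctl below reports as the filesystem label.
 */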
static int gfs2_getlabel(struct file *filp, char __user *label)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
		return -EFAULT;

	return 0;
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	case FS_IOC_GETFSLABEL:
		return gfs2_getlabel(filp, (char __user *)arg);
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	/* Keep this list in sync with gfs2_ioctl */
	case FITRIM:
	case FS_IOC_GETFSLABEL:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define gfs2_compat_ioctl NULL
#endif

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_sizehint))
		atomic_set(&ip->i_sizehint, hint);
}

/**
 * gfs2_allocate_page_backing - Allocate blocks for a write fault
 * @page: The (locked) page to allocate backing for
 * @length: Size of the allocation
 *
 * We try to allocate all the blocks required for the page in one go. This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this page are allocated. If some of the blocks are already allocated,
 * that is ok too.
 */
static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
{
	u64 pos = page_offset(page);

	do {
		struct iomap iomap = { };

		if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
			return -EIO;

		if (length < iomap.length)
			iomap.length = length;
		length -= iomap.length;
		pos += iomap.length;
	} while (length > 0);

	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	u64 offset = page_offset(page);
	unsigned int data_blocks, ind_blocks, rblocks;
	vm_fault_t ret = VM_FAULT_LOCKED;
	struct gfs2_holder gh;
	unsigned int length;
	loff_t size;
	int err;

	sb_start_pagefault(inode->i_sb);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_uninit;
	}

	/* Check page index against inode size */
	size = i_size_read(inode);
	if (offset >= size) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	/* page is wholly or partially inside EOF */
	if (size - offset < PAGE_SIZE)
		length = size - offset;
	else
		length = PAGE_SIZE;

	gfs2_size_hint(vmf->vma->vm_file, offset, length);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/*
	 * iomap_writepage / iomap_writepages currently don't support inline
	 * files, so always unstuff here.
	 */

	if (!gfs2_is_stuffed(ip) &&
	    !gfs2_write_alloc_required(ip, offset, length)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = VM_FAULT_NOPAGE;
			unlock_page(page);
		}
		goto out_unlock;
	}

	err = gfs2_rindex_update(sdp);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_unlock;
	}

	gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	err = gfs2_quota_lock_check(ip, &ap);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_unlock;
	}
	err = gfs2_inplace_reserve(ip, &ap);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_quota_unlock;
	}

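	/*
	 * Work out how many journal blocks to reserve: the dinode and any
	 * indirect blocks, the data blocks themselves for jdata inodes, and
	 * statfs, quota and resource-group overhead when blocks are being
	 * allocated.
	 */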
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	err = gfs2_trans_begin(sdp, rblocks, 0);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_trans_fail;
	}

	/* Unstuff, if required, and allocate backing blocks for page */
	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip);
		if (err) {
			ret = block_page_mkwrite_return(err);
			goto out_trans_end;
		}
	}

	lock_page(page);
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
		ret = VM_FAULT_NOPAGE;
		goto out_page_locked;
	}

	err = gfs2_allocate_page_backing(page, length);
	if (err)
		ret = block_page_mkwrite_return(err);

out_page_locked:
	if (ret != VM_FAULT_LOCKED)
		unlock_page(page);
out_trans_end:
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == VM_FAULT_LOCKED) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
	sb_end_pagefault(inode->i_sb);
	return ret;
}

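/*
 * gfs2_fault - Handle a read fault: take the inode glock in shared mode
 * around filemap_fault() so the page is brought in while the glock is held.
 */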
static vm_fault_t gfs2_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	vm_fault_t ret;
	int err;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_uninit;
	}
	ret = filemap_fault(vmf);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = gfs2_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not, depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	if (file->f_mode & FMODE_WRITE) {
		ret = gfs2_qa_get(GFS2_I(inode));
		if (ret)
			goto fail;
	}
	return 0;

fail:
	kfree(file->private_data);
	file->private_data = NULL;
	return ret;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (file->f_mode & FMODE_WRITE) {
		if (gfs2_rs_active(&ip->i_res))
			gfs2_rs_delete(ip);
		gfs2_qa_put(ip);
	}
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}

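/*
 * Decide whether it is worth manually faulting in pages of a user buffer
 * before retrying an I/O request, and work out how large a window to fault
 * in. Only user-backed iterators qualify. The window size is derived from
 * the task's remaining dirty-throttling budget (nr_dirtied_pause -
 * nr_dirtied), capped at 1 MiB and reduced by the offset into the first page.
 */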
static inline bool should_fault_in_pages(struct iov_iter *i,
					 struct kiocb *iocb,
					 size_t *prev_count,
					 size_t *window_size)
{
	size_t count = iov_iter_count(i);
	size_t size, offs;

	if (!count)
		return false;
	if (!user_backed_iter(i))
		return false;

	size = PAGE_SIZE;
	offs = offset_in_page(iocb->ki_pos);
	if (*prev_count != count || !*window_size) {
		size_t nr_dirtied;

		nr_dirtied = max(current->nr_dirtied_pause -
				 current->nr_dirtied, 8);
		size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
	}

	*prev_count = count;
	*window_size = size - offs;
	return true;
}

static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
				     struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t prev_count = 0, window_size = 0;
	size_t read = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 *
	 * Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger
	 * physical as well as manual page faults, and we need to disable both
	 * kinds.
	 *
	 * For direct I/O, gfs2 takes the inode glock in deferred mode. This
	 * locking mode is compatible with other deferred holders, so multiple
	 * processes and nodes can do direct I/O to a file at the same time.
	 * There's no guarantee that reads or writes will be atomic. Any
	 * coordination among readers and writers needs to happen externally.
	 */

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
retry:
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;
	pagefault_disable();
	to->nofault = true;
	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
			   IOMAP_DIO_PARTIAL, NULL, read);
	to->nofault = false;
	pagefault_enable();
	if (ret <= 0 && ret != -EFAULT)
		goto out_unlock;
	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
	if (ret > 0)
		read = ret;

	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(gh);
		window_size -= fault_in_iov_iter_writeable(to, window_size);
		if (window_size)
			goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	/* User space doesn't expect partial success. */
	if (ret < 0)
		return ret;
	return read;
}

static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
				      struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t prev_count = 0, window_size = 0;
	size_t written = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 *
	 * For writes, iomap_dio_rw only triggers manual page faults, so we
	 * don't need to disable physical ones.
	 */

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
retry:
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;
	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
		goto out_unlock;

	from->nofault = true;
	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
			   IOMAP_DIO_PARTIAL, NULL, written);
	from->nofault = false;
	if (ret <= 0) {
		if (ret == -ENOTBLK)
			ret = 0;
		if (ret != -EFAULT)
			goto out_unlock;
	}
	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
	if (ret > 0)
		written = ret;

	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(gh);
		window_size -= fault_in_iov_iter_readable(from, window_size);
		if (window_size)
			goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	/* User space doesn't expect partial success. */
	if (ret < 0)
		return ret;
	return written;
}

static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct gfs2_inode *ip;
	struct gfs2_holder gh;
	size_t prev_count = 0, window_size = 0;
	size_t read = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 */

	if (iocb->ki_flags & IOCB_DIRECT)
		return gfs2_file_direct_read(iocb, to, &gh);

	pagefault_disable();
	iocb->ki_flags |= IOCB_NOIO;
	ret = generic_file_read_iter(iocb, to);
	iocb->ki_flags &= ~IOCB_NOIO;
	pagefault_enable();
	if (ret >= 0) {
		if (!iov_iter_count(to))
			return ret;
		read = ret;
	} else if (ret != -EFAULT) {
		if (ret != -EAGAIN)
			return ret;
		if (iocb->ki_flags & IOCB_NOWAIT)
			return ret;
	}
	ip = GFS2_I(iocb->ki_filp->f_mapping->host);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
retry:
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;
	pagefault_disable();
	ret = generic_file_read_iter(iocb, to);
	pagefault_enable();
	if (ret <= 0 && ret != -EFAULT)
		goto out_unlock;
	if (ret > 0)
		read += ret;

	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(&gh);
		window_size -= fault_in_iov_iter_writeable(to, window_size);
		if (window_size)
			goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(&gh))
		gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return read ? read : ret;
}

static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
					struct iov_iter *from,
					struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *statfs_gh = NULL;
	size_t prev_count = 0, window_size = 0;
	size_t orig_count = iov_iter_count(from);
	size_t written = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O. If a page fault occurs, we indicate
	 * that the inode glock may be dropped, fault in the pages manually,
	 * and retry.
	 */

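	/*
	 * Writes to the rindex (done by gfs2_grow) are also made under the
	 * statfs inode glock, so set up a separate holder for it in advance.
	 */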
	if (inode == sdp->sd_rindex) {
		statfs_gh = kmalloc(sizeof(*statfs_gh), GFP_NOFS);
		if (!statfs_gh)
			return -ENOMEM;
	}

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
retry:
	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
		window_size -= fault_in_iov_iter_readable(from, window_size);
		if (!window_size) {
			ret = -EFAULT;
			goto out_uninit;
		}
		from->count = min(from->count, window_size);
	}
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	if (inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		ret = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					 GL_NOCACHE, statfs_gh);
		if (ret)
			goto out_unlock;
	}

	current->backing_dev_info = inode_to_bdi(inode);
	pagefault_disable();
	ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
	pagefault_enable();
	current->backing_dev_info = NULL;
	if (ret > 0) {
		iocb->ki_pos += ret;
		written += ret;
	}

	if (inode == sdp->sd_rindex)
		gfs2_glock_dq_uninit(statfs_gh);

	if (ret <= 0 && ret != -EFAULT)
		goto out_unlock;

	from->count = orig_count - written;
	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(gh);
		goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	kfree(statfs_gh);
	from->count = orig_count - written;
	return written ? written : ret;
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	ssize_t ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;

	ret = file_update_time(file);
	if (ret)
		goto out_unlock;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		ssize_t buffered, ret2;

		ret = gfs2_file_direct_write(iocb, from, &gh);
		if (ret < 0 || !iov_iter_count(from))
			goto out_unlock;

		iocb->ki_flags |= IOCB_DSYNC;
		buffered = gfs2_file_buffered_write(iocb, from, &gh);
		if (unlikely(buffered <= 0)) {
			if (!ret)
				ret = buffered;
			goto out_unlock;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics. If the writeback or invalidate fails, only report
		 * the direct I/O range as we don't know if the buffered pages
		 * made it to disk.
		 */
		ret2 = generic_write_sync(iocb, buffered);
		invalidate_mapping_pages(mapping,
				(iocb->ki_pos - buffered) >> PAGE_SHIFT,
				(iocb->ki_pos - 1) >> PAGE_SHIFT);
		if (!ret || ret2 > 0)
			ret += ret2;
	} else {
		ret = gfs2_file_buffered_write(iocb, from, &gh);
		if (likely(ret > 0))
			ret = generic_write_sync(iocb, ret);
	}

out_unlock:
	inode_unlock(inode);
	return ret;
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t end = offset + len;
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip);
		if (unlikely(error))
			goto out;
	}

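	/*
	 * Walk the range, allocating blocks with gfs2_iomap_alloc() and
	 * zeroing any newly allocated extents on disk.
	 */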
	while (offset < end) {
		struct iomap iomap = { };

		error = gfs2_iomap_alloc(inode, offset, end - offset, &iomap);
		if (error)
			goto out;
		offset = iomap.offset + iomap.length;
		if (!(iomap.flags & IOMAP_F_NEW))
			continue;
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

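	/*
	 * Round the requested range out to filesystem block boundaries and
	 * pick an initial per-iteration chunk of half a resource group's
	 * worth of data.
	 */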
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		max_blks = UINT_MAX;
		if (ap.allowed)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ip->i_res.rs_reserved < max_blks)
			max_blks = ip->i_res.rs_reserved;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE >> inode->i_blkbits);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
		i_size_write(inode, pos + count);
	file_update_time(file);
	mark_inode_dirty(inode);

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
			       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}

1346}
1347
1348static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1349{
1350 struct inode *inode = file_inode(file);
d4d7fc12 1351 struct gfs2_sbd *sdp = GFS2_SB(inode);
9c9f1159
AP
1352 struct gfs2_inode *ip = GFS2_I(inode);
1353 struct gfs2_holder gh;
1354 int ret;
1355
4e56a641 1356 if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
d4d7fc12
AP
1357 return -EOPNOTSUPP;
1358 /* fallocate is needed by gfs2_grow to reserve space in the rindex */
1359 if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
9c9f1159
AP
1360 return -EOPNOTSUPP;
1361
5955102c 1362 inode_lock(inode);
9c9f1159
AP
1363
1364 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
1365 ret = gfs2_glock_nq(&gh);
1366 if (ret)
1367 goto out_uninit;
1368
1369 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1370 (offset + len) > inode->i_size) {
1371 ret = inode_newsize_ok(inode, offset + len);
1372 if (ret)
1373 goto out_unlock;
1374 }
1375
1376 ret = get_write_access(inode);
1377 if (ret)
1378 goto out_unlock;
1379
4e56a641
AG
1380 if (mode & FALLOC_FL_PUNCH_HOLE) {
1381 ret = __gfs2_punch_hole(file, offset, len);
1382 } else {
4e56a641 1383 ret = __gfs2_fallocate(file, mode, offset, len);
4e56a641
AG
1384 if (ret)
1385 gfs2_rs_deltree(&ip->i_res);
1386 }
a097dc7e 1387
9c9f1159 1388 put_write_access(inode);
2fe17c10 1389out_unlock:
a0846a53 1390 gfs2_glock_dq(&gh);
2fe17c10 1391out_uninit:
a0846a53 1392 gfs2_holder_uninit(&gh);
5955102c 1393 inode_unlock(inode);
9c9f1159 1394 return ret;
2fe17c10
CH
1395}
1396
static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t ret;

	gfs2_size_hint(out, *ppos, len);

	ret = iter_file_splice_write(pipe, out, ppos, len, flags);
	return ret;
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(gfs2_withdrawn(sdp))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		struct file_lock request;
		if (fl_gh->gh_state == state)
			goto out;
		locks_init_lock(&request);
		request.fl_type = F_UNLCK;
		request.fl_flags = FL_FLOCK;
		locks_lock_file_wait(file, &request);
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
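	/*
	 * If a try-lock request fails with GLR_TRYFAILED, retry a few times,
	 * sleeping a little longer before each attempt.
	 */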
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};