/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"

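/* Map the O_ACCMODE portion of the VFS open flags onto the SMB access
   bits requested on open (GENERIC_READ, GENERIC_WRITE, or an explicit
   read/write mask for the default case). */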
static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request
                   can cause unnecessary access denied on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

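/* Translate VFS open flags into the flag word handed to the POSIX
   open/create call used when the server supports the CIFS Unix
   extensions. */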
static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
{
        fmode_t posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = FMODE_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = FMODE_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request
                   can cause unnecessary access denied on create */
                /* return GENERIC_ALL; */
                posix_flags = FMODE_READ | FMODE_WRITE;
        }
        /* can not map O_CREAT or O_EXCL or O_TRUNC flags when
           reopening a file.  They had their effect on the original open */
        if (flags & O_APPEND)
                posix_flags |= (fmode_t)O_APPEND;
        if (flags & O_DSYNC)
                posix_flags |= (fmode_t)O_DSYNC;
        if (flags & __O_SYNC)
                posix_flags |= (fmode_t)__O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= (fmode_t)O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= (fmode_t)O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= (fmode_t)O_DIRECT;

        return posix_flags;
}

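/* Derive the SMB create disposition from the O_CREAT/O_EXCL/O_TRUNC
   combination; see the open flag mapping table in cifs_open below. */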
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

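/* After a successful POSIX open, record the caching rights granted by the
   oplock on the cached inode: an exclusive oplock allows read and write
   caching, a read oplock allows read caching only. */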
/* all arguments to this function must be checked for validity in caller */
static inline int
cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
                             struct cifsInodeInfo *pCifsInode, __u32 oplock,
                             u16 netfid)
{

        write_lock(&GlobalSMBSeslock);

        pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
        if (pCifsInode == NULL) {
                write_unlock(&GlobalSMBSeslock);
                return -EINVAL;
        }

        if (pCifsInode->clientCanCacheRead) {
                /* we have the inode open somewhere else
                   no need to discard cache data */
                goto psx_client_can_cache;
        }

        /* BB FIXME need to fix this check to move it earlier into posix_open
           BB fIX following section BB FIXME */

        /* if not oplocked, invalidate inode pages if mtime or file
           size changed */
/*      temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
        if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
                           (file->f_path.dentry->d_inode->i_size ==
                            (loff_t)le64_to_cpu(buf->EndOfFile))) {
                cFYI(1, "inode unchanged on server");
        } else {
                if (file->f_path.dentry->d_inode->i_mapping) {
                        rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
                        if (rc != 0)
                                CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
                }
                cFYI(1, "invalidating remote inode since open detected it "
                         "changed");
                invalidate_remote_inode(file->f_path.dentry->d_inode);
        } */

psx_client_can_cache:
        if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
                pCifsInode->clientCanCacheAll = true;
                pCifsInode->clientCanCacheRead = true;
                cFYI(1, "Exclusive Oplock granted on inode %p",
                         file->f_path.dentry->d_inode);
        } else if ((oplock & 0xF) == OPLOCK_READ)
                pCifsInode->clientCanCacheRead = true;

        /* will have to change the unlock if we reenable the
           filemap_fdatawrite (which does not seem necessary */
        write_unlock(&GlobalSMBSeslock);
        return 0;
}

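/* After a regular SMB open, revalidate the inode (flushing and invalidating
   cached pages if it changed on the server) and record the caching rights
   implied by the granted oplock. */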
/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode,
        struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
        char *full_path, int xid)
{
        struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
        struct timespec temp;
        int rc;

        if (pCifsInode->clientCanCacheRead) {
                /* we have the inode open somewhere else
                   no need to discard cache data */
                goto client_can_cache;
        }

        /* BB need same check in cifs_create too? */
        /* if not oplocked, invalidate inode pages if mtime or file
           size changed */
        temp = cifs_NTtimeToUnix(buf->LastWriteTime);
        if (timespec_equal(&inode->i_mtime, &temp) &&
                           (inode->i_size ==
                            (loff_t)le64_to_cpu(buf->EndOfFile))) {
                cFYI(1, "inode unchanged on server");
        } else {
                if (inode->i_mapping) {
                        /* BB no need to lock inode until after invalidate
                           since namei code should already have it locked? */
                        rc = filemap_write_and_wait(inode->i_mapping);
                        if (rc != 0)
                                pCifsInode->write_behind_rc = rc;
                }
                cFYI(1, "invalidating remote inode since open detected it "
                         "changed");
                invalidate_remote_inode(inode);
        }

client_can_cache:
        if (pTcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, NULL);

        if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
                pCifsInode->clientCanCacheAll = true;
                pCifsInode->clientCanCacheRead = true;
                cFYI(1, "Exclusive Oplock granted on inode %p", inode);
        } else if ((*oplock & 0xF) == OPLOCK_READ)
                pCifsInode->clientCanCacheRead = true;

        return rc;
}

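/* VFS ->open() entry point: attempt a POSIX open when the server advertises
   the Unix extensions, otherwise fall back to SMBOpen (or legacy OpenX for
   very old servers), then attach a cifsFileInfo to the struct file. */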
219int cifs_open(struct inode *inode, struct file *file)
220{
221 int rc = -EACCES;
590a3fe0
JL
222 int xid;
223 __u32 oplock;
1da177e4 224 struct cifs_sb_info *cifs_sb;
276a74a4 225 struct cifsTconInfo *tcon;
6ca9f3ba 226 struct cifsFileInfo *pCifsFile = NULL;
1da177e4 227 struct cifsInodeInfo *pCifsInode;
1da177e4
LT
228 char *full_path = NULL;
229 int desiredAccess;
230 int disposition;
231 __u16 netfid;
232 FILE_ALL_INFO *buf = NULL;
233
234 xid = GetXid();
235
236 cifs_sb = CIFS_SB(inode->i_sb);
276a74a4 237 tcon = cifs_sb->tcon;
1da177e4 238
a6ce4932 239 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 240
e6a00296 241 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 242 if (full_path == NULL) {
0f3bc09e 243 rc = -ENOMEM;
1da177e4 244 FreeXid(xid);
0f3bc09e 245 return rc;
1da177e4
LT
246 }
247
b6b38f70
JP
248 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
249 inode, file->f_flags, full_path);
276a74a4
SF
250
251 if (oplockEnabled)
252 oplock = REQ_OPLOCK;
253 else
254 oplock = 0;
255
64cc2c63
SF
256 if (!tcon->broken_posix_open && tcon->unix_ext &&
257 (tcon->ses->capabilities & CAP_UNIX) &&
276a74a4
SF
258 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
259 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
260 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
fa588e0c 261 oflags |= SMB_O_CREAT;
276a74a4 262 /* can not refresh inode info since size could be stale */
2422f676 263 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c
SF
264 cifs_sb->mnt_file_mode /* ignored */,
265 oflags, &oplock, &netfid, xid);
276a74a4 266 if (rc == 0) {
b6b38f70 267 cFYI(1, "posix open succeeded");
276a74a4
SF
268 /* no need for special case handling of setting mode
269 on read only files needed here */
270
47c78b7f
JL
271 rc = cifs_posix_open_inode_helper(inode, file,
272 pCifsInode, oplock, netfid);
273 if (rc != 0) {
274 CIFSSMBClose(xid, tcon, netfid);
275 goto out;
276 }
277
2422f676
JL
278 pCifsFile = cifs_new_fileinfo(inode, netfid, file,
279 file->f_path.mnt,
280 oflags);
281 if (pCifsFile == NULL) {
282 CIFSSMBClose(xid, tcon, netfid);
283 rc = -ENOMEM;
2422f676 284 }
276a74a4 285 goto out;
64cc2c63
SF
286 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
287 if (tcon->ses->serverNOS)
b6b38f70 288 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
289 " unexpected error on SMB posix open"
290 ", disabling posix open support."
291 " Check if server update available.",
292 tcon->ses->serverName,
b6b38f70 293 tcon->ses->serverNOS);
64cc2c63 294 tcon->broken_posix_open = true;
276a74a4
SF
295 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
296 (rc != -EOPNOTSUPP)) /* path not found or net err */
297 goto out;
64cc2c63
SF
298 /* else fallthrough to retry open the old way on network i/o
299 or DFS errors */
276a74a4
SF
300 }
301
1da177e4
LT
302 desiredAccess = cifs_convert_flags(file->f_flags);
303
304/*********************************************************************
305 * open flag mapping table:
fb8c4b14 306 *
1da177e4 307 * POSIX Flag CIFS Disposition
fb8c4b14 308 * ---------- ----------------
1da177e4
LT
309 * O_CREAT FILE_OPEN_IF
310 * O_CREAT | O_EXCL FILE_CREATE
311 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
312 * O_TRUNC FILE_OVERWRITE
313 * none of the above FILE_OPEN
314 *
315 * Note that there is not a direct match between disposition
fb8c4b14 316 * FILE_SUPERSEDE (ie create whether or not file exists although
1da177e4
LT
317 * O_CREAT | O_TRUNC is similar but truncates the existing
318 * file rather than creating a new file as FILE_SUPERSEDE does
319 * (which uses the attributes / metadata passed in on open call)
320 *?
fb8c4b14 321 *? O_SYNC is a reasonable match to CIFS writethrough flag
1da177e4
LT
322 *? and the read write flags match reasonably. O_LARGEFILE
323 *? is irrelevant because largefile support is always used
324 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
325 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
326 *********************************************************************/
327
328 disposition = cifs_get_disposition(file->f_flags);
329
1da177e4
LT
330 /* BB pass O_SYNC flag through on file attributes .. BB */
331
332 /* Also refresh inode by passing in file_info buf returned by SMBOpen
333 and calling get_inode_info with returned buf (at least helps
334 non-Unix server case) */
335
fb8c4b14
SF
336 /* BB we can not do this if this is the second open of a file
337 and the first handle has writebehind data, we might be
1da177e4
LT
338 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
339 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
340 if (!buf) {
341 rc = -ENOMEM;
342 goto out;
343 }
5bafd765
SF
344
345 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
276a74a4 346 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
5bafd765 347 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
737b758c
SF
348 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
349 & CIFS_MOUNT_MAP_SPECIAL_CHR);
5bafd765
SF
350 else
351 rc = -EIO; /* no NT SMB support fall into legacy open below */
352
a9d02ad4
SF
353 if (rc == -EIO) {
354 /* Old server, try legacy style OpenX */
276a74a4 355 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
a9d02ad4
SF
356 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
357 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
358 & CIFS_MOUNT_MAP_SPECIAL_CHR);
359 }
1da177e4 360 if (rc) {
b6b38f70 361 cFYI(1, "cifs_open returned 0x%x", rc);
1da177e4
LT
362 goto out;
363 }
3321b791 364
47c78b7f
JL
365 rc = cifs_open_inode_helper(inode, tcon, &oplock, buf, full_path, xid);
366 if (rc != 0)
367 goto out;
368
086f68bd
JL
369 pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
370 file->f_flags);
6ca9f3ba 371 if (pCifsFile == NULL) {
1da177e4
LT
372 rc = -ENOMEM;
373 goto out;
374 }
1da177e4 375
fb8c4b14 376 if (oplock & CIFS_CREATE_ACTION) {
1da177e4
LT
377 /* time to set mode which we can not set earlier due to
378 problems creating new read-only files */
276a74a4 379 if (tcon->unix_ext) {
4e1e7fb9
JL
380 struct cifs_unix_set_info_args args = {
381 .mode = inode->i_mode,
382 .uid = NO_CHANGE_64,
383 .gid = NO_CHANGE_64,
384 .ctime = NO_CHANGE_64,
385 .atime = NO_CHANGE_64,
386 .mtime = NO_CHANGE_64,
387 .device = 0,
388 };
01ea95e3
JL
389 CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
390 cifs_sb->local_nls,
391 cifs_sb->mnt_cifs_flags &
737b758c 392 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4
LT
393 }
394 }
395
396out:
397 kfree(buf);
398 kfree(full_path);
399 FreeXid(xid);
400 return rc;
401}
402
0418726b 403/* Try to reacquire byte range locks that were released when session */
1da177e4
LT
404/* to server was lost */
405static int cifs_relock_file(struct cifsFileInfo *cifsFile)
406{
407 int rc = 0;
408
409/* BB list all locks open on this file and relock */
410
411 return rc;
412}
413
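/* Reopen a file whose handle was invalidated, e.g. after the session to the
   server was reset; if can_flush is set, write back dirty pages and
   revalidate the inode once the new handle is obtained. */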
4b18f2a9 414static int cifs_reopen_file(struct file *file, bool can_flush)
1da177e4
LT
415{
416 int rc = -EACCES;
590a3fe0
JL
417 int xid;
418 __u32 oplock;
1da177e4 419 struct cifs_sb_info *cifs_sb;
7fc8f4e9 420 struct cifsTconInfo *tcon;
1da177e4
LT
421 struct cifsFileInfo *pCifsFile;
422 struct cifsInodeInfo *pCifsInode;
fb8c4b14 423 struct inode *inode;
1da177e4
LT
424 char *full_path = NULL;
425 int desiredAccess;
426 int disposition = FILE_OPEN;
427 __u16 netfid;
428
ad7a2926 429 if (file->private_data)
c21dfb69 430 pCifsFile = file->private_data;
ad7a2926 431 else
1da177e4
LT
432 return -EBADF;
433
434 xid = GetXid();
f0a71eb8 435 mutex_lock(&pCifsFile->fh_mutex);
4b18f2a9 436 if (!pCifsFile->invalidHandle) {
f0a71eb8 437 mutex_unlock(&pCifsFile->fh_mutex);
0f3bc09e 438 rc = 0;
1da177e4 439 FreeXid(xid);
0f3bc09e 440 return rc;
1da177e4
LT
441 }
442
e6a00296 443 if (file->f_path.dentry == NULL) {
b6b38f70 444 cERROR(1, "no valid name if dentry freed");
3a9f462f
SF
445 dump_stack();
446 rc = -EBADF;
447 goto reopen_error_exit;
448 }
449
450 inode = file->f_path.dentry->d_inode;
fb8c4b14 451 if (inode == NULL) {
b6b38f70 452 cERROR(1, "inode not valid");
3a9f462f
SF
453 dump_stack();
454 rc = -EBADF;
455 goto reopen_error_exit;
1da177e4 456 }
50c2f753 457
1da177e4 458 cifs_sb = CIFS_SB(inode->i_sb);
7fc8f4e9 459 tcon = cifs_sb->tcon;
3a9f462f 460
1da177e4
LT
461/* can not grab rename sem here because various ops, including
462 those that already have the rename sem can end up causing writepage
463 to get called and if the server was down that means we end up here,
464 and we can never tell if the caller already has the rename_sem */
e6a00296 465 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 466 if (full_path == NULL) {
3a9f462f
SF
467 rc = -ENOMEM;
468reopen_error_exit:
f0a71eb8 469 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4 470 FreeXid(xid);
3a9f462f 471 return rc;
1da177e4
LT
472 }
473
b6b38f70
JP
474 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
475 inode, file->f_flags, full_path);
1da177e4
LT
476
477 if (oplockEnabled)
478 oplock = REQ_OPLOCK;
479 else
4b18f2a9 480 oplock = 0;
1da177e4 481
7fc8f4e9
SF
482 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
483 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
484 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
485 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
486 /* can not refresh inode info since size could be stale */
2422f676 487 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
fa588e0c
SF
488 cifs_sb->mnt_file_mode /* ignored */,
489 oflags, &oplock, &netfid, xid);
7fc8f4e9 490 if (rc == 0) {
b6b38f70 491 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
492 goto reopen_success;
493 }
494 /* fallthrough to retry open the old way on errors, especially
495 in the reconnect path it is important to retry hard */
496 }
497
498 desiredAccess = cifs_convert_flags(file->f_flags);
499
1da177e4 500 /* Can not refresh inode by passing in file_info buf to be returned
fb8c4b14
SF
501 by SMBOpen and then calling get_inode_info with returned buf
502 since file might have write behind data that needs to be flushed
1da177e4
LT
503 and server version of file size can be stale. If we knew for sure
504 that inode was not dirty locally we could do this */
505
7fc8f4e9 506 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
1da177e4 507 CREATE_NOT_DIR, &netfid, &oplock, NULL,
fb8c4b14 508 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
737b758c 509 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4 510 if (rc) {
f0a71eb8 511 mutex_unlock(&pCifsFile->fh_mutex);
b6b38f70
JP
512 cFYI(1, "cifs_open returned 0x%x", rc);
513 cFYI(1, "oplock: %d", oplock);
1da177e4 514 } else {
7fc8f4e9 515reopen_success:
1da177e4 516 pCifsFile->netfid = netfid;
4b18f2a9 517 pCifsFile->invalidHandle = false;
f0a71eb8 518 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4
LT
519 pCifsInode = CIFS_I(inode);
520 if (pCifsInode) {
521 if (can_flush) {
cea21805
JL
522 rc = filemap_write_and_wait(inode->i_mapping);
523 if (rc != 0)
524 CIFS_I(inode)->write_behind_rc = rc;
1da177e4
LT
525 /* temporarily disable caching while we
526 go to server to get inode info */
4b18f2a9
SF
527 pCifsInode->clientCanCacheAll = false;
528 pCifsInode->clientCanCacheRead = false;
7fc8f4e9 529 if (tcon->unix_ext)
1da177e4
LT
530 rc = cifs_get_inode_info_unix(&inode,
531 full_path, inode->i_sb, xid);
532 else
533 rc = cifs_get_inode_info(&inode,
534 full_path, NULL, inode->i_sb,
8b1327f6 535 xid, NULL);
1da177e4
LT
536 } /* else we are writing out data to server already
537 and could deadlock if we tried to flush data, and
538 since we do not know if we have data that would
539 invalidate the current end of file on the server
540 we can not go to the server to get the new inod
541 info */
542 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
4b18f2a9
SF
543 pCifsInode->clientCanCacheAll = true;
544 pCifsInode->clientCanCacheRead = true;
b6b38f70
JP
545 cFYI(1, "Exclusive Oplock granted on inode %p",
546 file->f_path.dentry->d_inode);
1da177e4 547 } else if ((oplock & 0xF) == OPLOCK_READ) {
4b18f2a9
SF
548 pCifsInode->clientCanCacheRead = true;
549 pCifsInode->clientCanCacheAll = false;
1da177e4 550 } else {
4b18f2a9
SF
551 pCifsInode->clientCanCacheRead = false;
552 pCifsInode->clientCanCacheAll = false;
1da177e4
LT
553 }
554 cifs_relock_file(pCifsFile);
555 }
556 }
1da177e4
LT
557 kfree(full_path);
558 FreeXid(xid);
559 return rc;
560}
561
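/* VFS ->release() for regular files: give pending writes a brief chance to
   reach the server, close the handle, free any recorded byte-range locks and
   drop this reference on the cifsFileInfo. */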
562int cifs_close(struct inode *inode, struct file *file)
563{
564 int rc = 0;
15745320 565 int xid, timeout;
1da177e4
LT
566 struct cifs_sb_info *cifs_sb;
567 struct cifsTconInfo *pTcon;
c21dfb69 568 struct cifsFileInfo *pSMBFile = file->private_data;
1da177e4
LT
569
570 xid = GetXid();
571
572 cifs_sb = CIFS_SB(inode->i_sb);
573 pTcon = cifs_sb->tcon;
574 if (pSMBFile) {
7ee1af76 575 struct cifsLockInfo *li, *tmp;
ddb4cbfc 576 write_lock(&GlobalSMBSeslock);
4b18f2a9 577 pSMBFile->closePend = true;
1da177e4
LT
578 if (pTcon) {
579 /* no sense reconnecting to close a file that is
580 already closed */
3b795210 581 if (!pTcon->need_reconnect) {
ddb4cbfc 582 write_unlock(&GlobalSMBSeslock);
15745320 583 timeout = 2;
6ab409b5 584 while ((atomic_read(&pSMBFile->count) != 1)
15745320 585 && (timeout <= 2048)) {
23e7dd7d
SF
586 /* Give write a better chance to get to
587 server ahead of the close. We do not
588 want to add a wait_q here as it would
589 increase the memory utilization as
590 the struct would be in each open file,
fb8c4b14 591 but this should give enough time to
23e7dd7d 592 clear the socket */
b6b38f70 593 cFYI(DBG2, "close delay, write pending");
23e7dd7d
SF
594 msleep(timeout);
595 timeout *= 4;
4891d539 596 }
ddb4cbfc
SF
597 if (!pTcon->need_reconnect &&
598 !pSMBFile->invalidHandle)
599 rc = CIFSSMBClose(xid, pTcon,
1da177e4 600 pSMBFile->netfid);
ddb4cbfc
SF
601 } else
602 write_unlock(&GlobalSMBSeslock);
603 } else
604 write_unlock(&GlobalSMBSeslock);
7ee1af76
JA
605
606 /* Delete any outstanding lock records.
607 We'll lose them when the file is closed anyway. */
796e5661 608 mutex_lock(&pSMBFile->lock_mutex);
7ee1af76
JA
609 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
610 list_del(&li->llist);
611 kfree(li);
612 }
796e5661 613 mutex_unlock(&pSMBFile->lock_mutex);
7ee1af76 614
cbe0476f 615 write_lock(&GlobalSMBSeslock);
1da177e4
LT
616 list_del(&pSMBFile->flist);
617 list_del(&pSMBFile->tlist);
cbe0476f 618 write_unlock(&GlobalSMBSeslock);
6ab409b5 619 cifsFileInfo_put(file->private_data);
1da177e4
LT
620 file->private_data = NULL;
621 } else
622 rc = -EBADF;
623
4efa53f0 624 read_lock(&GlobalSMBSeslock);
1da177e4 625 if (list_empty(&(CIFS_I(inode)->openFileList))) {
b6b38f70 626 cFYI(1, "closing last open instance for inode %p", inode);
1da177e4
LT
627 /* if the file is not open we do not know if we can cache info
628 on this inode, much less write behind and read ahead */
4b18f2a9
SF
629 CIFS_I(inode)->clientCanCacheRead = false;
630 CIFS_I(inode)->clientCanCacheAll = false;
1da177e4 631 }
4efa53f0 632 read_unlock(&GlobalSMBSeslock);
fb8c4b14 633 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
1da177e4
LT
634 rc = CIFS_I(inode)->write_behind_rc;
635 FreeXid(xid);
636 return rc;
637}
638
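/* VFS ->release() for directories: close any unfinished FindFirst/FindNext
   search on the server and free the buffered search results. */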
639int cifs_closedir(struct inode *inode, struct file *file)
640{
641 int rc = 0;
642 int xid;
c21dfb69 643 struct cifsFileInfo *pCFileStruct = file->private_data;
1da177e4
LT
644 char *ptmp;
645
b6b38f70 646 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4
LT
647
648 xid = GetXid();
649
650 if (pCFileStruct) {
651 struct cifsTconInfo *pTcon;
fb8c4b14
SF
652 struct cifs_sb_info *cifs_sb =
653 CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
654
655 pTcon = cifs_sb->tcon;
656
b6b38f70 657 cFYI(1, "Freeing private data in close dir");
ddb4cbfc 658 write_lock(&GlobalSMBSeslock);
4b18f2a9
SF
659 if (!pCFileStruct->srch_inf.endOfSearch &&
660 !pCFileStruct->invalidHandle) {
661 pCFileStruct->invalidHandle = true;
ddb4cbfc 662 write_unlock(&GlobalSMBSeslock);
1da177e4 663 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
b6b38f70
JP
664 cFYI(1, "Closing uncompleted readdir with rc %d",
665 rc);
1da177e4
LT
666 /* not much we can do if it fails anyway, ignore rc */
667 rc = 0;
ddb4cbfc
SF
668 } else
669 write_unlock(&GlobalSMBSeslock);
1da177e4
LT
670 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
671 if (ptmp) {
b6b38f70 672 cFYI(1, "closedir free smb buf in srch struct");
1da177e4 673 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
fb8c4b14 674 if (pCFileStruct->srch_inf.smallBuf)
d47d7c1a
SF
675 cifs_small_buf_release(ptmp);
676 else
677 cifs_buf_release(ptmp);
1da177e4 678 }
1da177e4
LT
679 kfree(file->private_data);
680 file->private_data = NULL;
681 }
682 /* BB can we lock the filestruct while this is going on? */
683 FreeXid(xid);
684 return rc;
685}
686
7ee1af76
JA
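/* Remember a byte-range lock taken on the server so it can be matched
   against later unlock requests and released when the file is closed. */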
687static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
688 __u64 offset, __u8 lockType)
689{
fb8c4b14
SF
690 struct cifsLockInfo *li =
691 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
7ee1af76
JA
692 if (li == NULL)
693 return -ENOMEM;
694 li->offset = offset;
695 li->length = len;
696 li->type = lockType;
796e5661 697 mutex_lock(&fid->lock_mutex);
7ee1af76 698 list_add(&li->llist, &fid->llist);
796e5661 699 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
700 return 0;
701}
702
1da177e4
LT
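/* VFS ->lock() handler: use POSIX byte-range locks when the Unix extensions
   permit it, otherwise emulate fcntl semantics with Windows-style
   LOCKING_ANDX requests. */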
703int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
704{
705 int rc, xid;
1da177e4
LT
706 __u32 numLock = 0;
707 __u32 numUnlock = 0;
708 __u64 length;
4b18f2a9 709 bool wait_flag = false;
1da177e4 710 struct cifs_sb_info *cifs_sb;
13a6e42a 711 struct cifsTconInfo *tcon;
08547b03
SF
712 __u16 netfid;
713 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
13a6e42a 714 bool posix_locking = 0;
1da177e4
LT
715
716 length = 1 + pfLock->fl_end - pfLock->fl_start;
717 rc = -EACCES;
718 xid = GetXid();
719
b6b38f70 720 cFYI(1, "Lock parm: 0x%x flockflags: "
1da177e4 721 "0x%x flocktype: 0x%x start: %lld end: %lld",
fb8c4b14 722 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
b6b38f70 723 pfLock->fl_end);
1da177e4
LT
724
725 if (pfLock->fl_flags & FL_POSIX)
b6b38f70 726 cFYI(1, "Posix");
1da177e4 727 if (pfLock->fl_flags & FL_FLOCK)
b6b38f70 728 cFYI(1, "Flock");
1da177e4 729 if (pfLock->fl_flags & FL_SLEEP) {
b6b38f70 730 cFYI(1, "Blocking lock");
4b18f2a9 731 wait_flag = true;
1da177e4
LT
732 }
733 if (pfLock->fl_flags & FL_ACCESS)
b6b38f70
JP
734 cFYI(1, "Process suspended by mandatory locking - "
735 "not implemented yet");
1da177e4 736 if (pfLock->fl_flags & FL_LEASE)
b6b38f70 737 cFYI(1, "Lease on file - not implemented yet");
fb8c4b14 738 if (pfLock->fl_flags &
1da177e4 739 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
b6b38f70 740 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
1da177e4
LT
741
742 if (pfLock->fl_type == F_WRLCK) {
b6b38f70 743 cFYI(1, "F_WRLCK ");
1da177e4
LT
744 numLock = 1;
745 } else if (pfLock->fl_type == F_UNLCK) {
b6b38f70 746 cFYI(1, "F_UNLCK");
1da177e4 747 numUnlock = 1;
d47d7c1a
SF
748 /* Check if unlock includes more than
749 one lock range */
1da177e4 750 } else if (pfLock->fl_type == F_RDLCK) {
b6b38f70 751 cFYI(1, "F_RDLCK");
1da177e4
LT
752 lockType |= LOCKING_ANDX_SHARED_LOCK;
753 numLock = 1;
754 } else if (pfLock->fl_type == F_EXLCK) {
b6b38f70 755 cFYI(1, "F_EXLCK");
1da177e4
LT
756 numLock = 1;
757 } else if (pfLock->fl_type == F_SHLCK) {
b6b38f70 758 cFYI(1, "F_SHLCK");
1da177e4
LT
759 lockType |= LOCKING_ANDX_SHARED_LOCK;
760 numLock = 1;
761 } else
b6b38f70 762 cFYI(1, "Unknown type of lock");
1da177e4 763
e6a00296 764 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13a6e42a 765 tcon = cifs_sb->tcon;
1da177e4
LT
766
767 if (file->private_data == NULL) {
0f3bc09e 768 rc = -EBADF;
1da177e4 769 FreeXid(xid);
0f3bc09e 770 return rc;
1da177e4 771 }
08547b03
SF
772 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
773
13a6e42a
SF
774 if ((tcon->ses->capabilities & CAP_UNIX) &&
775 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
acc18aa1 776 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
13a6e42a 777 posix_locking = 1;
08547b03
SF
778 /* BB add code here to normalize offset and length to
779 account for negative length which we can not accept over the
780 wire */
1da177e4 781 if (IS_GETLK(cmd)) {
fb8c4b14 782 if (posix_locking) {
08547b03 783 int posix_lock_type;
fb8c4b14 784 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
785 posix_lock_type = CIFS_RDLCK;
786 else
787 posix_lock_type = CIFS_WRLCK;
13a6e42a 788 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
fc94cdb9 789 length, pfLock,
08547b03
SF
790 posix_lock_type, wait_flag);
791 FreeXid(xid);
792 return rc;
793 }
794
795 /* BB we could chain these into one lock request BB */
13a6e42a 796 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
08547b03 797 0, 1, lockType, 0 /* wait flag */ );
1da177e4 798 if (rc == 0) {
13a6e42a 799 rc = CIFSSMBLock(xid, tcon, netfid, length,
1da177e4
LT
800 pfLock->fl_start, 1 /* numUnlock */ ,
801 0 /* numLock */ , lockType,
802 0 /* wait flag */ );
803 pfLock->fl_type = F_UNLCK;
804 if (rc != 0)
b6b38f70
JP
805 cERROR(1, "Error unlocking previously locked "
806 "range %d during test of lock", rc);
1da177e4
LT
807 rc = 0;
808
809 } else {
810 /* if rc == ERR_SHARING_VIOLATION ? */
f05337c6
PS
811 rc = 0;
812
813 if (lockType & LOCKING_ANDX_SHARED_LOCK) {
814 pfLock->fl_type = F_WRLCK;
815 } else {
816 rc = CIFSSMBLock(xid, tcon, netfid, length,
817 pfLock->fl_start, 0, 1,
818 lockType | LOCKING_ANDX_SHARED_LOCK,
819 0 /* wait flag */);
820 if (rc == 0) {
821 rc = CIFSSMBLock(xid, tcon, netfid,
822 length, pfLock->fl_start, 1, 0,
823 lockType |
824 LOCKING_ANDX_SHARED_LOCK,
825 0 /* wait flag */);
826 pfLock->fl_type = F_RDLCK;
827 if (rc != 0)
f19159dc 828 cERROR(1, "Error unlocking "
f05337c6 829 "previously locked range %d "
f19159dc 830 "during test of lock", rc);
f05337c6
PS
831 rc = 0;
832 } else {
833 pfLock->fl_type = F_WRLCK;
834 rc = 0;
835 }
836 }
1da177e4
LT
837 }
838
839 FreeXid(xid);
840 return rc;
841 }
7ee1af76
JA
842
843 if (!numLock && !numUnlock) {
844 /* if no lock or unlock then nothing
845 to do since we do not know what it is */
846 FreeXid(xid);
847 return -EOPNOTSUPP;
848 }
849
850 if (posix_locking) {
08547b03 851 int posix_lock_type;
fb8c4b14 852 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
853 posix_lock_type = CIFS_RDLCK;
854 else
855 posix_lock_type = CIFS_WRLCK;
50c2f753 856
fb8c4b14 857 if (numUnlock == 1)
beb84dc8 858 posix_lock_type = CIFS_UNLCK;
7ee1af76 859
13a6e42a 860 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
fc94cdb9 861 length, pfLock,
08547b03 862 posix_lock_type, wait_flag);
7ee1af76 863 } else {
c21dfb69 864 struct cifsFileInfo *fid = file->private_data;
7ee1af76
JA
865
866 if (numLock) {
13a6e42a 867 rc = CIFSSMBLock(xid, tcon, netfid, length,
fb8c4b14 868 pfLock->fl_start,
7ee1af76
JA
869 0, numLock, lockType, wait_flag);
870
871 if (rc == 0) {
872 /* For Windows locks we must store them. */
873 rc = store_file_lock(fid, length,
874 pfLock->fl_start, lockType);
875 }
876 } else if (numUnlock) {
877 /* For each stored lock that this unlock overlaps
878 completely, unlock it. */
879 int stored_rc = 0;
880 struct cifsLockInfo *li, *tmp;
881
6b70c955 882 rc = 0;
796e5661 883 mutex_lock(&fid->lock_mutex);
7ee1af76
JA
884 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
885 if (pfLock->fl_start <= li->offset &&
c19eb710 886 (pfLock->fl_start + length) >=
39db810c 887 (li->offset + li->length)) {
13a6e42a 888 stored_rc = CIFSSMBLock(xid, tcon,
fb8c4b14 889 netfid,
7ee1af76 890 li->length, li->offset,
4b18f2a9 891 1, 0, li->type, false);
7ee1af76
JA
892 if (stored_rc)
893 rc = stored_rc;
2c964d1f
PS
894 else {
895 list_del(&li->llist);
896 kfree(li);
897 }
7ee1af76
JA
898 }
899 }
796e5661 900 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
901 }
902 }
903
d634cc15 904 if (pfLock->fl_flags & FL_POSIX)
1da177e4
LT
905 posix_lock_file_wait(file, pfLock);
906 FreeXid(xid);
907 return rc;
908}
909
fbec9ab9
JL
910/*
911 * Set the timeout on write requests past EOF. For some servers (Windows)
912 * these calls can be very long.
913 *
914 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
915 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
916 * The 10M cutoff is totally arbitrary. A better scheme for this would be
917 * welcome if someone wants to suggest one.
918 *
919 * We may be able to do a better job with this if there were some way to
920 * declare that a file should be sparse.
921 */
922static int
923cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
924{
925 if (offset <= cifsi->server_eof)
926 return CIFS_STD_OP;
927 else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
928 return CIFS_VLONG_OP;
929 else
930 return CIFS_LONG_OP;
931}
932
933/* update the file size (if needed) after a write */
934static void
935cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
936 unsigned int bytes_written)
937{
938 loff_t end_of_write = offset + bytes_written;
939
940 if (end_of_write > cifsi->server_eof)
941 cifsi->server_eof = end_of_write;
942}
943
1da177e4
LT
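/* Write from a user-space buffer to the server in chunks of at most wsize
   bytes via CIFSSMBWrite, updating the cached server EOF as data lands. */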
944ssize_t cifs_user_write(struct file *file, const char __user *write_data,
945 size_t write_size, loff_t *poffset)
946{
947 int rc = 0;
948 unsigned int bytes_written = 0;
949 unsigned int total_written;
950 struct cifs_sb_info *cifs_sb;
951 struct cifsTconInfo *pTcon;
952 int xid, long_op;
953 struct cifsFileInfo *open_file;
fbec9ab9 954 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 955
e6a00296 956 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
957
958 pTcon = cifs_sb->tcon;
959
b6b38f70
JP
960 /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
961 *poffset, file->f_path.dentry->d_name.name); */
1da177e4
LT
962
963 if (file->private_data == NULL)
964 return -EBADF;
c21dfb69 965 open_file = file->private_data;
50c2f753 966
838726c4
JL
967 rc = generic_write_checks(file, poffset, &write_size, 0);
968 if (rc)
969 return rc;
970
1da177e4 971 xid = GetXid();
1da177e4 972
fbec9ab9 973 long_op = cifs_write_timeout(cifsi, *poffset);
1da177e4
LT
974 for (total_written = 0; write_size > total_written;
975 total_written += bytes_written) {
976 rc = -EAGAIN;
977 while (rc == -EAGAIN) {
978 if (file->private_data == NULL) {
979 /* file has been closed on us */
980 FreeXid(xid);
981 /* if we have gotten here we have written some data
982 and blocked, and the file has been freed on us while
983 we blocked so return what we managed to write */
984 return total_written;
fb8c4b14 985 }
1da177e4
LT
986 if (open_file->closePend) {
987 FreeXid(xid);
988 if (total_written)
989 return total_written;
990 else
991 return -EBADF;
992 }
993 if (open_file->invalidHandle) {
1da177e4
LT
994 /* we could deadlock if we called
995 filemap_fdatawait from here so tell
996 reopen_file not to flush data to server
997 now */
4b18f2a9 998 rc = cifs_reopen_file(file, false);
1da177e4
LT
999 if (rc != 0)
1000 break;
1001 }
1002
1003 rc = CIFSSMBWrite(xid, pTcon,
1004 open_file->netfid,
1005 min_t(const int, cifs_sb->wsize,
1006 write_size - total_written),
1007 *poffset, &bytes_written,
1008 NULL, write_data + total_written, long_op);
1009 }
1010 if (rc || (bytes_written == 0)) {
1011 if (total_written)
1012 break;
1013 else {
1014 FreeXid(xid);
1015 return rc;
1016 }
fbec9ab9
JL
1017 } else {
1018 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1019 *poffset += bytes_written;
fbec9ab9 1020 }
133672ef 1021 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
1022 15 seconds is plenty */
1023 }
1024
a4544347 1025 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
1026
1027 /* since the write may have blocked check these pointers again */
3677db10
SF
1028 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1029 struct inode *inode = file->f_path.dentry->d_inode;
fb8c4b14
SF
1030/* Do not update local mtime - server will set its actual value on write
1031 * inode->i_ctime = inode->i_mtime =
3677db10
SF
1032 * current_fs_time(inode->i_sb);*/
1033 if (total_written > 0) {
1034 spin_lock(&inode->i_lock);
1035 if (*poffset > file->f_path.dentry->d_inode->i_size)
1036 i_size_write(file->f_path.dentry->d_inode,
1da177e4 1037 *poffset);
3677db10 1038 spin_unlock(&inode->i_lock);
1da177e4 1039 }
fb8c4b14 1040 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1041 }
1042 FreeXid(xid);
1043 return total_written;
1044}
1045
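/* Write from a kernel buffer on behalf of the page-cache paths
   (writepage/write_end); uses CIFSSMBWrite2 when signing is not in use
   (or experimental support is enabled) and CIFSSMBWrite otherwise. */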
1046static ssize_t cifs_write(struct file *file, const char *write_data,
d9414774 1047 size_t write_size, loff_t *poffset)
1da177e4
LT
1048{
1049 int rc = 0;
1050 unsigned int bytes_written = 0;
1051 unsigned int total_written;
1052 struct cifs_sb_info *cifs_sb;
1053 struct cifsTconInfo *pTcon;
1054 int xid, long_op;
1055 struct cifsFileInfo *open_file;
fbec9ab9 1056 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 1057
e6a00296 1058 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1059
1060 pTcon = cifs_sb->tcon;
1061
b6b38f70
JP
1062 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1063 *poffset, file->f_path.dentry->d_name.name);
1da177e4
LT
1064
1065 if (file->private_data == NULL)
1066 return -EBADF;
c21dfb69 1067 open_file = file->private_data;
50c2f753 1068
1da177e4 1069 xid = GetXid();
1da177e4 1070
fbec9ab9 1071 long_op = cifs_write_timeout(cifsi, *poffset);
1da177e4
LT
1072 for (total_written = 0; write_size > total_written;
1073 total_written += bytes_written) {
1074 rc = -EAGAIN;
1075 while (rc == -EAGAIN) {
1076 if (file->private_data == NULL) {
1077 /* file has been closed on us */
1078 FreeXid(xid);
1079 /* if we have gotten here we have written some data
1080 and blocked, and the file has been freed on us
fb8c4b14 1081 while we blocked so return what we managed to
1da177e4
LT
1082 write */
1083 return total_written;
fb8c4b14 1084 }
1da177e4
LT
1085 if (open_file->closePend) {
1086 FreeXid(xid);
1087 if (total_written)
1088 return total_written;
1089 else
1090 return -EBADF;
1091 }
1092 if (open_file->invalidHandle) {
1da177e4
LT
1093 /* we could deadlock if we called
1094 filemap_fdatawait from here so tell
fb8c4b14 1095 reopen_file not to flush data to
1da177e4 1096 server now */
4b18f2a9 1097 rc = cifs_reopen_file(file, false);
1da177e4
LT
1098 if (rc != 0)
1099 break;
1100 }
fb8c4b14
SF
1101 if (experimEnabled || (pTcon->ses->server &&
1102 ((pTcon->ses->server->secMode &
08775834 1103 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
c01f36a8 1104 == 0))) {
3e84469d
SF
1105 struct kvec iov[2];
1106 unsigned int len;
1107
0ae0efad 1108 len = min((size_t)cifs_sb->wsize,
3e84469d
SF
1109 write_size - total_written);
1110 /* iov[0] is reserved for smb header */
1111 iov[1].iov_base = (char *)write_data +
1112 total_written;
1113 iov[1].iov_len = len;
d6e04ae6 1114 rc = CIFSSMBWrite2(xid, pTcon,
3e84469d 1115 open_file->netfid, len,
d6e04ae6 1116 *poffset, &bytes_written,
3e84469d 1117 iov, 1, long_op);
d6e04ae6 1118 } else
60808233
SF
1119 rc = CIFSSMBWrite(xid, pTcon,
1120 open_file->netfid,
1121 min_t(const int, cifs_sb->wsize,
1122 write_size - total_written),
1123 *poffset, &bytes_written,
1124 write_data + total_written,
1125 NULL, long_op);
1da177e4
LT
1126 }
1127 if (rc || (bytes_written == 0)) {
1128 if (total_written)
1129 break;
1130 else {
1131 FreeXid(xid);
1132 return rc;
1133 }
fbec9ab9
JL
1134 } else {
1135 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1136 *poffset += bytes_written;
fbec9ab9 1137 }
133672ef 1138 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
1139 15 seconds is plenty */
1140 }
1141
a4544347 1142 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
1143
1144 /* since the write may have blocked check these pointers again */
3677db10 1145 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
004c46b9 1146/*BB We could make this contingent on superblock ATIME flag too */
3677db10
SF
1147/* file->f_path.dentry->d_inode->i_ctime =
1148 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1149 if (total_written > 0) {
1150 spin_lock(&file->f_path.dentry->d_inode->i_lock);
1151 if (*poffset > file->f_path.dentry->d_inode->i_size)
1152 i_size_write(file->f_path.dentry->d_inode,
1153 *poffset);
1154 spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1da177e4 1155 }
3677db10 1156 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1157 }
1158 FreeXid(xid);
1159 return total_written;
1160}
1161
630f3f0c
SF
1162#ifdef CONFIG_CIFS_EXPERIMENTAL
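/* Find an open handle on this inode with read access and take a reference
   on it so it cannot be closed underneath the caller. */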
1163struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
1164{
1165 struct cifsFileInfo *open_file = NULL;
1166
1167 read_lock(&GlobalSMBSeslock);
1168 /* we could simply get the first_list_entry since write-only entries
1169 are always at the end of the list but since the first entry might
1170 have a close pending, we go through the whole list */
1171 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1172 if (open_file->closePend)
1173 continue;
1174 if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
1175 (open_file->pfile->f_flags & O_RDONLY))) {
1176 if (!open_file->invalidHandle) {
1177 /* found a good file */
1178 /* lock it so it will not be closed on us */
6ab409b5 1179 cifsFileInfo_get(open_file);
630f3f0c
SF
1180 read_unlock(&GlobalSMBSeslock);
1181 return open_file;
1182 } /* else might as well continue, and look for
1183 another, or simply have the caller reopen it
1184 again rather than trying to fix this handle */
1185 } else /* write only file */
1186 break; /* write only files are last so must be done */
1187 }
1188 read_unlock(&GlobalSMBSeslock);
1189 return NULL;
1190}
1191#endif
1192
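/* Find (and take a reference on) an open handle with write access to this
   inode, preferring handles owned by the current task and reopening an
   invalidated handle if necessary. */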
dd99cd80 1193struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
6148a742
SF
1194{
1195 struct cifsFileInfo *open_file;
2846d386 1196 bool any_available = false;
dd99cd80 1197 int rc;
6148a742 1198
60808233
SF
1199 /* Having a null inode here (because mapping->host was set to zero by
1200 the VFS or MM) should not happen but we had reports of on oops (due to
1201 it being zero) during stress testcases so we need to check for it */
1202
fb8c4b14 1203 if (cifs_inode == NULL) {
b6b38f70 1204 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1205 dump_stack();
1206 return NULL;
1207 }
1208
6148a742 1209 read_lock(&GlobalSMBSeslock);
9b22b0b7 1210refind_writable:
6148a742 1211 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2846d386
JL
1212 if (open_file->closePend ||
1213 (!any_available && open_file->pid != current->tgid))
6148a742 1214 continue;
2846d386 1215
6148a742
SF
1216 if (open_file->pfile &&
1217 ((open_file->pfile->f_flags & O_RDWR) ||
1218 (open_file->pfile->f_flags & O_WRONLY))) {
6ab409b5 1219 cifsFileInfo_get(open_file);
9b22b0b7
SF
1220
1221 if (!open_file->invalidHandle) {
1222 /* found a good writable file */
1223 read_unlock(&GlobalSMBSeslock);
1224 return open_file;
1225 }
8840dee9 1226
6148a742 1227 read_unlock(&GlobalSMBSeslock);
9b22b0b7 1228 /* Had to unlock since following call can block */
4b18f2a9 1229 rc = cifs_reopen_file(open_file->pfile, false);
8840dee9 1230 if (!rc) {
9b22b0b7
SF
1231 if (!open_file->closePend)
1232 return open_file;
1233 else { /* start over in case this was deleted */
1234 /* since the list could be modified */
37c0eb46 1235 read_lock(&GlobalSMBSeslock);
6ab409b5 1236 cifsFileInfo_put(open_file);
9b22b0b7 1237 goto refind_writable;
37c0eb46
SF
1238 }
1239 }
9b22b0b7
SF
1240
1241 /* if it fails, try another handle if possible -
1242 (we can not do this if closePending since
1243 loop could be modified - in which case we
1244 have to start at the beginning of the list
1245 again. Note that it would be bad
1246 to hold up writepages here (rather than
1247 in caller) with continuous retries */
b6b38f70 1248 cFYI(1, "wp failed on reopen file");
9b22b0b7
SF
1249 read_lock(&GlobalSMBSeslock);
1250 /* can not use this handle, no write
1251 pending on this one after all */
6ab409b5 1252 cifsFileInfo_put(open_file);
8840dee9 1253
9b22b0b7
SF
1254 if (open_file->closePend) /* list could have changed */
1255 goto refind_writable;
1256 /* else we simply continue to the next entry. Thus
1257 we do not loop on reopen errors. If we
1258 can not reopen the file, for example if we
1259 reconnected to a server with another client
1260 racing to delete or lock the file we would not
1261 make progress if we restarted before the beginning
1262 of the loop here. */
6148a742
SF
1263 }
1264 }
2846d386
JL
1265 /* couldn't find useable FH with same pid, try any available */
1266 if (!any_available) {
1267 any_available = true;
1268 goto refind_writable;
1269 }
6148a742
SF
1270 read_unlock(&GlobalSMBSeslock);
1271 return NULL;
1272}
1273
1da177e4
LT
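/* Write the byte range [from, to) of a page back to the server through any
   available writable handle for the inode. */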
1274static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1275{
1276 struct address_space *mapping = page->mapping;
1277 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1278 char *write_data;
1279 int rc = -EFAULT;
1280 int bytes_written = 0;
1281 struct cifs_sb_info *cifs_sb;
1282 struct cifsTconInfo *pTcon;
1283 struct inode *inode;
6148a742 1284 struct cifsFileInfo *open_file;
1da177e4
LT
1285
1286 if (!mapping || !mapping->host)
1287 return -EFAULT;
1288
1289 inode = page->mapping->host;
1290 cifs_sb = CIFS_SB(inode->i_sb);
1291 pTcon = cifs_sb->tcon;
1292
1293 offset += (loff_t)from;
1294 write_data = kmap(page);
1295 write_data += from;
1296
1297 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1298 kunmap(page);
1299 return -EIO;
1300 }
1301
1302 /* racing with truncate? */
1303 if (offset > mapping->host->i_size) {
1304 kunmap(page);
1305 return 0; /* don't care */
1306 }
1307
1308 /* check to make sure that we are not extending the file */
1309 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1310 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1311
6148a742
SF
1312 open_file = find_writable_file(CIFS_I(mapping->host));
1313 if (open_file) {
1314 bytes_written = cifs_write(open_file->pfile, write_data,
1315 to-from, &offset);
6ab409b5 1316 cifsFileInfo_put(open_file);
1da177e4 1317 /* Does mm or vfs already set times? */
6148a742 1318 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1319 if ((bytes_written > 0) && (offset))
6148a742 1320 rc = 0;
bb5a9a04
SF
1321 else if (bytes_written < 0)
1322 rc = bytes_written;
6148a742 1323 } else {
b6b38f70 1324 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1325 rc = -EIO;
1326 }
1327
1328 kunmap(page);
1329 return rc;
1330}
1331
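/* Write back dirty pages for the mapping, coalescing runs of contiguous
   dirty pages into single CIFSSMBWrite2 calls of up to wsize bytes; falls
   back to generic_writepages when wsize is smaller than a page or when
   signing is required without experimental support. */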
1da177e4 1332static int cifs_writepages(struct address_space *mapping,
37c0eb46 1333 struct writeback_control *wbc)
1da177e4 1334{
37c0eb46
SF
1335 struct backing_dev_info *bdi = mapping->backing_dev_info;
1336 unsigned int bytes_to_write;
1337 unsigned int bytes_written;
1338 struct cifs_sb_info *cifs_sb;
1339 int done = 0;
111ebb6e 1340 pgoff_t end;
37c0eb46 1341 pgoff_t index;
fb8c4b14
SF
1342 int range_whole = 0;
1343 struct kvec *iov;
84d2f07e 1344 int len;
37c0eb46
SF
1345 int n_iov = 0;
1346 pgoff_t next;
1347 int nr_pages;
1348 __u64 offset = 0;
23e7dd7d 1349 struct cifsFileInfo *open_file;
fbec9ab9 1350 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
37c0eb46
SF
1351 struct page *page;
1352 struct pagevec pvec;
1353 int rc = 0;
1354 int scanned = 0;
fbec9ab9 1355 int xid, long_op;
1da177e4 1356
37c0eb46 1357 cifs_sb = CIFS_SB(mapping->host->i_sb);
50c2f753 1358
37c0eb46
SF
1359 /*
1360 * If wsize is smaller that the page cache size, default to writing
1361 * one page at a time via cifs_writepage
1362 */
1363 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1364 return generic_writepages(mapping, wbc);
1365
fb8c4b14
SF
1366 if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1367 if (cifs_sb->tcon->ses->server->secMode &
1368 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1369 if (!experimEnabled)
60808233 1370 return generic_writepages(mapping, wbc);
4a77118c 1371
9a0c8230 1372 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
fb8c4b14 1373 if (iov == NULL)
9a0c8230
SF
1374 return generic_writepages(mapping, wbc);
1375
1376
37c0eb46
SF
1377 /*
1378 * BB: Is this meaningful for a non-block-device file system?
1379 * If it is, we should test it again after we do I/O
1380 */
1381 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1382 wbc->encountered_congestion = 1;
9a0c8230 1383 kfree(iov);
37c0eb46
SF
1384 return 0;
1385 }
1386
1da177e4
LT
1387 xid = GetXid();
1388
37c0eb46 1389 pagevec_init(&pvec, 0);
111ebb6e 1390 if (wbc->range_cyclic) {
37c0eb46 1391 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1392 end = -1;
1393 } else {
1394 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1395 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1396 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1397 range_whole = 1;
37c0eb46
SF
1398 scanned = 1;
1399 }
1400retry:
1401 while (!done && (index <= end) &&
1402 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1403 PAGECACHE_TAG_DIRTY,
1404 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1405 int first;
1406 unsigned int i;
1407
37c0eb46
SF
1408 first = -1;
1409 next = 0;
1410 n_iov = 0;
1411 bytes_to_write = 0;
1412
1413 for (i = 0; i < nr_pages; i++) {
1414 page = pvec.pages[i];
1415 /*
1416 * At this point we hold neither mapping->tree_lock nor
1417 * lock on the page itself: the page may be truncated or
1418 * invalidated (changing page->mapping to NULL), or even
1419 * swizzled back from swapper_space to tmpfs file
1420 * mapping
1421 */
1422
1423 if (first < 0)
1424 lock_page(page);
529ae9aa 1425 else if (!trylock_page(page))
37c0eb46
SF
1426 break;
1427
1428 if (unlikely(page->mapping != mapping)) {
1429 unlock_page(page);
1430 break;
1431 }
1432
111ebb6e 1433 if (!wbc->range_cyclic && page->index > end) {
37c0eb46
SF
1434 done = 1;
1435 unlock_page(page);
1436 break;
1437 }
1438
1439 if (next && (page->index != next)) {
1440 /* Not next consecutive page */
1441 unlock_page(page);
1442 break;
1443 }
1444
1445 if (wbc->sync_mode != WB_SYNC_NONE)
1446 wait_on_page_writeback(page);
1447
1448 if (PageWriteback(page) ||
cb876f45 1449 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1450 unlock_page(page);
1451 break;
1452 }
84d2f07e 1453
cb876f45
LT
1454 /*
1455 * This actually clears the dirty bit in the radix tree.
1456 * See cifs_writepage() for more commentary.
1457 */
1458 set_page_writeback(page);
1459
84d2f07e
SF
1460 if (page_offset(page) >= mapping->host->i_size) {
1461 done = 1;
1462 unlock_page(page);
cb876f45 1463 end_page_writeback(page);
84d2f07e
SF
1464 break;
1465 }
1466
37c0eb46
SF
1467 /*
1468 * BB can we get rid of this? pages are held by pvec
1469 */
1470 page_cache_get(page);
1471
84d2f07e
SF
1472 len = min(mapping->host->i_size - page_offset(page),
1473 (loff_t)PAGE_CACHE_SIZE);
1474
37c0eb46
SF
1475 /* reserve iov[0] for the smb header */
1476 n_iov++;
1477 iov[n_iov].iov_base = kmap(page);
84d2f07e
SF
1478 iov[n_iov].iov_len = len;
1479 bytes_to_write += len;
37c0eb46
SF
1480
1481 if (first < 0) {
1482 first = i;
1483 offset = page_offset(page);
1484 }
1485 next = page->index + 1;
1486 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1487 break;
1488 }
1489 if (n_iov) {
23e7dd7d
SF
1490 /* Search for a writable handle every time we call
1491 * CIFSSMBWrite2. We can't rely on the last handle
1492 * we used to still be valid
1493 */
1494 open_file = find_writable_file(CIFS_I(mapping->host));
1495 if (!open_file) {
b6b38f70 1496 cERROR(1, "No writable handles for inode");
23e7dd7d 1497 rc = -EBADF;
1047abc1 1498 } else {
fbec9ab9 1499 long_op = cifs_write_timeout(cifsi, offset);
23e7dd7d
SF
1500 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1501 open_file->netfid,
1502 bytes_to_write, offset,
1503 &bytes_written, iov, n_iov,
fbec9ab9 1504 long_op);
6ab409b5 1505 cifsFileInfo_put(open_file);
fbec9ab9
JL
1506 cifs_update_eof(cifsi, offset, bytes_written);
1507
23e7dd7d 1508 if (rc || bytes_written < bytes_to_write) {
b6b38f70
JP
1509 cERROR(1, "Write2 ret %d, wrote %d",
1510 rc, bytes_written);
23e7dd7d
SF
1511 /* BB what if continued retry is
1512 requested via mount flags? */
cea21805
JL
1513 if (rc == -ENOSPC)
1514 set_bit(AS_ENOSPC, &mapping->flags);
1515 else
1516 set_bit(AS_EIO, &mapping->flags);
23e7dd7d
SF
1517 } else {
1518 cifs_stats_bytes_written(cifs_sb->tcon,
1519 bytes_written);
1520 }
37c0eb46
SF
1521 }
1522 for (i = 0; i < n_iov; i++) {
1523 page = pvec.pages[first + i];
eb9bdaa3
SF
1524 /* Should we also set page error on
1525 success rc but too little data written? */
1526 /* BB investigate retry logic on temporary
1527 server crash cases and how recovery works
fb8c4b14
SF
1528 when page marked as error */
1529 if (rc)
eb9bdaa3 1530 SetPageError(page);
37c0eb46
SF
1531 kunmap(page);
1532 unlock_page(page);
cb876f45 1533 end_page_writeback(page);
37c0eb46
SF
1534 page_cache_release(page);
1535 }
1536 if ((wbc->nr_to_write -= n_iov) <= 0)
1537 done = 1;
1538 index = next;
b066a48c
DK
1539 } else
1540 /* Need to re-find the pages we skipped */
1541 index = pvec.pages[0]->index + 1;
1542
37c0eb46
SF
1543 pagevec_release(&pvec);
1544 }
1545 if (!scanned && !done) {
1546 /*
1547 * We hit the last page and there is more work to be done: wrap
1548 * back to the start of the file
1549 */
1550 scanned = 1;
1551 index = 0;
1552 goto retry;
1553 }
111ebb6e 1554 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1555 mapping->writeback_index = index;
1556
1da177e4 1557 FreeXid(xid);
9a0c8230 1558 kfree(iov);
1da177e4
LT
1559 return rc;
1560}
1da177e4 1561
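/* Write back a single dirty page: mark it under writeback, send it with
   cifs_partialpagewrite and then clear the writeback state. */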
fb8c4b14 1562static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1da177e4
LT
1563{
1564 int rc = -EFAULT;
1565 int xid;
1566
1567 xid = GetXid();
1568/* BB add check for wbc flags */
1569 page_cache_get(page);
ad7a2926 1570 if (!PageUptodate(page))
b6b38f70 1571 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1572
1573 /*
1574 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1575 *
1576 * A writepage() implementation always needs to do either this,
1577 * or re-dirty the page with "redirty_page_for_writepage()" in
1578 * the case of a failure.
1579 *
1580 * Just unlocking the page will cause the radix tree tag-bits
1581 * to fail to update with the state of the page correctly.
1582 */
fb8c4b14 1583 set_page_writeback(page);
1da177e4
LT
1584 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1585 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1586 unlock_page(page);
cb876f45
LT
1587 end_page_writeback(page);
1588 page_cache_release(page);
1da177e4
LT
1589 FreeXid(xid);
1590 return rc;
1591}
1592
d9414774
NP
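/* ->write_end() for buffered writes: push the copied bytes to the server if
   the page is not up to date, otherwise just mark the page dirty, then
   extend i_size when the write grew the file. */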
1593static int cifs_write_end(struct file *file, struct address_space *mapping,
1594 loff_t pos, unsigned len, unsigned copied,
1595 struct page *page, void *fsdata)
1da177e4 1596{
d9414774
NP
1597 int rc;
1598 struct inode *inode = mapping->host;
1da177e4 1599
b6b38f70
JP
1600 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1601 page, pos, copied);
d9414774 1602
a98ee8c1
JL
1603 if (PageChecked(page)) {
1604 if (copied == len)
1605 SetPageUptodate(page);
1606 ClearPageChecked(page);
1607 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1608 SetPageUptodate(page);
ad7a2926 1609
1da177e4 1610 if (!PageUptodate(page)) {
d9414774
NP
1611 char *page_data;
1612 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1613 int xid;
1614
1615 xid = GetXid();
1da177e4
LT
1616 /* this is probably better than directly calling
1617 partialpage_write since in this function the file handle is
1618 known which we might as well leverage */
1619 /* BB check if anything else missing out of ppw
1620 such as updating last write time */
1621 page_data = kmap(page);
d9414774
NP
1622 rc = cifs_write(file, page_data + offset, copied, &pos);
1623 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1624 kunmap(page);
d9414774
NP
1625
1626 FreeXid(xid);
fb8c4b14 1627 } else {
d9414774
NP
1628 rc = copied;
1629 pos += copied;
1da177e4
LT
1630 set_page_dirty(page);
1631 }
1632
d9414774
NP
1633 if (rc > 0) {
1634 spin_lock(&inode->i_lock);
1635 if (pos > inode->i_size)
1636 i_size_write(inode, pos);
1637 spin_unlock(&inode->i_lock);
1638 }
1639
1640 unlock_page(page);
1641 page_cache_release(page);
1642
1da177e4
LT
1643 return rc;
1644}
1645
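/* Flush dirty pages for the inode and, unless CIFS_MOUNT_NOSSYNC is set,
   issue an SMB Flush so the server commits the data to disk. */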
7ea80859 1646int cifs_fsync(struct file *file, int datasync)
1da177e4
LT
1647{
1648 int xid;
1649 int rc = 0;
b298f223 1650 struct cifsTconInfo *tcon;
c21dfb69 1651 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1652 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1653
1654 xid = GetXid();
1655
b6b38f70 1656 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1657 file->f_path.dentry->d_name.name, datasync);
50c2f753 1658
cea21805
JL
1659 rc = filemap_write_and_wait(inode->i_mapping);
1660 if (rc == 0) {
1661 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1662 CIFS_I(inode)->write_behind_rc = 0;
b298f223 1663 tcon = CIFS_SB(inode->i_sb)->tcon;
be652445 1664 if (!rc && tcon && smbfile &&
4717bed6 1665 !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
b298f223 1666 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
cea21805 1667 }
b298f223 1668
1da177e4
LT
1669 FreeXid(xid);
1670 return rc;
1671}
1672
3978d717 1673/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1674{
1675 struct address_space *mapping;
1676 struct inode *inode;
1677 unsigned long index = page->index;
1678 unsigned int rpages = 0;
1679 int rc = 0;
1680
f19159dc 1681 cFYI(1, "sync page %p", page);
1da177e4
LT
1682 mapping = page->mapping;
1683 if (!mapping)
1684 return 0;
1685 inode = mapping->host;
1686 if (!inode)
3978d717 1687 return; */
1da177e4 1688
fb8c4b14 1689/* fill in rpages then
1da177e4
LT
1690 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1691
b6b38f70 1692/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1da177e4 1693
3978d717 1694#if 0
1da177e4
LT
1695 if (rc < 0)
1696 return rc;
1697 return 0;
3978d717 1698#endif
1da177e4
LT
1699} */
1700
1701/*
 1702 * As the file closes, flush all cached write data for this inode, checking
 1703 * for write-behind errors.
1704 */
75e1fcc0 1705int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1706{
fb8c4b14 1707 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1708 int rc = 0;
1709
1710 /* Rather than do the steps manually:
1711 lock the inode for writing
1712 loop through pages looking for write behind data (dirty pages)
1713 coalesce into contiguous 16K (or smaller) chunks to write to server
1714 send to server (prefer in parallel)
1715 deal with writebehind errors
1716 unlock inode for writing
 1717 filemap_fdatawrite appears easier for the time being */
1718
1719 rc = filemap_fdatawrite(inode->i_mapping);
cea21805
JL
1720 /* reset wb rc if we were able to write out dirty pages */
1721 if (!rc) {
1722 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1723 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1724 }
50c2f753 1725
b6b38f70 1726 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1727
1728 return rc;
1729}
1730
1731ssize_t cifs_user_read(struct file *file, char __user *read_data,
1732 size_t read_size, loff_t *poffset)
1733{
1734 int rc = -EACCES;
1735 unsigned int bytes_read = 0;
1736 unsigned int total_read = 0;
1737 unsigned int current_read_size;
1738 struct cifs_sb_info *cifs_sb;
1739 struct cifsTconInfo *pTcon;
1740 int xid;
1741 struct cifsFileInfo *open_file;
1742 char *smb_read_data;
1743 char __user *current_offset;
1744 struct smb_com_read_rsp *pSMBr;
1745
1746 xid = GetXid();
e6a00296 1747 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1748 pTcon = cifs_sb->tcon;
1749
1750 if (file->private_data == NULL) {
0f3bc09e 1751 rc = -EBADF;
1da177e4 1752 FreeXid(xid);
0f3bc09e 1753 return rc;
1da177e4 1754 }
c21dfb69 1755 open_file = file->private_data;
1da177e4 1756
ad7a2926 1757 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1758 cFYI(1, "attempting read on write only file instance");
ad7a2926 1759
1da177e4
LT
1760 for (total_read = 0, current_offset = read_data;
1761 read_size > total_read;
1762 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1763 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1764 cifs_sb->rsize);
1765 rc = -EAGAIN;
1766 smb_read_data = NULL;
1767 while (rc == -EAGAIN) {
ec637e3f 1768 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1769 if ((open_file->invalidHandle) &&
1da177e4 1770 (!open_file->closePend)) {
4b18f2a9 1771 rc = cifs_reopen_file(file, true);
1da177e4
LT
1772 if (rc != 0)
1773 break;
1774 }
bfa0d75a 1775 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1776 open_file->netfid,
1777 current_read_size, *poffset,
1778 &bytes_read, &smb_read_data,
1779 &buf_type);
1da177e4 1780 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1781 if (smb_read_data) {
93544cc6
SF
1782 if (copy_to_user(current_offset,
1783 smb_read_data +
1784 4 /* RFC1001 length field */ +
1785 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1786 bytes_read))
93544cc6 1787 rc = -EFAULT;
93544cc6 1788
fb8c4b14 1789 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1790 cifs_small_buf_release(smb_read_data);
fb8c4b14 1791 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1792 cifs_buf_release(smb_read_data);
1da177e4
LT
1793 smb_read_data = NULL;
1794 }
1795 }
1796 if (rc || (bytes_read == 0)) {
1797 if (total_read) {
1798 break;
1799 } else {
1800 FreeXid(xid);
1801 return rc;
1802 }
1803 } else {
a4544347 1804 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1805 *poffset += bytes_read;
1806 }
1807 }
1808 FreeXid(xid);
1809 return total_read;
1810}
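
/*
 * Illustrative sketch (not part of this file): where the read payload copied
 * to userspace above sits in the reply buffer.  The buffer returned by
 * CIFSSMBRead() starts with the 4-byte RFC1001 length field, and DataOffset
 * is relative to the SMB header that follows it, so the data begins at
 * buf + 4 + DataOffset.  The helper name is hypothetical.
 */
static char *example_read_payload(char *smb_buf)
{
	struct smb_com_read_rsp *rsp = (struct smb_com_read_rsp *)smb_buf;

	return smb_buf + 4 /* RFC1001 length field */ +
		le16_to_cpu(rsp->DataOffset);
}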
1811
1812
1813static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1814 loff_t *poffset)
1815{
1816 int rc = -EACCES;
1817 unsigned int bytes_read = 0;
1818 unsigned int total_read;
1819 unsigned int current_read_size;
1820 struct cifs_sb_info *cifs_sb;
1821 struct cifsTconInfo *pTcon;
1822 int xid;
1823 char *current_offset;
1824 struct cifsFileInfo *open_file;
ec637e3f 1825 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1826
1827 xid = GetXid();
e6a00296 1828 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1829 pTcon = cifs_sb->tcon;
1830
1831 if (file->private_data == NULL) {
0f3bc09e 1832 rc = -EBADF;
1da177e4 1833 FreeXid(xid);
0f3bc09e 1834 return rc;
1da177e4 1835 }
c21dfb69 1836 open_file = file->private_data;
1da177e4
LT
1837
1838 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1839 cFYI(1, "attempting read on write only file instance");
1da177e4 1840
fb8c4b14 1841 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1842 read_size > total_read;
1843 total_read += bytes_read, current_offset += bytes_read) {
1844 current_read_size = min_t(const int, read_size - total_read,
1845 cifs_sb->rsize);
f9f5c817
SF
 1846 /* For Windows ME and 9x we do not want to request more
 1847 than was negotiated, since the server would then refuse the read */
fb8c4b14 1848 if ((pTcon->ses) &&
f9f5c817
SF
1849 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1850 current_read_size = min_t(const int, current_read_size,
1851 pTcon->ses->server->maxBuf - 128);
1852 }
1da177e4
LT
1853 rc = -EAGAIN;
1854 while (rc == -EAGAIN) {
fb8c4b14 1855 if ((open_file->invalidHandle) &&
1da177e4 1856 (!open_file->closePend)) {
4b18f2a9 1857 rc = cifs_reopen_file(file, true);
1da177e4
LT
1858 if (rc != 0)
1859 break;
1860 }
bfa0d75a 1861 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1862 open_file->netfid,
1863 current_read_size, *poffset,
1864 &bytes_read, &current_offset,
1865 &buf_type);
1da177e4
LT
1866 }
1867 if (rc || (bytes_read == 0)) {
1868 if (total_read) {
1869 break;
1870 } else {
1871 FreeXid(xid);
1872 return rc;
1873 }
1874 } else {
a4544347 1875 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1876 *poffset += bytes_read;
1877 }
1878 }
1879 FreeXid(xid);
1880 return total_read;
1881}
1882
1883int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1884{
1da177e4
LT
1885 int rc, xid;
1886
1887 xid = GetXid();
abab095d 1888 rc = cifs_revalidate_file(file);
1da177e4 1889 if (rc) {
b6b38f70 1890 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1891 FreeXid(xid);
1892 return rc;
1893 }
1894 rc = generic_file_mmap(file, vma);
1895 FreeXid(xid);
1896 return rc;
1897}
1898
1899
fb8c4b14 1900static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1901 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1902{
1903 struct page *page;
1904 char *target;
1905
1906 while (bytes_read > 0) {
1907 if (list_empty(pages))
1908 break;
1909
1910 page = list_entry(pages->prev, struct page, lru);
1911 list_del(&page->lru);
1912
315e995c 1913 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1914 GFP_KERNEL)) {
1915 page_cache_release(page);
b6b38f70 1916 cFYI(1, "Add page cache failed");
3079ca62
SF
1917 data += PAGE_CACHE_SIZE;
1918 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1919 continue;
1920 }
06b43672 1921 page_cache_release(page);
1da177e4 1922
fb8c4b14 1923 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1924
1925 if (PAGE_CACHE_SIZE > bytes_read) {
1926 memcpy(target, data, bytes_read);
1927 /* zero the tail end of this partial page */
fb8c4b14 1928 memset(target + bytes_read, 0,
1da177e4
LT
1929 PAGE_CACHE_SIZE - bytes_read);
1930 bytes_read = 0;
1931 } else {
1932 memcpy(target, data, PAGE_CACHE_SIZE);
1933 bytes_read -= PAGE_CACHE_SIZE;
1934 }
1935 kunmap_atomic(target, KM_USER0);
1936
1937 flush_dcache_page(page);
1938 SetPageUptodate(page);
1939 unlock_page(page);
1da177e4
LT
1940 data += PAGE_CACHE_SIZE;
1941 }
1942 return;
1943}
1944
1945static int cifs_readpages(struct file *file, struct address_space *mapping,
1946 struct list_head *page_list, unsigned num_pages)
1947{
1948 int rc = -EACCES;
1949 int xid;
1950 loff_t offset;
1951 struct page *page;
1952 struct cifs_sb_info *cifs_sb;
1953 struct cifsTconInfo *pTcon;
2c2130e1 1954 unsigned int bytes_read = 0;
fb8c4b14 1955 unsigned int read_size, i;
1da177e4
LT
1956 char *smb_read_data = NULL;
1957 struct smb_com_read_rsp *pSMBr;
1da177e4 1958 struct cifsFileInfo *open_file;
ec637e3f 1959 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1960
1961 xid = GetXid();
1962 if (file->private_data == NULL) {
0f3bc09e 1963 rc = -EBADF;
1da177e4 1964 FreeXid(xid);
0f3bc09e 1965 return rc;
1da177e4 1966 }
c21dfb69 1967 open_file = file->private_data;
e6a00296 1968 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1969 pTcon = cifs_sb->tcon;
bfa0d75a 1970
f19159dc 1971 cFYI(DBG2, "rpages: num pages %d", num_pages);
1da177e4
LT
1972 for (i = 0; i < num_pages; ) {
1973 unsigned contig_pages;
1974 struct page *tmp_page;
1975 unsigned long expected_index;
1976
1977 if (list_empty(page_list))
1978 break;
1979
1980 page = list_entry(page_list->prev, struct page, lru);
1981 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1982
1983 /* count adjacent pages that we will read into */
1984 contig_pages = 0;
fb8c4b14 1985 expected_index =
1da177e4 1986 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 1987 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
1988 if (tmp_page->index == expected_index) {
1989 contig_pages++;
1990 expected_index++;
1991 } else
fb8c4b14 1992 break;
1da177e4
LT
1993 }
1994 if (contig_pages + i > num_pages)
1995 contig_pages = num_pages - i;
1996
 1997 /* for reads over a certain size we could initiate async
 1998 read ahead */
1999
2000 read_size = contig_pages * PAGE_CACHE_SIZE;
 2001 /* Read size needs to be a multiple of the page size */
2002 read_size = min_t(const unsigned int, read_size,
2003 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2004 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2005 read_size, contig_pages);
1da177e4
LT
2006 rc = -EAGAIN;
2007 while (rc == -EAGAIN) {
fb8c4b14 2008 if ((open_file->invalidHandle) &&
1da177e4 2009 (!open_file->closePend)) {
4b18f2a9 2010 rc = cifs_reopen_file(file, true);
1da177e4
LT
2011 if (rc != 0)
2012 break;
2013 }
2014
bfa0d75a 2015 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
2016 open_file->netfid,
2017 read_size, offset,
2018 &bytes_read, &smb_read_data,
2019 &buf_type);
a9d02ad4 2020 /* BB more RC checks ? */
fb8c4b14 2021 if (rc == -EAGAIN) {
1da177e4 2022 if (smb_read_data) {
fb8c4b14 2023 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2024 cifs_small_buf_release(smb_read_data);
fb8c4b14 2025 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2026 cifs_buf_release(smb_read_data);
1da177e4
LT
2027 smb_read_data = NULL;
2028 }
2029 }
2030 }
2031 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2032 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2033 break;
2034 } else if (bytes_read > 0) {
6f88cc2e 2035 task_io_account_read(bytes_read);
1da177e4
LT
2036 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2037 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2038 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2039 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2040
2041 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2042 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2043 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2044 i++; /* account for partial page */
2045
fb8c4b14 2046 /* server copy of file can have smaller size
1da177e4 2047 than client */
fb8c4b14
SF
2048 /* BB do we need to verify this common case ?
2049 this case is ok - if we are at server EOF
1da177e4
LT
2050 we will hit it on next read */
2051
05ac9d4b 2052 /* break; */
1da177e4
LT
2053 }
2054 } else {
b6b38f70 2055 cFYI(1, "No bytes read (%d) at offset %lld . "
f19159dc 2056 "Cleaning remaining pages from readahead list",
b6b38f70 2057 bytes_read, offset);
fb8c4b14 2058 /* BB turn off caching and do new lookup on
1da177e4 2059 file size at server? */
1da177e4
LT
2060 break;
2061 }
2062 if (smb_read_data) {
fb8c4b14 2063 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2064 cifs_small_buf_release(smb_read_data);
fb8c4b14 2065 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2066 cifs_buf_release(smb_read_data);
1da177e4
LT
2067 smb_read_data = NULL;
2068 }
2069 bytes_read = 0;
2070 }
2071
1da177e4
LT
2072/* need to free smb_read_data buf before exit */
2073 if (smb_read_data) {
fb8c4b14 2074 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2075 cifs_small_buf_release(smb_read_data);
fb8c4b14 2076 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2077 cifs_buf_release(smb_read_data);
1da177e4 2078 smb_read_data = NULL;
fb8c4b14 2079 }
1da177e4
LT
2080
2081 FreeXid(xid);
2082 return rc;
2083}
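
/*
 * Worked example (illustrative, not part of this file) of the read sizing in
 * cifs_readpages() above, assuming PAGE_CACHE_SIZE is 4096 and a negotiated
 * rsize of 16384: rsize & PAGE_CACHE_MASK is 16384, so at most four
 * contiguous pages are requested per CIFSSMBRead() call.  If the server
 * returns 14000 bytes, i advances by 14000 >> PAGE_CACHE_SHIFT = 3 full
 * pages, and since 14000 & PAGE_CACHE_MASK (12288) != 14000 the trailing
 * partial page is accounted for by the extra i++.
 */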
2084
2085static int cifs_readpage_worker(struct file *file, struct page *page,
2086 loff_t *poffset)
2087{
2088 char *read_data;
2089 int rc;
2090
2091 page_cache_get(page);
2092 read_data = kmap(page);
 2093 /* for reads over a certain size we could initiate async read ahead */
fb8c4b14 2094
1da177e4 2095 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2096
1da177e4
LT
2097 if (rc < 0)
2098 goto io_error;
2099 else
b6b38f70 2100 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2101
e6a00296
JJS
2102 file->f_path.dentry->d_inode->i_atime =
2103 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2104
1da177e4
LT
2105 if (PAGE_CACHE_SIZE > rc)
2106 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2107
2108 flush_dcache_page(page);
2109 SetPageUptodate(page);
2110 rc = 0;
fb8c4b14 2111
1da177e4 2112io_error:
fb8c4b14 2113 kunmap(page);
1da177e4
LT
2114 page_cache_release(page);
2115 return rc;
2116}
2117
2118static int cifs_readpage(struct file *file, struct page *page)
2119{
2120 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2121 int rc = -EACCES;
2122 int xid;
2123
2124 xid = GetXid();
2125
2126 if (file->private_data == NULL) {
0f3bc09e 2127 rc = -EBADF;
1da177e4 2128 FreeXid(xid);
0f3bc09e 2129 return rc;
1da177e4
LT
2130 }
2131
b6b38f70
JP
2132 cFYI(1, "readpage %p at offset %d 0x%x\n",
2133 page, (int)offset, (int)offset);
1da177e4
LT
2134
2135 rc = cifs_readpage_worker(file, page, &offset);
2136
2137 unlock_page(page);
2138
2139 FreeXid(xid);
2140 return rc;
2141}
2142
a403a0a3
SF
2143static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2144{
2145 struct cifsFileInfo *open_file;
2146
2147 read_lock(&GlobalSMBSeslock);
2148 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2149 if (open_file->closePend)
2150 continue;
2151 if (open_file->pfile &&
2152 ((open_file->pfile->f_flags & O_RDWR) ||
2153 (open_file->pfile->f_flags & O_WRONLY))) {
2154 read_unlock(&GlobalSMBSeslock);
2155 return 1;
2156 }
2157 }
2158 read_unlock(&GlobalSMBSeslock);
2159 return 0;
2160}
2161
1da177e4
LT
 2162/* We do not want to update the file size from the server for inodes
 2163 open for write, to avoid races with writepage extending
 2164 the file. In the future we could consider allowing
fb8c4b14 2165 refreshing the inode only on increases in the file size,
1da177e4
LT
 2166 but this is tricky to do without racing with writebehind
 2167 page caching in the current Linux kernel design */
4b18f2a9 2168bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2169{
a403a0a3 2170 if (!cifsInode)
4b18f2a9 2171 return true;
50c2f753 2172
a403a0a3
SF
2173 if (is_inode_writable(cifsInode)) {
2174 /* This inode is open for write at least once */
c32a0b68
SF
2175 struct cifs_sb_info *cifs_sb;
2176
c32a0b68 2177 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2178 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2179 /* since no page cache to corrupt on directio
c32a0b68 2180 we can change size safely */
4b18f2a9 2181 return true;
c32a0b68
SF
2182 }
2183
fb8c4b14 2184 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2185 return true;
7ba52631 2186
4b18f2a9 2187 return false;
23e7dd7d 2188 } else
4b18f2a9 2189 return true;
1da177e4
LT
2190}
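
/*
 * Worked example (illustrative, not part of this file) of the policy above:
 * with the file open for write, a cached i_size of 8192 and a server end of
 * file of 4096, is_size_safe_to_change() returns false, since shrinking
 * could discard dirty page cache data not yet written to the server; with a
 * server end of file of 16384 it returns true.  When no writer has the file
 * open, or on CIFS_MOUNT_DIRECT_IO mounts, any server size is accepted.
 */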
2191
d9414774
NP
2192static int cifs_write_begin(struct file *file, struct address_space *mapping,
2193 loff_t pos, unsigned len, unsigned flags,
2194 struct page **pagep, void **fsdata)
1da177e4 2195{
d9414774
NP
2196 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2197 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2198 loff_t page_start = pos & PAGE_MASK;
2199 loff_t i_size;
2200 struct page *page;
2201 int rc = 0;
d9414774 2202
b6b38f70 2203 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2204
54566b2c 2205 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2206 if (!page) {
2207 rc = -ENOMEM;
2208 goto out;
2209 }
8a236264 2210
a98ee8c1
JL
2211 if (PageUptodate(page))
2212 goto out;
8a236264 2213
a98ee8c1
JL
2214 /*
2215 * If we write a full page it will be up to date, no need to read from
2216 * the server. If the write is short, we'll end up doing a sync write
2217 * instead.
2218 */
2219 if (len == PAGE_CACHE_SIZE)
2220 goto out;
8a236264 2221
a98ee8c1
JL
2222 /*
2223 * optimize away the read when we have an oplock, and we're not
2224 * expecting to use any of the data we'd be reading in. That
2225 * is, when the page lies beyond the EOF, or straddles the EOF
2226 * and the write will cover all of the existing data.
2227 */
2228 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2229 i_size = i_size_read(mapping->host);
2230 if (page_start >= i_size ||
2231 (offset == 0 && (pos + len) >= i_size)) {
2232 zero_user_segments(page, 0, offset,
2233 offset + len,
2234 PAGE_CACHE_SIZE);
2235 /*
2236 * PageChecked means that the parts of the page
2237 * to which we're not writing are considered up
2238 * to date. Once the data is copied to the
2239 * page, it can be set uptodate.
2240 */
2241 SetPageChecked(page);
2242 goto out;
2243 }
2244 }
d9414774 2245
a98ee8c1
JL
2246 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2247 /*
2248 * might as well read a page, it is fast enough. If we get
2249 * an error, we don't need to return it. cifs_write_end will
2250 * do a sync write instead since PG_uptodate isn't set.
2251 */
2252 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2253 } else {
2254 /* we could try using another file handle if there is one -
2255 but how would we lock it to prevent close of that handle
2256 racing with this read? In any case
d9414774 2257 this will be written out by write_end so is fine */
1da177e4 2258 }
a98ee8c1
JL
2259out:
2260 *pagep = page;
2261 return rc;
1da177e4
LT
2262}
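
/*
 * Worked example (illustrative, not part of this file) of the write_begin
 * logic above, assuming PAGE_CACHE_SIZE is 4096:
 *  - pos 8192, len 4096: a full-page write, so the page is left not uptodate
 *    and no read is issued (cifs_write_end copies the data and then marks it
 *    uptodate);
 *  - pos 10000, len 100, read oplock held, i_size 8000: the page starts
 *    beyond EOF, so it is zero-filled around the write range and PageChecked
 *    is set instead of reading from the server;
 *  - pos 10000, len 100, i_size 20000: the existing contents matter, so
 *    cifs_readpage_worker() fills the page from the server first.
 */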
2263
3bc303c2
JL
2264static void
2265cifs_oplock_break(struct slow_work *work)
2266{
2267 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2268 oplock_break);
2269 struct inode *inode = cfile->pInode;
2270 struct cifsInodeInfo *cinode = CIFS_I(inode);
2271 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
2272 int rc, waitrc = 0;
2273
2274 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2275 if (cinode->clientCanCacheRead)
8737c930 2276 break_lease(inode, O_RDONLY);
d54ff732 2277 else
8737c930 2278 break_lease(inode, O_WRONLY);
3bc303c2
JL
2279 rc = filemap_fdatawrite(inode->i_mapping);
2280 if (cinode->clientCanCacheRead == 0) {
2281 waitrc = filemap_fdatawait(inode->i_mapping);
2282 invalidate_remote_inode(inode);
2283 }
2284 if (!rc)
2285 rc = waitrc;
2286 if (rc)
2287 cinode->write_behind_rc = rc;
b6b38f70 2288 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2289 }
2290
2291 /*
 2292 * Releasing a stale oplock after a recent reconnect of the SMB session,
 2293 * using a now incorrect file handle, is not a data integrity issue, but
 2294 * do not bother sending an oplock release if the session to the server
 2295 * is still disconnected, since the server has already released the oplock
2296 */
2297 if (!cfile->closePend && !cfile->oplock_break_cancelled) {
2298 rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
2299 LOCKING_ANDX_OPLOCK_RELEASE, false);
b6b38f70 2300 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2
JL
2301 }
2302}
2303
2304static int
2305cifs_oplock_break_get(struct slow_work *work)
2306{
2307 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2308 oplock_break);
2309 mntget(cfile->mnt);
2310 cifsFileInfo_get(cfile);
2311 return 0;
2312}
2313
2314static void
2315cifs_oplock_break_put(struct slow_work *work)
2316{
2317 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2318 oplock_break);
2319 mntput(cfile->mnt);
2320 cifsFileInfo_put(cfile);
2321}
2322
2323const struct slow_work_ops cifs_oplock_break_ops = {
2324 .get_ref = cifs_oplock_break_get,
2325 .put_ref = cifs_oplock_break_put,
2326 .execute = cifs_oplock_break,
2327};
2328
f5e54d6e 2329const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2330 .readpage = cifs_readpage,
2331 .readpages = cifs_readpages,
2332 .writepage = cifs_writepage,
37c0eb46 2333 .writepages = cifs_writepages,
d9414774
NP
2334 .write_begin = cifs_write_begin,
2335 .write_end = cifs_write_end,
1da177e4
LT
2336 .set_page_dirty = __set_page_dirty_nobuffers,
2337 /* .sync_page = cifs_sync_page, */
2338 /* .direct_IO = */
2339};
273d81d6
DK
2340
2341/*
2342 * cifs_readpages requires the server to support a buffer large enough to
2343 * contain the header plus one complete page of data. Otherwise, we need
2344 * to leave cifs_readpages out of the address space operations.
2345 */
f5e54d6e 2346const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2347 .readpage = cifs_readpage,
2348 .writepage = cifs_writepage,
2349 .writepages = cifs_writepages,
d9414774
NP
2350 .write_begin = cifs_write_begin,
2351 .write_end = cifs_write_end,
273d81d6
DK
2352 .set_page_dirty = __set_page_dirty_nobuffers,
2353 /* .sync_page = cifs_sync_page, */
2354 /* .direct_IO = */
2355};
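
/*
 * Illustrative sketch (not part of this file): how a caller could pick
 * between the two operation tables above, per the comment on
 * cifs_addr_ops_smallbuf.  The helper name is hypothetical; server->maxBuf
 * is the negotiated buffer size and MAX_CIFS_HDR_SIZE is assumed to be
 * available from cifsglob.h.
 */
static inline const struct address_space_operations *
example_select_aops(struct TCP_Server_Info *server)
{
	/* readpages needs room for the SMB header plus one full page */
	if (server->maxBuf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		return &cifs_addr_ops_smallbuf;
	return &cifs_addr_ops;
}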