1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/uuid.h>
29 #include <linux/xattr.h>
30 #include <uapi/linux/magic.h>
31 #include <net/ipv6.h>
32 #include "cifsfs.h"
33 #include "cifspdu.h"
34 #define DECLARE_GLOBALS_HERE
35 #include "cifsglob.h"
36 #include "cifsproto.h"
37 #include "cifs_debug.h"
38 #include "cifs_fs_sb.h"
39 #include <linux/mm.h>
40 #include <linux/key-type.h>
41 #include "cifs_spnego.h"
42 #include "fscache.h"
43 #ifdef CONFIG_CIFS_DFS_UPCALL
44 #include "dfs_cache.h"
45 #endif
46 #ifdef CONFIG_CIFS_SWN_UPCALL
47 #include "netlink.h"
48 #endif
49 #include "fs_context.h"
50 #include "cached_dir.h"
51
52 /*
53 * DOS dates from 1980/1/1 through 2107/12/31
54 * Protocol specifications indicate the range should be to 119, which
55 * limits maximum year to 2099. But this range has not been checked.
56 */
57 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
58 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
59 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
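/*
 * For illustration: the DOS date word packs (year - 1980) in bits 15..9,
 * the month in bits 8..5 and the day in bits 4..0, so SMB_DATE_MAX above
 * decodes to 2107/12/31.  The DOS time word packs hours<<11 | minutes<<5 |
 * seconds/2, hence the 29 in SMB_TIME_MAX (two-second granularity).
 */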
60
61 int cifsFYI = 0;
62 bool traceSMB;
63 bool enable_oplocks = true;
64 bool linuxExtEnabled = true;
65 bool lookupCacheEnabled = true;
66 bool disable_legacy_dialects; /* false by default */
67 bool enable_gcm_256 = true;
68 bool require_gcm_256; /* false by default */
69 bool enable_negotiate_signing; /* false by default */
70 unsigned int global_secflags = CIFSSEC_DEF;
71 /* unsigned int ntlmv2_support = 0; */
72 unsigned int sign_CIFS_PDUs = 1;
73
74 /*
75 * Global transaction id (XID) information
76 */
77 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
79 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
80 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
81
82 /*
83 * Global counters, updated atomically
84 */
85 atomic_t sesInfoAllocCount;
86 atomic_t tconInfoAllocCount;
87 atomic_t tcpSesNextId;
88 atomic_t tcpSesAllocCount;
89 atomic_t tcpSesReconnectCount;
90 atomic_t tconInfoReconnectCount;
91
92 atomic_t mid_count;
93 atomic_t buf_alloc_count;
94 atomic_t small_buf_alloc_count;
95 #ifdef CONFIG_CIFS_STATS2
96 atomic_t total_buf_alloc_count;
97 atomic_t total_small_buf_alloc_count;
98 #endif /* STATS2 */
99 struct list_head cifs_tcp_ses_list;
100 spinlock_t cifs_tcp_ses_lock;
101 static const struct super_operations cifs_super_ops;
102 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
103 module_param(CIFSMaxBufSize, uint, 0444);
104 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
105 "for CIFS requests. "
106 "Default: 16384 Range: 8192 to 130048");
107 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
108 module_param(cifs_min_rcv, uint, 0444);
109 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
110 "1 to 64");
111 unsigned int cifs_min_small = 30;
112 module_param(cifs_min_small, uint, 0444);
113 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
114 "Range: 2 to 256");
115 unsigned int cifs_max_pending = CIFS_MAX_REQ;
116 module_param(cifs_max_pending, uint, 0444);
117 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
118 "CIFS/SMB1 dialect (N/A for SMB3) "
119 "Default: 32767 Range: 2 to 32767.");
120 unsigned int dir_cache_timeout = 30;
121 module_param(dir_cache_timeout, uint, 0644);
122 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
123 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
124 #ifdef CONFIG_CIFS_STATS2
125 unsigned int slow_rsp_threshold = 1;
126 module_param(slow_rsp_threshold, uint, 0644);
127 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
128 "before logging that a response is delayed. "
129 "Default: 1 (if set to 0 disables msg).");
130 #endif /* STATS2 */
131
132 module_param(enable_oplocks, bool, 0644);
133 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
134
135 module_param(enable_gcm_256, bool, 0644);
136 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
137
138 module_param(require_gcm_256, bool, 0644);
139 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
140
141 module_param(enable_negotiate_signing, bool, 0644);
142 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
143
144 module_param(disable_legacy_dialects, bool, 0644);
145 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
146 "helpful to restrict the ability to "
147 "override the default dialects (SMB2.1, "
148 "SMB3 and SMB3.02) on mount with old "
149 "dialects (CIFS/SMB1 and SMB2) since "
150 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
151 " and less secure. Default: n/N/0");
152
153 extern mempool_t *cifs_sm_req_poolp;
154 extern mempool_t *cifs_req_poolp;
155 extern mempool_t *cifs_mid_poolp;
156
157 struct workqueue_struct *cifsiod_wq;
158 struct workqueue_struct *decrypt_wq;
159 struct workqueue_struct *fileinfo_put_wq;
160 struct workqueue_struct *cifsoplockd_wq;
161 struct workqueue_struct *deferredclose_wq;
162 __u32 cifs_lock_secret;
163
164 /*
165 * Bumps refcount for cifs super block.
166 * Note that it should only be called if a reference to the VFS super block is
167 * already held, e.g. in open-type syscalls context. Otherwise it can race with
168 * atomic_dec_and_test in deactivate_locked_super.
169 */
170 void
171 cifs_sb_active(struct super_block *sb)
172 {
173 struct cifs_sb_info *server = CIFS_SB(sb);
174
175 if (atomic_inc_return(&server->active) == 1)
176 atomic_inc(&sb->s_active);
177 }
178
179 void
180 cifs_sb_deactive(struct super_block *sb)
181 {
182 struct cifs_sb_info *server = CIFS_SB(sb);
183
184 if (atomic_dec_and_test(&server->active))
185 deactivate_super(sb);
186 }
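/*
 * A sketch of the intended pairing (illustrative, assuming the caller holds
 * a VFS superblock reference as required by the comment above):
 *
 *	cifs_sb_active(sb);		pin the sb for deferred work
 *	queue_work(wq, &work);		work that may outlive the syscall
 *	...
 *	cifs_sb_deactive(sb);		drop the pin when the work completes
 */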
187
188 static int
189 cifs_read_super(struct super_block *sb)
190 {
191 struct inode *inode;
192 struct cifs_sb_info *cifs_sb;
193 struct cifs_tcon *tcon;
194 struct timespec64 ts;
195 int rc = 0;
196
197 cifs_sb = CIFS_SB(sb);
198 tcon = cifs_sb_master_tcon(cifs_sb);
199
200 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
201 sb->s_flags |= SB_POSIXACL;
202
203 if (tcon->snapshot_time)
204 sb->s_flags |= SB_RDONLY;
205
206 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
207 sb->s_maxbytes = MAX_LFS_FILESIZE;
208 else
209 sb->s_maxbytes = MAX_NON_LFS;
210
211 /*
212 * Some very old servers like DOS and OS/2 used 2 second granularity
213 * (while all current servers use 100ns granularity - see MS-DTYP)
214 * but 1 second is the maximum allowed granularity for the VFS
215 * so for old servers set time granularity to 1 second while for
216 * everything else (current servers) set it to 100ns.
217 */
218 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
219 ((tcon->ses->capabilities &
220 tcon->ses->server->vals->cap_nt_find) == 0) &&
221 !tcon->unix_ext) {
222 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
223 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
224 sb->s_time_min = ts.tv_sec;
225 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
226 cpu_to_le16(SMB_TIME_MAX), 0);
227 sb->s_time_max = ts.tv_sec;
228 } else {
229 /*
230 * Almost every server, including all SMB2+, uses DCE TIME
231 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
232 */
233 sb->s_time_gran = 100;
234 ts = cifs_NTtimeToUnix(0);
235 sb->s_time_min = ts.tv_sec;
236 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
237 sb->s_time_max = ts.tv_sec;
238 }
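	/*
	 * Rough sketch of the conversion used above (cifs_NTtimeToUnix() is
	 * defined elsewhere in the client): an NT/DCE timestamp counts 100ns
	 * ticks since 1601-01-01, which is 11644473600 seconds before the
	 * Unix epoch, so the conversion is approximately:
	 *
	 *	u64 t = le64_to_cpu(ntutc) - 11644473600ULL * 10000000;
	 *	ts.tv_sec  = div_u64_rem(t, 10000000, &rem);	(rem in 100ns units)
	 *	ts.tv_nsec = rem * 100;
	 */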
239
240 sb->s_magic = CIFS_SUPER_MAGIC;
241 sb->s_op = &cifs_super_ops;
242 sb->s_xattr = cifs_xattr_handlers;
243 rc = super_setup_bdi(sb);
244 if (rc)
245 goto out_no_root;
246 /* tune readahead according to rsize if readahead size not set on mount */
247 if (cifs_sb->ctx->rsize == 0)
248 cifs_sb->ctx->rsize =
249 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
250 if (cifs_sb->ctx->rasize)
251 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
252 else
253 sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
254
255 sb->s_blocksize = CIFS_MAX_MSGSIZE;
256 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
257 inode = cifs_root_iget(sb);
258
259 if (IS_ERR(inode)) {
260 rc = PTR_ERR(inode);
261 goto out_no_root;
262 }
263
264 if (tcon->nocase)
265 sb->s_d_op = &cifs_ci_dentry_ops;
266 else
267 sb->s_d_op = &cifs_dentry_ops;
268
269 sb->s_root = d_make_root(inode);
270 if (!sb->s_root) {
271 rc = -ENOMEM;
272 goto out_no_root;
273 }
274
275 #ifdef CONFIG_CIFS_NFSD_EXPORT
276 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
277 cifs_dbg(FYI, "export ops supported\n");
278 sb->s_export_op = &cifs_export_ops;
279 }
280 #endif /* CONFIG_CIFS_NFSD_EXPORT */
281
282 return 0;
283
284 out_no_root:
285 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
286 return rc;
287 }
288
289 static void cifs_kill_sb(struct super_block *sb)
290 {
291 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
292
293 /*
294 * We need to release all dentries for the cached directories
295 * before we kill the sb.
296 */
297 if (cifs_sb->root) {
298 close_all_cached_dirs(cifs_sb);
299
300 /* finally release root dentry */
301 dput(cifs_sb->root);
302 cifs_sb->root = NULL;
303 }
304
305 kill_anon_super(sb);
306 cifs_umount(cifs_sb);
307 }
308
309 static int
310 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
311 {
312 struct super_block *sb = dentry->d_sb;
313 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
314 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
315 struct TCP_Server_Info *server = tcon->ses->server;
316 unsigned int xid;
317 int rc = 0;
318
319 xid = get_xid();
320
321 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
322 buf->f_namelen =
323 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
324 else
325 buf->f_namelen = PATH_MAX;
326
327 buf->f_fsid.val[0] = tcon->vol_serial_number;
328 /* we use part of the create time for more randomness, see man statfs */
329 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
330
331 buf->f_files = 0; /* undefined */
332 buf->f_ffree = 0; /* unlimited */
333
334 if (server->ops->queryfs)
335 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
336
337 free_xid(xid);
338 return rc;
339 }
340
341 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
342 {
343 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
344 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
345 struct TCP_Server_Info *server = tcon->ses->server;
346
347 if (server->ops->fallocate)
348 return server->ops->fallocate(file, tcon, mode, off, len);
349
350 return -EOPNOTSUPP;
351 }
352
353 static int cifs_permission(struct mnt_idmap *idmap,
354 struct inode *inode, int mask)
355 {
356 struct cifs_sb_info *cifs_sb;
357
358 cifs_sb = CIFS_SB(inode->i_sb);
359
360 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
361 if ((mask & MAY_EXEC) && !execute_ok(inode))
362 return -EACCES;
363 else
364 return 0;
365 } else /* file mode might have been restricted at mount time
366 on the client (above and beyond ACL on servers) for
367 servers which do not support setting and viewing mode bits,
368 so allowing client to check permissions is useful */
369 return generic_permission(&nop_mnt_idmap, inode, mask);
370 }
371
372 static struct kmem_cache *cifs_inode_cachep;
373 static struct kmem_cache *cifs_req_cachep;
374 static struct kmem_cache *cifs_mid_cachep;
375 static struct kmem_cache *cifs_sm_req_cachep;
376 mempool_t *cifs_sm_req_poolp;
377 mempool_t *cifs_req_poolp;
378 mempool_t *cifs_mid_poolp;
379
380 static struct inode *
381 cifs_alloc_inode(struct super_block *sb)
382 {
383 struct cifsInodeInfo *cifs_inode;
384 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
385 if (!cifs_inode)
386 return NULL;
387 cifs_inode->cifsAttrs = 0x20; /* default */
388 cifs_inode->time = 0;
389 /*
390 * Until the file is open and we have gotten oplock info back from the
391 * server, we cannot assume caching of file data or metadata.
392 */
393 cifs_set_oplock_level(cifs_inode, 0);
394 cifs_inode->flags = 0;
395 spin_lock_init(&cifs_inode->writers_lock);
396 cifs_inode->writers = 0;
397 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
398 cifs_inode->server_eof = 0;
399 cifs_inode->uniqueid = 0;
400 cifs_inode->createtime = 0;
401 cifs_inode->epoch = 0;
402 spin_lock_init(&cifs_inode->open_file_lock);
403 generate_random_uuid(cifs_inode->lease_key);
404 cifs_inode->symlink_target = NULL;
405
406 /*
407 * Can not set i_flags here - they get immediately overwritten to zero
408 * by the VFS.
409 */
410 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
411 INIT_LIST_HEAD(&cifs_inode->openFileList);
412 INIT_LIST_HEAD(&cifs_inode->llist);
413 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
414 spin_lock_init(&cifs_inode->deferred_lock);
415 return &cifs_inode->netfs.inode;
416 }
417
418 static void
419 cifs_free_inode(struct inode *inode)
420 {
421 struct cifsInodeInfo *cinode = CIFS_I(inode);
422
423 if (S_ISLNK(inode->i_mode))
424 kfree(cinode->symlink_target);
425 kmem_cache_free(cifs_inode_cachep, cinode);
426 }
427
428 static void
429 cifs_evict_inode(struct inode *inode)
430 {
431 truncate_inode_pages_final(&inode->i_data);
432 if (inode->i_state & I_PINNING_NETFS_WB)
433 cifs_fscache_unuse_inode_cookie(inode, true);
434 cifs_fscache_release_inode_cookie(inode);
435 clear_inode(inode);
436 }
437
438 static void
439 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
440 {
441 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
442 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
443
444 seq_puts(s, ",addr=");
445
446 switch (server->dstaddr.ss_family) {
447 case AF_INET:
448 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
449 break;
450 case AF_INET6:
451 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
452 if (sa6->sin6_scope_id)
453 seq_printf(s, "%%%u", sa6->sin6_scope_id);
454 break;
455 default:
456 seq_puts(s, "(unknown)");
457 }
458 if (server->rdma)
459 seq_puts(s, ",rdma");
460 }
461
462 static void
463 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
464 {
465 if (ses->sectype == Unspecified) {
466 if (ses->user_name == NULL)
467 seq_puts(s, ",sec=none");
468 return;
469 }
470
471 seq_puts(s, ",sec=");
472
473 switch (ses->sectype) {
474 case NTLMv2:
475 seq_puts(s, "ntlmv2");
476 break;
477 case Kerberos:
478 seq_puts(s, "krb5");
479 break;
480 case RawNTLMSSP:
481 seq_puts(s, "ntlmssp");
482 break;
483 default:
484 /* shouldn't ever happen */
485 seq_puts(s, "unknown");
486 break;
487 }
488
489 if (ses->sign)
490 seq_puts(s, "i");
491
492 if (ses->sectype == Kerberos)
493 seq_printf(s, ",cruid=%u",
494 from_kuid_munged(&init_user_ns, ses->cred_uid));
495 }
496
497 static void
498 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
499 {
500 seq_puts(s, ",cache=");
501
502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
503 seq_puts(s, "strict");
504 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
505 seq_puts(s, "none");
506 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
507 seq_puts(s, "singleclient"); /* assume only one client access */
508 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
509 seq_puts(s, "ro"); /* read only caching assumed */
510 else
511 seq_puts(s, "loose");
512 }
513
514 /*
515 * cifs_show_devname() is used so we show the mount device name with correct
516 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
517 */
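/*
 * For example (illustrative): a ctx->source of "\\server\share\sub dir"
 * would appear as "//server/share/sub\040dir" in /proc/mounts.
 */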
518 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
519 {
520 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
521 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
522
523 if (devname == NULL)
524 seq_puts(m, "none");
525 else {
526 convert_delimiter(devname, '/');
527 /* escape all spaces in share names */
528 seq_escape(m, devname, " \t");
529 kfree(devname);
530 }
531 return 0;
532 }
533
534 /*
535 * cifs_show_options() is for displaying mount options in /proc/mounts.
536 * Not all settable options are displayed but most of the important
537 * ones are.
538 */
539 static int
540 cifs_show_options(struct seq_file *s, struct dentry *root)
541 {
542 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
543 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
544 struct sockaddr *srcaddr;
545 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
546
547 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
548 cifs_show_security(s, tcon->ses);
549 cifs_show_cache_flavor(s, cifs_sb);
550
551 if (tcon->no_lease)
552 seq_puts(s, ",nolease");
553 if (cifs_sb->ctx->multiuser)
554 seq_puts(s, ",multiuser");
555 else if (tcon->ses->user_name)
556 seq_show_option(s, "username", tcon->ses->user_name);
557
558 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
559 seq_show_option(s, "domain", tcon->ses->domainName);
560
561 if (srcaddr->sa_family != AF_UNSPEC) {
562 struct sockaddr_in *saddr4;
563 struct sockaddr_in6 *saddr6;
564 saddr4 = (struct sockaddr_in *)srcaddr;
565 saddr6 = (struct sockaddr_in6 *)srcaddr;
566 if (srcaddr->sa_family == AF_INET6)
567 seq_printf(s, ",srcaddr=%pI6c",
568 &saddr6->sin6_addr);
569 else if (srcaddr->sa_family == AF_INET)
570 seq_printf(s, ",srcaddr=%pI4",
571 &saddr4->sin_addr.s_addr);
572 else
573 seq_printf(s, ",srcaddr=BAD-AF:%i",
574 (int)(srcaddr->sa_family));
575 }
576
577 seq_printf(s, ",uid=%u",
578 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
579 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
580 seq_puts(s, ",forceuid");
581 else
582 seq_puts(s, ",noforceuid");
583
584 seq_printf(s, ",gid=%u",
585 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
586 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
587 seq_puts(s, ",forcegid");
588 else
589 seq_puts(s, ",noforcegid");
590
591 cifs_show_address(s, tcon->ses->server);
592
593 if (!tcon->unix_ext)
594 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
595 cifs_sb->ctx->file_mode,
596 cifs_sb->ctx->dir_mode);
597 if (cifs_sb->ctx->iocharset)
598 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
599 if (tcon->seal)
600 seq_puts(s, ",seal");
601 else if (tcon->ses->server->ignore_signature)
602 seq_puts(s, ",signloosely");
603 if (tcon->nocase)
604 seq_puts(s, ",nocase");
605 if (tcon->nodelete)
606 seq_puts(s, ",nodelete");
607 if (cifs_sb->ctx->no_sparse)
608 seq_puts(s, ",nosparse");
609 if (tcon->local_lease)
610 seq_puts(s, ",locallease");
611 if (tcon->retry)
612 seq_puts(s, ",hard");
613 else
614 seq_puts(s, ",soft");
615 if (tcon->use_persistent)
616 seq_puts(s, ",persistenthandles");
617 else if (tcon->use_resilient)
618 seq_puts(s, ",resilienthandles");
619 if (tcon->posix_extensions)
620 seq_puts(s, ",posix");
621 else if (tcon->unix_ext)
622 seq_puts(s, ",unix");
623 else
624 seq_puts(s, ",nounix");
625 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
626 seq_puts(s, ",nodfs");
627 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
628 seq_puts(s, ",posixpaths");
629 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
630 seq_puts(s, ",setuids");
631 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
632 seq_puts(s, ",idsfromsid");
633 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
634 seq_puts(s, ",serverino");
635 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
636 seq_puts(s, ",rwpidforward");
637 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
638 seq_puts(s, ",forcemand");
639 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
640 seq_puts(s, ",nouser_xattr");
641 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
642 seq_puts(s, ",mapchars");
643 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
644 seq_puts(s, ",mapposix");
645 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
646 seq_puts(s, ",sfu");
647 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
648 seq_puts(s, ",nobrl");
649 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
650 seq_puts(s, ",nohandlecache");
651 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
652 seq_puts(s, ",modefromsid");
653 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
654 seq_puts(s, ",cifsacl");
655 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
656 seq_puts(s, ",dynperm");
657 if (root->d_sb->s_flags & SB_POSIXACL)
658 seq_puts(s, ",acl");
659 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
660 seq_puts(s, ",mfsymlinks");
661 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
662 seq_puts(s, ",fsc");
663 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
664 seq_puts(s, ",nostrictsync");
665 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
666 seq_puts(s, ",noperm");
667 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
668 seq_printf(s, ",backupuid=%u",
669 from_kuid_munged(&init_user_ns,
670 cifs_sb->ctx->backupuid));
671 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
672 seq_printf(s, ",backupgid=%u",
673 from_kgid_munged(&init_user_ns,
674 cifs_sb->ctx->backupgid));
675
676 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
677 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
678 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
679 if (cifs_sb->ctx->rasize)
680 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
681 if (tcon->ses->server->min_offload)
682 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
683 seq_printf(s, ",echo_interval=%lu",
684 tcon->ses->server->echo_interval / HZ);
685
686 /* Only display the following if overridden on mount */
687 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
688 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
689 if (tcon->ses->server->tcp_nodelay)
690 seq_puts(s, ",tcpnodelay");
691 if (tcon->ses->server->noautotune)
692 seq_puts(s, ",noautotune");
693 if (tcon->ses->server->noblocksnd)
694 seq_puts(s, ",noblocksend");
695 if (tcon->ses->server->nosharesock)
696 seq_puts(s, ",nosharesock");
697
698 if (tcon->snapshot_time)
699 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
700 if (tcon->handle_timeout)
701 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
702 if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
703 seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
704
705 /*
706 * Display file and directory attribute timeout in seconds.
707 * If the file and directory attribute timeouts are the same then actimeo
708 * was likely specified on mount
709 */
710 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
711 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
712 else {
713 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
714 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
715 }
716 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
717
718 if (tcon->ses->chan_max > 1)
719 seq_printf(s, ",multichannel,max_channels=%zu",
720 tcon->ses->chan_max);
721
722 if (tcon->use_witness)
723 seq_puts(s, ",witness");
724
725 return 0;
726 }
727
728 static void cifs_umount_begin(struct super_block *sb)
729 {
730 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
731 struct cifs_tcon *tcon;
732
733 if (cifs_sb == NULL)
734 return;
735
736 tcon = cifs_sb_master_tcon(cifs_sb);
737
738 spin_lock(&cifs_tcp_ses_lock);
739 spin_lock(&tcon->tc_lock);
740 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
741 /* we have other mounts to same share or we have
742 already tried to umount this and woken up
743 all waiting network requests, nothing to do */
744 spin_unlock(&tcon->tc_lock);
745 spin_unlock(&cifs_tcp_ses_lock);
746 return;
747 }
748 /*
749 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
750 * fail later (e.g. due to open files). TID_EXITING will be set just before tdis req sent
751 */
752 spin_unlock(&tcon->tc_lock);
753 spin_unlock(&cifs_tcp_ses_lock);
754
755 cifs_close_all_deferred_files(tcon);
756 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
757 /* cancel_notify_requests(tcon); */
758 if (tcon->ses && tcon->ses->server) {
759 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
760 wake_up_all(&tcon->ses->server->request_q);
761 wake_up_all(&tcon->ses->server->response_q);
762 msleep(1); /* yield */
763 /* we have to kick the requests once more */
764 wake_up_all(&tcon->ses->server->response_q);
765 msleep(1);
766 }
767
768 return;
769 }
770
771 static int cifs_freeze(struct super_block *sb)
772 {
773 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
774 struct cifs_tcon *tcon;
775
776 if (cifs_sb == NULL)
777 return 0;
778
779 tcon = cifs_sb_master_tcon(cifs_sb);
780
781 cifs_close_all_deferred_files(tcon);
782 return 0;
783 }
784
785 #ifdef CONFIG_CIFS_STATS2
786 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
787 {
788 /* BB FIXME */
789 return 0;
790 }
791 #endif
792
793 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
794 {
795 return netfs_unpin_writeback(inode, wbc);
796 }
797
798 static int cifs_drop_inode(struct inode *inode)
799 {
800 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
801
802 /* no serverino => unconditional eviction */
803 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
804 generic_drop_inode(inode);
805 }
806
807 static const struct super_operations cifs_super_ops = {
808 .statfs = cifs_statfs,
809 .alloc_inode = cifs_alloc_inode,
810 .write_inode = cifs_write_inode,
811 .free_inode = cifs_free_inode,
812 .drop_inode = cifs_drop_inode,
813 .evict_inode = cifs_evict_inode,
814 /* .show_path = cifs_show_path, */ /* Would we ever need show path? */
815 .show_devname = cifs_show_devname,
816 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
817 function unless later we add lazy close of inodes or unless the
818 kernel forgets to call us with the same number of releases (closes)
819 as opens */
820 .show_options = cifs_show_options,
821 .umount_begin = cifs_umount_begin,
822 .freeze_fs = cifs_freeze,
823 #ifdef CONFIG_CIFS_STATS2
824 .show_stats = cifs_show_stats,
825 #endif
826 };
827
828 /*
829 * Get root dentry from superblock according to prefix path mount option.
830 * Return dentry with refcount + 1 on success and NULL otherwise.
831 */
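/*
 * For example (illustrative): with a prefix path of "dir1\dir2" the loop
 * below looks up "dir1" and then "dir2" starting from sb->s_root, returning
 * the final dentry with an extra reference, or an ERR_PTR if a component is
 * missing or is not a directory.
 */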
832 static struct dentry *
833 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
834 {
835 struct dentry *dentry;
836 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
837 char *full_path = NULL;
838 char *s, *p;
839 char sep;
840
841 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
842 return dget(sb->s_root);
843
844 full_path = cifs_build_path_to_root(ctx, cifs_sb,
845 cifs_sb_master_tcon(cifs_sb), 0);
846 if (full_path == NULL)
847 return ERR_PTR(-ENOMEM);
848
849 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
850
851 sep = CIFS_DIR_SEP(cifs_sb);
852 dentry = dget(sb->s_root);
853 s = full_path;
854
855 do {
856 struct inode *dir = d_inode(dentry);
857 struct dentry *child;
858
859 if (!S_ISDIR(dir->i_mode)) {
860 dput(dentry);
861 dentry = ERR_PTR(-ENOTDIR);
862 break;
863 }
864
865 /* skip separators */
866 while (*s == sep)
867 s++;
868 if (!*s)
869 break;
870 p = s++;
871 /* next separator */
872 while (*s && *s != sep)
873 s++;
874
875 child = lookup_positive_unlocked(p, dentry, s - p);
876 dput(dentry);
877 dentry = child;
878 } while (!IS_ERR(dentry));
879 kfree(full_path);
880 return dentry;
881 }
882
883 static int cifs_set_super(struct super_block *sb, void *data)
884 {
885 struct cifs_mnt_data *mnt_data = data;
886 sb->s_fs_info = mnt_data->cifs_sb;
887 return set_anon_super(sb, NULL);
888 }
889
890 struct dentry *
891 cifs_smb3_do_mount(struct file_system_type *fs_type,
892 int flags, struct smb3_fs_context *old_ctx)
893 {
894 struct cifs_mnt_data mnt_data;
895 struct cifs_sb_info *cifs_sb;
896 struct super_block *sb;
897 struct dentry *root;
898 int rc;
899
900 if (cifsFYI) {
901 cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
902 old_ctx->source, flags);
903 } else {
904 cifs_info("Attempting to mount %s\n", old_ctx->source);
905 }
906
907 cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
908 if (!cifs_sb)
909 return ERR_PTR(-ENOMEM);
910
911 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
912 if (!cifs_sb->ctx) {
913 root = ERR_PTR(-ENOMEM);
914 goto out;
915 }
916 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
917 if (rc) {
918 root = ERR_PTR(rc);
919 goto out;
920 }
921
922 rc = cifs_setup_cifs_sb(cifs_sb);
923 if (rc) {
924 root = ERR_PTR(rc);
925 goto out;
926 }
927
928 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
929 if (rc) {
930 if (!(flags & SB_SILENT))
931 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
932 rc);
933 root = ERR_PTR(rc);
934 goto out;
935 }
936
937 mnt_data.ctx = cifs_sb->ctx;
938 mnt_data.cifs_sb = cifs_sb;
939 mnt_data.flags = flags;
940
941 /* BB should we make this contingent on mount parm? */
942 flags |= SB_NODIRATIME | SB_NOATIME;
943
944 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
945 if (IS_ERR(sb)) {
946 cifs_umount(cifs_sb);
947 return ERR_CAST(sb);
948 }
949
950 if (sb->s_root) {
951 cifs_dbg(FYI, "Use existing superblock\n");
952 cifs_umount(cifs_sb);
953 cifs_sb = NULL;
954 } else {
955 rc = cifs_read_super(sb);
956 if (rc) {
957 root = ERR_PTR(rc);
958 goto out_super;
959 }
960
961 sb->s_flags |= SB_ACTIVE;
962 }
963
964 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
965 if (IS_ERR(root))
966 goto out_super;
967
968 if (cifs_sb)
969 cifs_sb->root = dget(root);
970
971 cifs_dbg(FYI, "dentry root is: %p\n", root);
972 return root;
973
974 out_super:
975 deactivate_locked_super(sb);
976 return root;
977 out:
978 kfree(cifs_sb->prepath);
979 smb3_cleanup_fs_context(cifs_sb->ctx);
980 kfree(cifs_sb);
981 return root;
982 }
983
984
985 static ssize_t
986 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
987 {
988 ssize_t rc;
989 struct inode *inode = file_inode(iocb->ki_filp);
990
991 if (iocb->ki_flags & IOCB_DIRECT)
992 return cifs_user_readv(iocb, iter);
993
994 rc = cifs_revalidate_mapping(inode);
995 if (rc)
996 return rc;
997
998 return generic_file_read_iter(iocb, iter);
999 }
1000
1001 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1002 {
1003 struct inode *inode = file_inode(iocb->ki_filp);
1004 struct cifsInodeInfo *cinode = CIFS_I(inode);
1005 ssize_t written;
1006 int rc;
1007
1008 if (iocb->ki_filp->f_flags & O_DIRECT) {
1009 written = cifs_user_writev(iocb, from);
1010 if (written > 0 && CIFS_CACHE_READ(cinode)) {
1011 cifs_zap_mapping(inode);
1012 cifs_dbg(FYI,
1013 "Set no oplock for inode=%p after a write operation\n",
1014 inode);
1015 cinode->oplock = 0;
1016 }
1017 return written;
1018 }
1019
1020 written = cifs_get_writer(cinode);
1021 if (written)
1022 return written;
1023
1024 written = generic_file_write_iter(iocb, from);
1025
1026 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1027 goto out;
1028
1029 rc = filemap_fdatawrite(inode->i_mapping);
1030 if (rc)
1031 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1032 rc, inode);
1033
1034 out:
1035 cifs_put_writer(cinode);
1036 return written;
1037 }
1038
1039 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1040 {
1041 struct cifsFileInfo *cfile = file->private_data;
1042 struct cifs_tcon *tcon;
1043
1044 /*
1045 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1046 * the cached file length
1047 */
1048 if (whence != SEEK_SET && whence != SEEK_CUR) {
1049 int rc;
1050 struct inode *inode = file_inode(file);
1051
1052 /*
1053 * We need to be sure that all dirty pages are written and the
1054 * server has the newest file length.
1055 */
1056 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1057 inode->i_mapping->nrpages != 0) {
1058 rc = filemap_fdatawait(inode->i_mapping);
1059 if (rc) {
1060 mapping_set_error(inode->i_mapping, rc);
1061 return rc;
1062 }
1063 }
1064 /*
1065 * Some applications poll for the file length in this strange
1066 * way so we must seek to end on non-oplocked files by
1067 * setting the revalidate time to zero.
1068 */
1069 CIFS_I(inode)->time = 0;
1070
1071 rc = cifs_revalidate_file_attr(file);
1072 if (rc < 0)
1073 return (loff_t)rc;
1074 }
1075 if (cfile && cfile->tlink) {
1076 tcon = tlink_tcon(cfile->tlink);
1077 if (tcon->ses->server->ops->llseek)
1078 return tcon->ses->server->ops->llseek(file, tcon,
1079 offset, whence);
1080 }
1081 return generic_file_llseek(file, offset, whence);
1082 }
1083
1084 static int
1085 cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
1086 {
1087 /*
1088 * Note that this is called by vfs setlease with i_lock held to
1089 * protect *lease from going away.
1090 */
1091 struct inode *inode = file_inode(file);
1092 struct cifsFileInfo *cfile = file->private_data;
1093
1094 if (!(S_ISREG(inode->i_mode)))
1095 return -EINVAL;
1096
1097 /* Check if file is oplocked if this is request for new lease */
1098 if (arg == F_UNLCK ||
1099 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1100 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1101 return generic_setlease(file, arg, lease, priv);
1102 else if (tlink_tcon(cfile->tlink)->local_lease &&
1103 !CIFS_CACHE_READ(CIFS_I(inode)))
1104 /*
1105 * If the server claims to support oplock on this file, then we
1106 * still need to check oplock even if the local_lease mount
1107 * option is set, but there are servers which do not support
1108 * oplock for which this mount option may be useful if the user
1109 * knows that the file won't be changed on the server by anyone
1110 * else.
1111 */
1112 return generic_setlease(file, arg, lease, priv);
1113 else
1114 return -EAGAIN;
1115 }
1116
1117 struct file_system_type cifs_fs_type = {
1118 .owner = THIS_MODULE,
1119 .name = "cifs",
1120 .init_fs_context = smb3_init_fs_context,
1121 .parameters = smb3_fs_parameters,
1122 .kill_sb = cifs_kill_sb,
1123 .fs_flags = FS_RENAME_DOES_D_MOVE,
1124 };
1125 MODULE_ALIAS_FS("cifs");
1126
1127 struct file_system_type smb3_fs_type = {
1128 .owner = THIS_MODULE,
1129 .name = "smb3",
1130 .init_fs_context = smb3_init_fs_context,
1131 .parameters = smb3_fs_parameters,
1132 .kill_sb = cifs_kill_sb,
1133 .fs_flags = FS_RENAME_DOES_D_MOVE,
1134 };
1135 MODULE_ALIAS_FS("smb3");
1136 MODULE_ALIAS("smb3");
1137
1138 const struct inode_operations cifs_dir_inode_ops = {
1139 .create = cifs_create,
1140 .atomic_open = cifs_atomic_open,
1141 .lookup = cifs_lookup,
1142 .getattr = cifs_getattr,
1143 .unlink = cifs_unlink,
1144 .link = cifs_hardlink,
1145 .mkdir = cifs_mkdir,
1146 .rmdir = cifs_rmdir,
1147 .rename = cifs_rename2,
1148 .permission = cifs_permission,
1149 .setattr = cifs_setattr,
1150 .symlink = cifs_symlink,
1151 .mknod = cifs_mknod,
1152 .listxattr = cifs_listxattr,
1153 .get_acl = cifs_get_acl,
1154 .set_acl = cifs_set_acl,
1155 };
1156
1157 const struct inode_operations cifs_file_inode_ops = {
1158 .setattr = cifs_setattr,
1159 .getattr = cifs_getattr,
1160 .permission = cifs_permission,
1161 .listxattr = cifs_listxattr,
1162 .fiemap = cifs_fiemap,
1163 .get_acl = cifs_get_acl,
1164 .set_acl = cifs_set_acl,
1165 };
1166
1167 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1168 struct delayed_call *done)
1169 {
1170 char *target_path;
1171
1172 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1173 if (!target_path)
1174 return ERR_PTR(-ENOMEM);
1175
1176 spin_lock(&inode->i_lock);
1177 if (likely(CIFS_I(inode)->symlink_target)) {
1178 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1179 } else {
1180 kfree(target_path);
1181 target_path = ERR_PTR(-EOPNOTSUPP);
1182 }
1183 spin_unlock(&inode->i_lock);
1184
1185 if (!IS_ERR(target_path))
1186 set_delayed_call(done, kfree_link, target_path);
1187
1188 return target_path;
1189 }
1190
1191 const struct inode_operations cifs_symlink_inode_ops = {
1192 .get_link = cifs_get_link,
1193 .setattr = cifs_setattr,
1194 .permission = cifs_permission,
1195 .listxattr = cifs_listxattr,
1196 };
1197
1198 /*
1199 * Advance the EOF marker to after the source range.
1200 */
1201 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1202 struct cifs_tcon *src_tcon,
1203 unsigned int xid, loff_t src_end)
1204 {
1205 struct cifsFileInfo *writeable_srcfile;
1206 int rc = -EINVAL;
1207
1208 writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1209 if (writeable_srcfile) {
1210 if (src_tcon->ses->server->ops->set_file_size)
1211 rc = src_tcon->ses->server->ops->set_file_size(
1212 xid, src_tcon, writeable_srcfile,
1213 src_inode->i_size, true /* no need to set sparse */);
1214 else
1215 rc = -ENOSYS;
1216 cifsFileInfo_put(writeable_srcfile);
1217 cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1218 }
1219
1220 if (rc < 0)
1221 goto set_failed;
1222
1223 netfs_resize_file(&src_cifsi->netfs, src_end);
1224 fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1225 return 0;
1226
1227 set_failed:
1228 return filemap_write_and_wait(src_inode->i_mapping);
1229 }
1230
1231 /*
1232 * Flush out either the folio that overlaps the beginning of a range in which
1233 * pos resides or the folio that overlaps the end of a range unless that folio
1234 * is entirely within the range we're going to invalidate. We extend the flush
1235 * bounds to encompass the folio.
1236 */
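/*
 * Worked example (assuming 4KiB folios): flushing the start of a destination
 * range at pos 0x1800 finds the folio covering 0x1000-0x1fff; since
 * pos != fpos that folio straddles the range boundary, so it is written back
 * and *_fstart/*_fend are widened to 0x1000/0x1fff.
 */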
1237 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1238 bool first)
1239 {
1240 struct folio *folio;
1241 unsigned long long fpos, fend;
1242 pgoff_t index = pos / PAGE_SIZE;
1243 size_t size;
1244 int rc = 0;
1245
1246 folio = filemap_get_folio(inode->i_mapping, index);
1247 if (IS_ERR(folio))
1248 return 0;
1249
1250 size = folio_size(folio);
1251 fpos = folio_pos(folio);
1252 fend = fpos + size - 1;
1253 *_fstart = min_t(unsigned long long, *_fstart, fpos);
1254 *_fend = max_t(unsigned long long, *_fend, fend);
1255 if ((first && pos == fpos) || (!first && pos == fend))
1256 goto out;
1257
1258 rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1259 out:
1260 folio_put(folio);
1261 return rc;
1262 }
1263
1264 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1265 struct file *dst_file, loff_t destoff, loff_t len,
1266 unsigned int remap_flags)
1267 {
1268 struct inode *src_inode = file_inode(src_file);
1269 struct inode *target_inode = file_inode(dst_file);
1270 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1271 struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1272 struct cifsFileInfo *smb_file_src = src_file->private_data;
1273 struct cifsFileInfo *smb_file_target = dst_file->private_data;
1274 struct cifs_tcon *target_tcon, *src_tcon;
1275 unsigned long long destend, fstart, fend, new_size;
1276 unsigned int xid;
1277 int rc;
1278
1279 if (remap_flags & REMAP_FILE_DEDUP)
1280 return -EOPNOTSUPP;
1281 if (remap_flags & ~REMAP_FILE_ADVISORY)
1282 return -EINVAL;
1283
1284 cifs_dbg(FYI, "clone range\n");
1285
1286 xid = get_xid();
1287
1288 if (!smb_file_src || !smb_file_target) {
1289 rc = -EBADF;
1290 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1291 goto out;
1292 }
1293
1294 src_tcon = tlink_tcon(smb_file_src->tlink);
1295 target_tcon = tlink_tcon(smb_file_target->tlink);
1296
1297 /*
1298 * Note: the cifs case is easier than btrfs since the server is responsible
1299 * for checking proper open modes and file type, and if it wants the
1300 * server could even support copying a range where source = target
1301 */
1302 lock_two_nondirectories(target_inode, src_inode);
1303
1304 if (len == 0)
1305 len = src_inode->i_size - off;
1306
1307 cifs_dbg(FYI, "clone range\n");
1308
1309 /* Flush the source buffer */
1310 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1311 off + len - 1);
1312 if (rc)
1313 goto unlock;
1314
1315 /* The server-side copy will fail if the source crosses the EOF marker.
1316 * Advance the EOF marker after the flush above to the end of the range
1317 * if it's short of that.
1318 */
1319 if (src_cifsi->netfs.remote_i_size < off + len) {
1320 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1321 if (rc < 0)
1322 goto unlock;
1323 }
1324
1325 new_size = destoff + len;
1326 destend = destoff + len - 1;
1327
1328 /* Flush the folios at either end of the destination range to prevent
1329 * accidental loss of dirty data outside of the range.
1330 */
1331 fstart = destoff;
1332 fend = destend;
1333
1334 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1335 if (rc)
1336 goto unlock;
1337 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1338 if (rc)
1339 goto unlock;
1340
1341 /* Discard all the folios that overlap the destination region. */
1342 cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1343 truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1344
1345 fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1346 i_size_read(target_inode), 0);
1347
1348 rc = -EOPNOTSUPP;
1349 if (target_tcon->ses->server->ops->duplicate_extents) {
1350 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1351 smb_file_src, smb_file_target, off, len, destoff);
1352 if (rc == 0 && new_size > i_size_read(target_inode)) {
1353 truncate_setsize(target_inode, new_size);
1354 netfs_resize_file(&target_cifsi->netfs, new_size);
1355 fscache_resize_cookie(cifs_inode_cookie(target_inode),
1356 new_size);
1357 }
1358 }
1359
1360 /* force revalidate of size and timestamps of target file now
1361 that target is updated on the server */
1362 CIFS_I(target_inode)->time = 0;
1363 unlock:
1364 /* although unlocking in the reverse order from locking is not
1365 strictly necessary here it is a little cleaner to be consistent */
1366 unlock_two_nondirectories(src_inode, target_inode);
1367 out:
1368 free_xid(xid);
1369 return rc < 0 ? rc : len;
1370 }
1371
1372 ssize_t cifs_file_copychunk_range(unsigned int xid,
1373 struct file *src_file, loff_t off,
1374 struct file *dst_file, loff_t destoff,
1375 size_t len, unsigned int flags)
1376 {
1377 struct inode *src_inode = file_inode(src_file);
1378 struct inode *target_inode = file_inode(dst_file);
1379 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1380 struct cifsFileInfo *smb_file_src;
1381 struct cifsFileInfo *smb_file_target;
1382 struct cifs_tcon *src_tcon;
1383 struct cifs_tcon *target_tcon;
1384 unsigned long long destend, fstart, fend;
1385 ssize_t rc;
1386
1387 cifs_dbg(FYI, "copychunk range\n");
1388
1389 if (!src_file->private_data || !dst_file->private_data) {
1390 rc = -EBADF;
1391 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1392 goto out;
1393 }
1394
1395 rc = -EXDEV;
1396 smb_file_target = dst_file->private_data;
1397 smb_file_src = src_file->private_data;
1398 src_tcon = tlink_tcon(smb_file_src->tlink);
1399 target_tcon = tlink_tcon(smb_file_target->tlink);
1400
1401 if (src_tcon->ses != target_tcon->ses) {
1402 cifs_dbg(VFS, "source and target of copy not on same server\n");
1403 goto out;
1404 }
1405
1406 rc = -EOPNOTSUPP;
1407 if (!target_tcon->ses->server->ops->copychunk_range)
1408 goto out;
1409
1410 /*
1411 * Note: the cifs case is easier than btrfs since the server is responsible
1412 * for checking proper open modes and file type, and if it wants the
1413 * server could even support copying a range where source = target
1414 */
1415 lock_two_nondirectories(target_inode, src_inode);
1416
1417 cifs_dbg(FYI, "about to flush pages\n");
1418
1419 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1420 off + len - 1);
1421 if (rc)
1422 goto unlock;
1423
1424 /* The server-side copy will fail if the source crosses the EOF marker.
1425 * Advance the EOF marker after the flush above to the end of the range
1426 * if it's short of that.
1427 */
1428 if (src_cifsi->server_eof < off + len) {
1429 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1430 if (rc < 0)
1431 goto unlock;
1432 }
1433
1434 destend = destoff + len - 1;
1435
1436 /* Flush the folios at either end of the destination range to prevent
1437 * accidental loss of dirty data outside of the range.
1438 */
1439 fstart = destoff;
1440 fend = destend;
1441
1442 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1443 if (rc)
1444 goto unlock;
1445 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1446 if (rc)
1447 goto unlock;
1448
1449 /* Discard all the folios that overlap the destination region. */
1450 truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1451
1452 rc = file_modified(dst_file);
1453 if (!rc) {
1454 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1455 smb_file_src, smb_file_target, off, len, destoff);
1456 if (rc > 0 && destoff + rc > i_size_read(target_inode))
1457 truncate_setsize(target_inode, destoff + rc);
1458 }
1459
1460 file_accessed(src_file);
1461
1462 /* force revalidate of size and timestamps of target file now
1463 * that target is updated on the server
1464 */
1465 CIFS_I(target_inode)->time = 0;
1466
1467 unlock:
1468 /* although unlocking in the reverse order from locking is not
1469 * strictly necessary here it is a little cleaner to be consistent
1470 */
1471 unlock_two_nondirectories(src_inode, target_inode);
1472
1473 out:
1474 return rc;
1475 }
1476
1477 /*
1478 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1479 * is a dummy operation.
1480 */
1481 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1482 {
1483 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1484 file, datasync);
1485
1486 return 0;
1487 }
1488
1489 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1490 struct file *dst_file, loff_t destoff,
1491 size_t len, unsigned int flags)
1492 {
1493 unsigned int xid = get_xid();
1494 ssize_t rc;
1495 struct cifsFileInfo *cfile = dst_file->private_data;
1496
1497 if (cfile->swapfile) {
1498 rc = -EOPNOTSUPP;
1499 free_xid(xid);
1500 return rc;
1501 }
1502
1503 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1504 len, flags);
1505 free_xid(xid);
1506
1507 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1508 rc = generic_copy_file_range(src_file, off, dst_file,
1509 destoff, len, flags);
1510 return rc;
1511 }
1512
1513 const struct file_operations cifs_file_ops = {
1514 .read_iter = cifs_loose_read_iter,
1515 .write_iter = cifs_file_write_iter,
1516 .open = cifs_open,
1517 .release = cifs_close,
1518 .lock = cifs_lock,
1519 .flock = cifs_flock,
1520 .fsync = cifs_fsync,
1521 .flush = cifs_flush,
1522 .mmap = cifs_file_mmap,
1523 .splice_read = filemap_splice_read,
1524 .splice_write = iter_file_splice_write,
1525 .llseek = cifs_llseek,
1526 .unlocked_ioctl = cifs_ioctl,
1527 .copy_file_range = cifs_copy_file_range,
1528 .remap_file_range = cifs_remap_file_range,
1529 .setlease = cifs_setlease,
1530 .fallocate = cifs_fallocate,
1531 };
1532
1533 const struct file_operations cifs_file_strict_ops = {
1534 .read_iter = cifs_strict_readv,
1535 .write_iter = cifs_strict_writev,
1536 .open = cifs_open,
1537 .release = cifs_close,
1538 .lock = cifs_lock,
1539 .flock = cifs_flock,
1540 .fsync = cifs_strict_fsync,
1541 .flush = cifs_flush,
1542 .mmap = cifs_file_strict_mmap,
1543 .splice_read = filemap_splice_read,
1544 .splice_write = iter_file_splice_write,
1545 .llseek = cifs_llseek,
1546 .unlocked_ioctl = cifs_ioctl,
1547 .copy_file_range = cifs_copy_file_range,
1548 .remap_file_range = cifs_remap_file_range,
1549 .setlease = cifs_setlease,
1550 .fallocate = cifs_fallocate,
1551 };
1552
1553 const struct file_operations cifs_file_direct_ops = {
1554 .read_iter = cifs_direct_readv,
1555 .write_iter = cifs_direct_writev,
1556 .open = cifs_open,
1557 .release = cifs_close,
1558 .lock = cifs_lock,
1559 .flock = cifs_flock,
1560 .fsync = cifs_fsync,
1561 .flush = cifs_flush,
1562 .mmap = cifs_file_mmap,
1563 .splice_read = copy_splice_read,
1564 .splice_write = iter_file_splice_write,
1565 .unlocked_ioctl = cifs_ioctl,
1566 .copy_file_range = cifs_copy_file_range,
1567 .remap_file_range = cifs_remap_file_range,
1568 .llseek = cifs_llseek,
1569 .setlease = cifs_setlease,
1570 .fallocate = cifs_fallocate,
1571 };
1572
1573 const struct file_operations cifs_file_nobrl_ops = {
1574 .read_iter = cifs_loose_read_iter,
1575 .write_iter = cifs_file_write_iter,
1576 .open = cifs_open,
1577 .release = cifs_close,
1578 .fsync = cifs_fsync,
1579 .flush = cifs_flush,
1580 .mmap = cifs_file_mmap,
1581 .splice_read = filemap_splice_read,
1582 .splice_write = iter_file_splice_write,
1583 .llseek = cifs_llseek,
1584 .unlocked_ioctl = cifs_ioctl,
1585 .copy_file_range = cifs_copy_file_range,
1586 .remap_file_range = cifs_remap_file_range,
1587 .setlease = cifs_setlease,
1588 .fallocate = cifs_fallocate,
1589 };
1590
1591 const struct file_operations cifs_file_strict_nobrl_ops = {
1592 .read_iter = cifs_strict_readv,
1593 .write_iter = cifs_strict_writev,
1594 .open = cifs_open,
1595 .release = cifs_close,
1596 .fsync = cifs_strict_fsync,
1597 .flush = cifs_flush,
1598 .mmap = cifs_file_strict_mmap,
1599 .splice_read = filemap_splice_read,
1600 .splice_write = iter_file_splice_write,
1601 .llseek = cifs_llseek,
1602 .unlocked_ioctl = cifs_ioctl,
1603 .copy_file_range = cifs_copy_file_range,
1604 .remap_file_range = cifs_remap_file_range,
1605 .setlease = cifs_setlease,
1606 .fallocate = cifs_fallocate,
1607 };
1608
1609 const struct file_operations cifs_file_direct_nobrl_ops = {
1610 .read_iter = cifs_direct_readv,
1611 .write_iter = cifs_direct_writev,
1612 .open = cifs_open,
1613 .release = cifs_close,
1614 .fsync = cifs_fsync,
1615 .flush = cifs_flush,
1616 .mmap = cifs_file_mmap,
1617 .splice_read = copy_splice_read,
1618 .splice_write = iter_file_splice_write,
1619 .unlocked_ioctl = cifs_ioctl,
1620 .copy_file_range = cifs_copy_file_range,
1621 .remap_file_range = cifs_remap_file_range,
1622 .llseek = cifs_llseek,
1623 .setlease = cifs_setlease,
1624 .fallocate = cifs_fallocate,
1625 };
1626
1627 const struct file_operations cifs_dir_ops = {
1628 .iterate_shared = cifs_readdir,
1629 .release = cifs_closedir,
1630 .read = generic_read_dir,
1631 .unlocked_ioctl = cifs_ioctl,
1632 .copy_file_range = cifs_copy_file_range,
1633 .remap_file_range = cifs_remap_file_range,
1634 .llseek = generic_file_llseek,
1635 .fsync = cifs_dir_fsync,
1636 };
1637
1638 static void
1639 cifs_init_once(void *inode)
1640 {
1641 struct cifsInodeInfo *cifsi = inode;
1642
1643 inode_init_once(&cifsi->netfs.inode);
1644 init_rwsem(&cifsi->lock_sem);
1645 }
1646
1647 static int __init
1648 cifs_init_inodecache(void)
1649 {
1650 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1651 sizeof(struct cifsInodeInfo),
1652 0, (SLAB_RECLAIM_ACCOUNT|
1653 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1654 cifs_init_once);
1655 if (cifs_inode_cachep == NULL)
1656 return -ENOMEM;
1657
1658 return 0;
1659 }
1660
1661 static void
1662 cifs_destroy_inodecache(void)
1663 {
1664 /*
1665 * Make sure all delayed rcu free inodes are flushed before we
1666 * destroy cache.
1667 */
1668 rcu_barrier();
1669 kmem_cache_destroy(cifs_inode_cachep);
1670 }
1671
1672 static int
1673 cifs_init_request_bufs(void)
1674 {
1675 /*
1676 * The SMB2 maximum header size is bigger than the CIFS one - no problem
1677 * allocating a few more bytes for CIFS.
1678 */
1679 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1680
1681 if (CIFSMaxBufSize < 8192) {
1682 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1683 Unicode path name has to fit in any SMB/CIFS path based frames */
1684 CIFSMaxBufSize = 8192;
1685 } else if (CIFSMaxBufSize > 1024*127) {
1686 CIFSMaxBufSize = 1024 * 127;
1687 } else {
1688 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1689 }
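/*
 * For illustration: the mask 0x1FE00 clears the low nine bits, so e.g. a
 * module parameter of 65000 (0xFDE8) is rounded down to 64512 (0xFC00),
 * keeping the size a 512-byte multiple within the 8192..130048 range
 * described in the module parameter help text above.
 */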
1690 /*
1691 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1692 CIFSMaxBufSize, CIFSMaxBufSize);
1693 */
1694 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1695 CIFSMaxBufSize + max_hdr_size, 0,
1696 SLAB_HWCACHE_ALIGN, 0,
1697 CIFSMaxBufSize + max_hdr_size,
1698 NULL);
1699 if (cifs_req_cachep == NULL)
1700 return -ENOMEM;
1701
1702 if (cifs_min_rcv < 1)
1703 cifs_min_rcv = 1;
1704 else if (cifs_min_rcv > 64) {
1705 cifs_min_rcv = 64;
1706 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1707 }
1708
1709 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1710 cifs_req_cachep);
1711
1712 if (cifs_req_poolp == NULL) {
1713 kmem_cache_destroy(cifs_req_cachep);
1714 return -ENOMEM;
1715 }
1716 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1717 almost all handle based requests (but not write response, nor is it
1718 sufficient for path based requests). A smaller size would have
1719 been more efficient (compacting multiple slab items on one 4k page)
1720 for the case in which debug was on, but this larger size allows
1721 more SMBs to use small buffer alloc and is still much more
1722 efficient to alloc 1 per page off the slab compared to 17K (5page)
1723 alloc of large cifs buffers even when page debugging is on */
1724 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1725 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1726 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1727 if (cifs_sm_req_cachep == NULL) {
1728 mempool_destroy(cifs_req_poolp);
1729 kmem_cache_destroy(cifs_req_cachep);
1730 return -ENOMEM;
1731 }
1732
1733 if (cifs_min_small < 2)
1734 cifs_min_small = 2;
1735 else if (cifs_min_small > 256) {
1736 cifs_min_small = 256;
1737 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1738 }
1739
1740 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1741 cifs_sm_req_cachep);
1742
1743 if (cifs_sm_req_poolp == NULL) {
1744 mempool_destroy(cifs_req_poolp);
1745 kmem_cache_destroy(cifs_req_cachep);
1746 kmem_cache_destroy(cifs_sm_req_cachep);
1747 return -ENOMEM;
1748 }
1749
1750 return 0;
1751 }
1752
1753 static void
1754 cifs_destroy_request_bufs(void)
1755 {
1756 mempool_destroy(cifs_req_poolp);
1757 kmem_cache_destroy(cifs_req_cachep);
1758 mempool_destroy(cifs_sm_req_poolp);
1759 kmem_cache_destroy(cifs_sm_req_cachep);
1760 }
1761
1762 static int init_mids(void)
1763 {
1764 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1765 sizeof(struct mid_q_entry), 0,
1766 SLAB_HWCACHE_ALIGN, NULL);
1767 if (cifs_mid_cachep == NULL)
1768 return -ENOMEM;
1769
1770 /* 3 is a reasonable minimum number of simultaneous operations */
1771 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1772 if (cifs_mid_poolp == NULL) {
1773 kmem_cache_destroy(cifs_mid_cachep);
1774 return -ENOMEM;
1775 }
1776
1777 return 0;
1778 }
1779
1780 static void destroy_mids(void)
1781 {
1782 mempool_destroy(cifs_mid_poolp);
1783 kmem_cache_destroy(cifs_mid_cachep);
1784 }
1785
1786 static int __init
1787 init_cifs(void)
1788 {
1789 int rc = 0;
1790 cifs_proc_init();
1791 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1792 /*
1793 * Initialize Global counters
1794 */
1795 atomic_set(&sesInfoAllocCount, 0);
1796 atomic_set(&tconInfoAllocCount, 0);
1797 atomic_set(&tcpSesNextId, 0);
1798 atomic_set(&tcpSesAllocCount, 0);
1799 atomic_set(&tcpSesReconnectCount, 0);
1800 atomic_set(&tconInfoReconnectCount, 0);
1801
1802 atomic_set(&buf_alloc_count, 0);
1803 atomic_set(&small_buf_alloc_count, 0);
1804 #ifdef CONFIG_CIFS_STATS2
1805 atomic_set(&total_buf_alloc_count, 0);
1806 atomic_set(&total_small_buf_alloc_count, 0);
1807 if (slow_rsp_threshold < 1)
1808 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1809 else if (slow_rsp_threshold > 32767)
1810 cifs_dbg(VFS,
1811 "slow response threshold set higher than recommended (0 to 32767)\n");
1812 #endif /* CONFIG_CIFS_STATS2 */
1813
1814 atomic_set(&mid_count, 0);
1815 GlobalCurrentXid = 0;
1816 GlobalTotalActiveXid = 0;
1817 GlobalMaxActiveXid = 0;
1818 spin_lock_init(&cifs_tcp_ses_lock);
1819 spin_lock_init(&GlobalMid_Lock);
1820
1821 cifs_lock_secret = get_random_u32();
1822
1823 if (cifs_max_pending < 2) {
1824 cifs_max_pending = 2;
1825 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1826 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1827 cifs_max_pending = CIFS_MAX_REQ;
1828 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1829 CIFS_MAX_REQ);
1830 }
1831
1832 /* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1833 if (dir_cache_timeout > 65000) {
1834 dir_cache_timeout = 65000;
1835 cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1836 }
1837
1838 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1839 if (!cifsiod_wq) {
1840 rc = -ENOMEM;
1841 goto out_clean_proc;
1842 }
1843
1844 /*
1845 * Consider in the future setting limit != 0 (perhaps to min(num_of_cores - 1, 3))
1846 * so that we don't launch too many worker threads, but
1847 * Documentation/core-api/workqueue.rst recommends setting it to 0
1848 */
1849
1850 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1851 decrypt_wq = alloc_workqueue("smb3decryptd",
1852 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1853 if (!decrypt_wq) {
1854 rc = -ENOMEM;
1855 goto out_destroy_cifsiod_wq;
1856 }
1857
1858 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1859 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1860 if (!fileinfo_put_wq) {
1861 rc = -ENOMEM;
1862 goto out_destroy_decrypt_wq;
1863 }
1864
1865 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1866 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1867 if (!cifsoplockd_wq) {
1868 rc = -ENOMEM;
1869 goto out_destroy_fileinfo_put_wq;
1870 }
1871
1872 deferredclose_wq = alloc_workqueue("deferredclose",
1873 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1874 if (!deferredclose_wq) {
1875 rc = -ENOMEM;
1876 goto out_destroy_cifsoplockd_wq;
1877 }
1878
1879 rc = cifs_init_inodecache();
1880 if (rc)
1881 goto out_destroy_deferredclose_wq;
1882
1883 rc = init_mids();
1884 if (rc)
1885 goto out_destroy_inodecache;
1886
1887 rc = cifs_init_request_bufs();
1888 if (rc)
1889 goto out_destroy_mids;
1890
1891 #ifdef CONFIG_CIFS_DFS_UPCALL
1892 rc = dfs_cache_init();
1893 if (rc)
1894 goto out_destroy_request_bufs;
1895 #endif /* CONFIG_CIFS_DFS_UPCALL */
1896 #ifdef CONFIG_CIFS_UPCALL
1897 rc = init_cifs_spnego();
1898 if (rc)
1899 goto out_destroy_dfs_cache;
1900 #endif /* CONFIG_CIFS_UPCALL */
1901 #ifdef CONFIG_CIFS_SWN_UPCALL
1902 rc = cifs_genl_init();
1903 if (rc)
1904 goto out_register_key_type;
1905 #endif /* CONFIG_CIFS_SWN_UPCALL */
1906
1907 rc = init_cifs_idmap();
1908 if (rc)
1909 goto out_cifs_swn_init;
1910
1911 rc = register_filesystem(&cifs_fs_type);
1912 if (rc)
1913 goto out_init_cifs_idmap;
1914
1915 rc = register_filesystem(&smb3_fs_type);
1916 if (rc) {
1917 unregister_filesystem(&cifs_fs_type);
1918 goto out_init_cifs_idmap;
1919 }
1920
1921 return 0;
1922
1923 out_init_cifs_idmap:
1924 exit_cifs_idmap();
1925 out_cifs_swn_init:
1926 #ifdef CONFIG_CIFS_SWN_UPCALL
1927 cifs_genl_exit();
1928 out_register_key_type:
1929 #endif
1930 #ifdef CONFIG_CIFS_UPCALL
1931 exit_cifs_spnego();
1932 out_destroy_dfs_cache:
1933 #endif
1934 #ifdef CONFIG_CIFS_DFS_UPCALL
1935 dfs_cache_destroy();
1936 out_destroy_request_bufs:
1937 #endif
1938 cifs_destroy_request_bufs();
1939 out_destroy_mids:
1940 destroy_mids();
1941 out_destroy_inodecache:
1942 cifs_destroy_inodecache();
1943 out_destroy_deferredclose_wq:
1944 destroy_workqueue(deferredclose_wq);
1945 out_destroy_cifsoplockd_wq:
1946 destroy_workqueue(cifsoplockd_wq);
1947 out_destroy_fileinfo_put_wq:
1948 destroy_workqueue(fileinfo_put_wq);
1949 out_destroy_decrypt_wq:
1950 destroy_workqueue(decrypt_wq);
1951 out_destroy_cifsiod_wq:
1952 destroy_workqueue(cifsiod_wq);
1953 out_clean_proc:
1954 cifs_proc_clean();
1955 return rc;
1956 }
1957
1958 static void __exit
1959 exit_cifs(void)
1960 {
1961 cifs_dbg(NOISY, "exit_smb3\n");
1962 unregister_filesystem(&cifs_fs_type);
1963 unregister_filesystem(&smb3_fs_type);
1964 cifs_release_automount_timer();
1965 exit_cifs_idmap();
1966 #ifdef CONFIG_CIFS_SWN_UPCALL
1967 cifs_genl_exit();
1968 #endif
1969 #ifdef CONFIG_CIFS_UPCALL
1970 exit_cifs_spnego();
1971 #endif
1972 #ifdef CONFIG_CIFS_DFS_UPCALL
1973 dfs_cache_destroy();
1974 #endif
1975 cifs_destroy_request_bufs();
1976 destroy_mids();
1977 cifs_destroy_inodecache();
1978 destroy_workqueue(deferredclose_wq);
1979 destroy_workqueue(cifsoplockd_wq);
1980 destroy_workqueue(decrypt_wq);
1981 destroy_workqueue(fileinfo_put_wq);
1982 destroy_workqueue(cifsiod_wq);
1983 cifs_proc_clean();
1984 }
1985
1986 MODULE_AUTHOR("Steve French");
1987 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1988 MODULE_DESCRIPTION
1989 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1990 "also older servers complying with the SNIA CIFS Specification)");
1991 MODULE_VERSION(CIFS_VERSION);
1992 MODULE_SOFTDEP("ecb");
1993 MODULE_SOFTDEP("hmac");
1994 MODULE_SOFTDEP("md5");
1995 MODULE_SOFTDEP("nls");
1996 MODULE_SOFTDEP("aes");
1997 MODULE_SOFTDEP("cmac");
1998 MODULE_SOFTDEP("sha256");
1999 MODULE_SOFTDEP("sha512");
2000 MODULE_SOFTDEP("aead2");
2001 MODULE_SOFTDEP("ccm");
2002 MODULE_SOFTDEP("gcm");
2003 module_init(init_cifs)
2004 module_exit(exit_cifs)