]> git.ipfire.org Git - thirdparty/kernel/stable.git/blob - fs/smb/client/cifsfs.c
30781789dfd9f539abbe7499d65496d05607d5df
[thirdparty/kernel/stable.git] / fs / smb / client / cifsfs.c
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52
/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 *
 * DOS date layout:  year-since-1980 << 9 | month << 5 | day
 * DOS time layout:  hours << 11 | minutes << 5 | seconds/2
 * (so SMB_TIME_MAX's final field of 29 encodes 58 seconds)
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61
/* Debug/behavior toggles, most exposed as module parameters below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 *  Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
/* List of all TCP sessions, protected by cifs_tcp_ses_lock */
struct list_head cifs_tcp_ses_list;
spinlock_t cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;

/* Tunable module parameters (see MODULE_PARM_DESC strings for ranges) */
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				    "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				     "before logging that a response is delayed. "
				     "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
					  "helpful to restrict the ability to "
					  "override the default dialects (SMB2.1, "
					  "SMB3 and SMB3.02) on mount with old "
					  "dialects (CIFS/SMB1 and SMB2) since "
					  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
					  " and less secure. Default: n/N/0");

/* Workqueues used for asynchronous/deferred CIFS work */
struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
struct workqueue_struct *serverclose_wq;
__u32 cifs_lock_secret;
161
/*
 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
168 void
169 cifs_sb_active(struct super_block *sb)
170 {
171 struct cifs_sb_info *server = CIFS_SB(sb);
172
173 if (atomic_inc_return(&server->active) == 1)
174 atomic_inc(&sb->s_active);
175 }
176
177 void
178 cifs_sb_deactive(struct super_block *sb)
179 {
180 struct cifs_sb_info *server = CIFS_SB(sb);
181
182 if (atomic_dec_and_test(&server->active))
183 deactivate_super(sb);
184 }
185
/*
 * Fill in a freshly-allocated superblock for a cifs mount: flags, size and
 * timestamp limits, block size, super_operations, and the root inode/dentry.
 * Returns 0 on success or a negative errno; on failure the caller is
 * responsible for tearing the superblock down.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* snapshot mounts are views of the past and thus read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	/* NFS re-export requires stable (server-provided) inode numbers */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
286
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
306
/*
 * ->statfs: fill in filesystem statistics for this mount.  Static fields
 * (f_namelen, f_fsid) come from the tree connection; block counts are
 * queried from the server when the dialect provides a queryfs op.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}
338
339 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
340 {
341 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
342 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
343 struct TCP_Server_Info *server = tcon->ses->server;
344
345 if (server->ops->fallocate)
346 return server->ops->fallocate(file, tcon, mode, off, len);
347
348 return -EOPNOTSUPP;
349 }
350
351 static int cifs_permission(struct mnt_idmap *idmap,
352 struct inode *inode, int mask)
353 {
354 struct cifs_sb_info *cifs_sb;
355
356 cifs_sb = CIFS_SB(inode->i_sb);
357
358 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
359 if ((mask & MAY_EXEC) && !execute_ok(inode))
360 return -EACCES;
361 else
362 return 0;
363 } else /* file mode might have been restricted at mount time
364 on the client (above and beyond ACL on servers) for
365 servers which do not support setting and viewing mode bits,
366 so allowing client to check permissions is useful */
367 return generic_permission(&nop_mnt_idmap, inode, mask);
368 }
369
/* Slab caches and mempools backing inode, request buffer, and mid allocations */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
377
/*
 * ->alloc_inode: allocate and initialize a cifsInodeInfo from the slab cache.
 * Returns the embedded VFS inode, or NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;

	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;	/* attribute cache not yet valid */
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
415
/* ->free_inode: release the cifsInodeInfo back to the slab cache. */
static void
cifs_free_inode(struct inode *inode)
{
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	/*
	 * NOTE(review): symlink_target is only freed for symlinks here;
	 * it is initialized to NULL in cifs_alloc_inode, and presumably
	 * only set on symlink inodes - confirm against callers that set it.
	 */
	if (S_ISLNK(inode->i_mode))
		kfree(cinode->symlink_target);
	kmem_cache_free(cifs_inode_cachep, cinode);
}
425
/* ->evict_inode: drop page cache and fscache state before the inode goes away. */
static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	/* release the fscache "in use" pin if writeback was holding one */
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
435
436 static void
437 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
438 {
439 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
440 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
441
442 seq_puts(s, ",addr=");
443
444 switch (server->dstaddr.ss_family) {
445 case AF_INET:
446 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
447 break;
448 case AF_INET6:
449 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
450 if (sa6->sin6_scope_id)
451 seq_printf(s, "%%%u", sa6->sin6_scope_id);
452 break;
453 default:
454 seq_puts(s, "(unknown)");
455 }
456 if (server->rdma)
457 seq_puts(s, ",rdma");
458 }
459
460 static void
461 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
462 {
463 if (ses->sectype == Unspecified) {
464 if (ses->user_name == NULL)
465 seq_puts(s, ",sec=none");
466 return;
467 }
468
469 seq_puts(s, ",sec=");
470
471 switch (ses->sectype) {
472 case NTLMv2:
473 seq_puts(s, "ntlmv2");
474 break;
475 case Kerberos:
476 seq_puts(s, "krb5");
477 break;
478 case RawNTLMSSP:
479 seq_puts(s, "ntlmssp");
480 break;
481 default:
482 /* shouldn't ever happen */
483 seq_puts(s, "unknown");
484 break;
485 }
486
487 if (ses->sign)
488 seq_puts(s, "i");
489
490 if (ses->sectype == Kerberos)
491 seq_printf(s, ",cruid=%u",
492 from_kuid_munged(&init_user_ns, ses->cred_uid));
493 }
494
495 static void
496 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
497 {
498 seq_puts(s, ",cache=");
499
500 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
501 seq_puts(s, "strict");
502 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
503 seq_puts(s, "none");
504 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
505 seq_puts(s, "singleclient"); /* assume only one client access */
506 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
507 seq_puts(s, "ro"); /* read only caching assumed */
508 else
509 seq_puts(s, "loose");
510 }
511
512 /*
513 * cifs_show_devname() is used so we show the mount device name with correct
514 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
515 */
516 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
517 {
518 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
519 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
520
521 if (devname == NULL)
522 seq_puts(m, "none");
523 else {
524 convert_delimiter(devname, '/');
525 /* escape all spaces in share names */
526 seq_escape(m, devname, " \t");
527 kfree(devname);
528 }
529 return 0;
530 }
531
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 *
 * Output order is deliberate and stable: dialect/security/cache first,
 * then identity, address, per-tcon and per-sb flags, numeric tunables,
 * and finally timeouts and channel info.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* source address is only shown if one was bound explicitly */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;

		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
729
/*
 * ->umount_begin (umount -f): flush deferred closes and wake all waiters
 * on the server queues so pending requests can notice the forced unmount.
 * Lock order: cifs_tcp_ses_lock before tcon->tc_lock.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
774
/* ->freeze_fs: close any deferred (lazily-held) file handles before freezing. */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (!cifs_sb)
		return 0;

	cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));
	return 0;
}
788
789 #ifdef CONFIG_CIFS_STATS2
/* ->show_stats stub: per-mount statistics display not yet implemented */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
795 #endif
796
/* ->write_inode: let the netfs library unpin resources held for writeback. */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
801
802 static int cifs_drop_inode(struct inode *inode)
803 {
804 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
805
806 /* no serverino => unconditional eviction */
807 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
808 generic_drop_inode(inode);
809 }
810
/* Superblock operations for cifs mounts */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
831
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with a prefix path the sb root already points at the right place */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/*
	 * Walk the path one component at a time; each iteration exchanges
	 * the reference on the current dentry for one on its child.
	 */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
886
887 static int cifs_set_super(struct super_block *sb, void *data)
888 {
889 struct cifs_mnt_data *mnt_data = data;
890 sb->s_fs_info = mnt_data->cifs_sb;
891 return set_anon_super(sb, NULL);
892 }
893
/*
 * Common mount entry point for the cifs/smb3 filesystem types.  Duplicates
 * the parsed fs context, connects to the server (cifs_mount), then finds or
 * creates a matching superblock via sget() and resolves the root dentry.
 *
 * Ownership: on the "existing superblock" path our local cifs_sb is torn
 * down and set to NULL; otherwise it becomes owned by the superblock.
 * Returns the root dentry or an ERR_PTR.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		/* sget failed: the connection we set up is ours to tear down */
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		/* superblock matched an existing mount; drop our duplicate */
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* cifs_sb is NULL when reusing an existing superblock (see above) */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	/* pre-sget failure: free everything we allocated here */
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
987
988
989 static ssize_t
990 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
991 {
992 ssize_t rc;
993 struct inode *inode = file_inode(iocb->ki_filp);
994
995 if (iocb->ki_flags & IOCB_DIRECT)
996 return cifs_user_readv(iocb, iter);
997
998 rc = cifs_revalidate_mapping(inode);
999 if (rc)
1000 return rc;
1001
1002 return generic_file_read_iter(iocb, iter);
1003 }
1004
/*
 * Write path for cached files.  O_DIRECT writes bypass the page cache and
 * zap any cached data.  Otherwise write through the page cache, and flush
 * immediately unless we hold a write-caching oplock/lease.
 */
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			/* cached data is now stale relative to the server */
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	/* returns non-zero (and we bail) if writes are currently blocked */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	/* with a write-caching oplock we may keep the data cached */
	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}
1042
/*
 * llseek method.  SEEK_END/SEEK_DATA/SEEK_HOLE depend on the file size as
 * the server sees it, so those cases revalidate the inode attributes
 * first; servers whose ops implement ->llseek (sparse-file aware seeking)
 * then get a chance to handle the request themselves.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* Prefer the server-specific llseek (e.g. SMB2 SEEK_DATA/SEEK_HOLE) */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1087
/*
 * setlease method.  Only hand out a local lease when the client already
 * has matching cache permission from the server (oplock/lease), or when
 * the local_lease mount option promises that no one else changes the
 * file server-side; otherwise refuse with -EAGAIN.
 * NOTE(review): cfile is assumed non-NULL here (file was opened through
 * cifs_open, which sets private_data) — confirm against VFS callers.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1117
/* Traditional "cifs" filesystem type (mount -t cifs); shares the fs_context
 * setup and kill_sb path with the "smb3" type below. */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1127
/* "smb3" filesystem type (mount -t smb3) — identical implementation to the
 * "cifs" type; only the registered name differs. */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1138
/* Inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1157
/* Inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1167
/*
 * ->get_link method: return a copy of the cached symlink target.
 * CIFS_I(inode)->symlink_target is presumably populated when the inode is
 * instantiated; if it is absent the link cannot be followed (-EOPNOTSUPP).
 */
const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
			  struct delayed_call *done)
{
	char *target_path;

	/* RCU-walk (dentry == NULL): we cannot block, ask for ref-walk retry */
	if (!dentry)
		return ERR_PTR(-ECHILD);

	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!target_path)
		return ERR_PTR(-ENOMEM);

	/* i_lock keeps symlink_target stable while we copy it */
	spin_lock(&inode->i_lock);
	if (likely(CIFS_I(inode)->symlink_target)) {
		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
	} else {
		kfree(target_path);
		target_path = ERR_PTR(-EOPNOTSUPP);
	}
	spin_unlock(&inode->i_lock);

	/* On success the VFS frees the buffer via the delayed call */
	if (!IS_ERR(target_path))
		set_delayed_call(done, kfree_link, target_path);

	return target_path;
}
1194
/* Inode operations for symbolic links */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1201
1202 /*
1203 * Advance the EOF marker to after the source range.
1204 */
1205 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1206 struct cifs_tcon *src_tcon,
1207 unsigned int xid, loff_t src_end)
1208 {
1209 struct cifsFileInfo *writeable_srcfile;
1210 int rc = -EINVAL;
1211
1212 writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1213 if (writeable_srcfile) {
1214 if (src_tcon->ses->server->ops->set_file_size)
1215 rc = src_tcon->ses->server->ops->set_file_size(
1216 xid, src_tcon, writeable_srcfile,
1217 src_inode->i_size, true /* no need to set sparse */);
1218 else
1219 rc = -ENOSYS;
1220 cifsFileInfo_put(writeable_srcfile);
1221 cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1222 }
1223
1224 if (rc < 0)
1225 goto set_failed;
1226
1227 netfs_resize_file(&src_cifsi->netfs, src_end, true);
1228 fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1229 return 0;
1230
1231 set_failed:
1232 return filemap_write_and_wait(src_inode->i_mapping);
1233 }
1234
/*
 * Flush out either the folio that overlaps the beginning of a range in which
 * pos resides or the folio that overlaps the end of a range unless that folio
 * is entirely within the range we're going to invalidate.  We extend the flush
 * bounds to encompass the folio.
 *
 * @_fstart/@_fend are in/out bounds, widened to cover the folio found at
 * @pos so the caller later truncates the full extent that was flushed.
 * @first selects the leading (true) vs trailing (false) edge of the range.
 * Returns 0 when no folio exists at @pos or no flush was needed.
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend = max_t(unsigned long long, *_fend, fend);
	/* Range edge coincides with the folio edge: folio lies entirely
	 * inside the range to be invalidated, so no flush is required. */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1267
/*
 * ->remap_file_range method (reflink/clone).  Flushes the source, makes
 * sure the server-side EOF covers the range, invalidates the destination
 * pages, then asks the server to duplicate extents.  Dedup is not
 * supported.  Returns the cloned length on success, negative errno on
 * failure.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "to the end of the source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Grow the cached/target size if the clone extended the file */
		if (rc == 0 && new_size > i_size_read(target_inode)) {
			truncate_setsize(target_inode, new_size);
			netfs_resize_file(&target_cifsi->netfs, new_size, true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1375
/*
 * Server-side copy via the SMB copychunk mechanism.  Both files must be
 * on the same session; the source is flushed (and its server EOF
 * advanced if short of the range), the destination pages are invalidated,
 * and the server performs the copy.  Returns the number of bytes copied
 * or a negative errno (-EXDEV / -EOPNOTSUPP let the caller fall back to
 * a generic copy).
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				  struct file *src_file, loff_t off,
				  struct file *dst_file, loff_t destoff,
				  size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	unsigned long long destend, fstart, fend;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* Server-side copy only works within one SMB session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Copy may have extended the file: grow cached sizes to match */
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1491
/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}
1503
1504 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1505 struct file *dst_file, loff_t destoff,
1506 size_t len, unsigned int flags)
1507 {
1508 unsigned int xid = get_xid();
1509 ssize_t rc;
1510 struct cifsFileInfo *cfile = dst_file->private_data;
1511
1512 if (cfile->swapfile) {
1513 rc = -EOPNOTSUPP;
1514 free_xid(xid);
1515 return rc;
1516 }
1517
1518 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1519 len, flags);
1520 free_xid(xid);
1521
1522 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1523 rc = splice_copy_file_range(src_file, off, dst_file,
1524 destoff, len);
1525 return rc;
1526 }
1527
/* File operations: default ("loose") caching, byte-range locks enabled */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1547
/* File operations: strict cache mode (strict read/write/fsync/mmap) */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1567
/* File operations: direct I/O mode (uncached reads/writes, copy splice) */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1587
/* As cifs_file_ops but without ->lock/->flock (byte-range locks disabled) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1605
/* As cifs_file_strict_ops but without ->lock/->flock */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1623
/* As cifs_file_direct_ops but without ->lock/->flock */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1641
/* File operations for open directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1652
/* Slab constructor: one-time initialization of a cifsInodeInfo object */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1661
1662 static int __init
1663 cifs_init_inodecache(void)
1664 {
1665 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1666 sizeof(struct cifsInodeInfo),
1667 0, (SLAB_RECLAIM_ACCOUNT|
1668 SLAB_ACCOUNT),
1669 cifs_init_once);
1670 if (cifs_inode_cachep == NULL)
1671 return -ENOMEM;
1672
1673 return 0;
1674 }
1675
/* Tear down the cifsInodeInfo slab cache at module unload */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1686
/*
 * Create the slab caches and mempools for large and small SMB request
 * buffers, clamping the CIFSMaxBufSize/cifs_min_rcv/cifs_min_small module
 * parameters to sane ranges first.  Returns 0 or -ENOMEM (with everything
 * allocated so far torn down again).
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	/* usercopy region spans the whole buffer (SMB data is copied to/from
	 * userspace) */
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	/* Clamp the guaranteed number of large receive buffers to [1, 64] */
	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	/* Clamp the guaranteed number of small buffers to [2, 256] */
	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1767
/* Destroy the request-buffer mempools and their backing slab caches */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1776
/* Create the slab cache and mempool for mid (multiplex id) entries,
 * which track in-flight SMB requests.  Returns 0 or -ENOMEM. */
static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}
1794
/* Destroy the mid-entry mempool and its backing slab cache */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1800
1801 static int __init
1802 init_cifs(void)
1803 {
1804 int rc = 0;
1805 cifs_proc_init();
1806 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1807 /*
1808 * Initialize Global counters
1809 */
1810 atomic_set(&sesInfoAllocCount, 0);
1811 atomic_set(&tconInfoAllocCount, 0);
1812 atomic_set(&tcpSesNextId, 0);
1813 atomic_set(&tcpSesAllocCount, 0);
1814 atomic_set(&tcpSesReconnectCount, 0);
1815 atomic_set(&tconInfoReconnectCount, 0);
1816
1817 atomic_set(&buf_alloc_count, 0);
1818 atomic_set(&small_buf_alloc_count, 0);
1819 #ifdef CONFIG_CIFS_STATS2
1820 atomic_set(&total_buf_alloc_count, 0);
1821 atomic_set(&total_small_buf_alloc_count, 0);
1822 if (slow_rsp_threshold < 1)
1823 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1824 else if (slow_rsp_threshold > 32767)
1825 cifs_dbg(VFS,
1826 "slow response threshold set higher than recommended (0 to 32767)\n");
1827 #endif /* CONFIG_CIFS_STATS2 */
1828
1829 atomic_set(&mid_count, 0);
1830 GlobalCurrentXid = 0;
1831 GlobalTotalActiveXid = 0;
1832 GlobalMaxActiveXid = 0;
1833 spin_lock_init(&cifs_tcp_ses_lock);
1834 spin_lock_init(&GlobalMid_Lock);
1835
1836 cifs_lock_secret = get_random_u32();
1837
1838 if (cifs_max_pending < 2) {
1839 cifs_max_pending = 2;
1840 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1841 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1842 cifs_max_pending = CIFS_MAX_REQ;
1843 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1844 CIFS_MAX_REQ);
1845 }
1846
1847 /* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1848 if (dir_cache_timeout > 65000) {
1849 dir_cache_timeout = 65000;
1850 cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1851 }
1852
1853 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1854 if (!cifsiod_wq) {
1855 rc = -ENOMEM;
1856 goto out_clean_proc;
1857 }
1858
1859 /*
1860 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1861 * so that we don't launch too many worker threads but
1862 * Documentation/core-api/workqueue.rst recommends setting it to 0
1863 */
1864
1865 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1866 decrypt_wq = alloc_workqueue("smb3decryptd",
1867 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1868 if (!decrypt_wq) {
1869 rc = -ENOMEM;
1870 goto out_destroy_cifsiod_wq;
1871 }
1872
1873 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1874 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1875 if (!fileinfo_put_wq) {
1876 rc = -ENOMEM;
1877 goto out_destroy_decrypt_wq;
1878 }
1879
1880 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1881 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1882 if (!cifsoplockd_wq) {
1883 rc = -ENOMEM;
1884 goto out_destroy_fileinfo_put_wq;
1885 }
1886
1887 deferredclose_wq = alloc_workqueue("deferredclose",
1888 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1889 if (!deferredclose_wq) {
1890 rc = -ENOMEM;
1891 goto out_destroy_cifsoplockd_wq;
1892 }
1893
1894 serverclose_wq = alloc_workqueue("serverclose",
1895 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1896 if (!serverclose_wq) {
1897 rc = -ENOMEM;
1898 goto out_destroy_serverclose_wq;
1899 }
1900
1901 rc = cifs_init_inodecache();
1902 if (rc)
1903 goto out_destroy_deferredclose_wq;
1904
1905 rc = init_mids();
1906 if (rc)
1907 goto out_destroy_inodecache;
1908
1909 rc = cifs_init_request_bufs();
1910 if (rc)
1911 goto out_destroy_mids;
1912
1913 #ifdef CONFIG_CIFS_DFS_UPCALL
1914 rc = dfs_cache_init();
1915 if (rc)
1916 goto out_destroy_request_bufs;
1917 #endif /* CONFIG_CIFS_DFS_UPCALL */
1918 #ifdef CONFIG_CIFS_UPCALL
1919 rc = init_cifs_spnego();
1920 if (rc)
1921 goto out_destroy_dfs_cache;
1922 #endif /* CONFIG_CIFS_UPCALL */
1923 #ifdef CONFIG_CIFS_SWN_UPCALL
1924 rc = cifs_genl_init();
1925 if (rc)
1926 goto out_register_key_type;
1927 #endif /* CONFIG_CIFS_SWN_UPCALL */
1928
1929 rc = init_cifs_idmap();
1930 if (rc)
1931 goto out_cifs_swn_init;
1932
1933 rc = register_filesystem(&cifs_fs_type);
1934 if (rc)
1935 goto out_init_cifs_idmap;
1936
1937 rc = register_filesystem(&smb3_fs_type);
1938 if (rc) {
1939 unregister_filesystem(&cifs_fs_type);
1940 goto out_init_cifs_idmap;
1941 }
1942
1943 return 0;
1944
1945 out_init_cifs_idmap:
1946 exit_cifs_idmap();
1947 out_cifs_swn_init:
1948 #ifdef CONFIG_CIFS_SWN_UPCALL
1949 cifs_genl_exit();
1950 out_register_key_type:
1951 #endif
1952 #ifdef CONFIG_CIFS_UPCALL
1953 exit_cifs_spnego();
1954 out_destroy_dfs_cache:
1955 #endif
1956 #ifdef CONFIG_CIFS_DFS_UPCALL
1957 dfs_cache_destroy();
1958 out_destroy_request_bufs:
1959 #endif
1960 cifs_destroy_request_bufs();
1961 out_destroy_mids:
1962 destroy_mids();
1963 out_destroy_inodecache:
1964 cifs_destroy_inodecache();
1965 out_destroy_deferredclose_wq:
1966 destroy_workqueue(deferredclose_wq);
1967 out_destroy_cifsoplockd_wq:
1968 destroy_workqueue(cifsoplockd_wq);
1969 out_destroy_fileinfo_put_wq:
1970 destroy_workqueue(fileinfo_put_wq);
1971 out_destroy_decrypt_wq:
1972 destroy_workqueue(decrypt_wq);
1973 out_destroy_cifsiod_wq:
1974 destroy_workqueue(cifsiod_wq);
1975 out_destroy_serverclose_wq:
1976 destroy_workqueue(serverclose_wq);
1977 out_clean_proc:
1978 cifs_proc_clean();
1979 return rc;
1980 }
1981
/* Module exit: unregister the filesystem types, then tear down the
 * subsystems, caches and workqueues in reverse order of init_cifs(). */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2010
2011 MODULE_AUTHOR("Steve French");
2012 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
2013 MODULE_DESCRIPTION
2014 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2015 "also older servers complying with the SNIA CIFS Specification)");
2016 MODULE_VERSION(CIFS_VERSION);
2017 MODULE_SOFTDEP("ecb");
2018 MODULE_SOFTDEP("hmac");
2019 MODULE_SOFTDEP("md5");
2020 MODULE_SOFTDEP("nls");
2021 MODULE_SOFTDEP("aes");
2022 MODULE_SOFTDEP("cmac");
2023 MODULE_SOFTDEP("sha256");
2024 MODULE_SOFTDEP("sha512");
2025 MODULE_SOFTDEP("aead2");
2026 MODULE_SOFTDEP("ccm");
2027 MODULE_SOFTDEP("gcm");
2028 module_init(init_cifs)
2029 module_exit(exit_cifs)