[thirdparty/linux.git] fs/smb/client/connect.c
smb: client: fix OOB in cifsd when receiving compounded resps
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2011
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 */
8 #include <linux/fs.h>
9 #include <linux/net.h>
10 #include <linux/string.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/signal.h>
13 #include <linux/list.h>
14 #include <linux/wait.h>
15 #include <linux/slab.h>
16 #include <linux/pagemap.h>
17 #include <linux/ctype.h>
18 #include <linux/utsname.h>
19 #include <linux/mempool.h>
20 #include <linux/delay.h>
21 #include <linux/completion.h>
22 #include <linux/kthread.h>
23 #include <linux/pagevec.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/uuid.h>
27 #include <linux/uaccess.h>
28 #include <asm/processor.h>
29 #include <linux/inet.h>
30 #include <linux/module.h>
31 #include <keys/user-type.h>
32 #include <net/ipv6.h>
33 #include <linux/parser.h>
34 #include <linux/bvec.h>
35 #include "cifspdu.h"
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
41 #include "ntlmssp.h"
42 #include "nterr.h"
43 #include "rfc1002pdu.h"
44 #include "fscache.h"
45 #include "smb2proto.h"
46 #include "smbdirect.h"
47 #include "dns_resolve.h"
48 #ifdef CONFIG_CIFS_DFS_UPCALL
49 #include "dfs.h"
50 #include "dfs_cache.h"
51 #endif
52 #include "fs_context.h"
53 #include "cifs_swn.h"
54
55 extern mempool_t *cifs_req_poolp;
56 extern bool disable_legacy_dialects;
57
58 /* FIXME: should these be tunable? */
59 #define TLINK_ERROR_EXPIRE (1 * HZ)
60 #define TLINK_IDLE_EXPIRE (600 * HZ)
61
62 /* Drop the connection to not overload the server */
63 #define MAX_STATUS_IO_TIMEOUT 5
64
65 static int ip_connect(struct TCP_Server_Info *server);
66 static int generic_ip_connect(struct TCP_Server_Info *server);
67 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
68 static void cifs_prune_tlinks(struct work_struct *work);
69
70 /*
71 * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
72 * get their ip addresses changed at some point.
73 *
74 * This should be called with server->srv_mutex held.
75 */
76 static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
77 {
78 int rc;
79 int len;
80 char *unc;
81 struct sockaddr_storage ss;
82
83 if (!server->hostname)
84 return -EINVAL;
85
86 /* if server hostname isn't populated, there's nothing to do here */
87 if (server->hostname[0] == '\0')
88 return 0;
89
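	/* room for the leading "\\" (two characters) plus the terminating NUL */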
90 len = strlen(server->hostname) + 3;
91
92 unc = kmalloc(len, GFP_KERNEL);
93 if (!unc) {
94 cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
95 return -ENOMEM;
96 }
97 scnprintf(unc, len, "\\\\%s", server->hostname);
98
99 spin_lock(&server->srv_lock);
100 ss = server->dstaddr;
101 spin_unlock(&server->srv_lock);
102
103 rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
104 kfree(unc);
105
106 if (rc < 0) {
107 cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
108 __func__, server->hostname, rc);
109 } else {
110 spin_lock(&server->srv_lock);
111 memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr));
112 spin_unlock(&server->srv_lock);
113 rc = 0;
114 }
115
116 return rc;
117 }
118
119 static void smb2_query_server_interfaces(struct work_struct *work)
120 {
121 int rc;
122 int xid;
123 struct cifs_tcon *tcon = container_of(work,
124 struct cifs_tcon,
125 query_interfaces.work);
126
127 /*
128 * query server network interfaces, in case they change
129 */
130 xid = get_xid();
131 rc = SMB3_request_interfaces(xid, tcon, false);
132 free_xid(xid);
133
134 if (rc) {
135 if (rc == -EOPNOTSUPP)
136 return;
137
138 cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
139 __func__, rc);
140 }
141
142 queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
143 (SMB_INTERFACE_POLL_INTERVAL * HZ));
144 }
145
146 /*
147 * Update the tcpStatus for the server.
 148  * This is used to signal the cifsd thread to call cifs_reconnect.
 149  * ONLY the cifsd thread should call cifs_reconnect; any other
 150  * thread should use this function instead.
151 *
152 * @server: the tcp ses for which reconnect is needed
153 * @all_channels: if this needs to be done for all channels
154 */
155 void
156 cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
157 bool all_channels)
158 {
159 struct TCP_Server_Info *pserver;
160 struct cifs_ses *ses;
161 int i;
162
163 /* If server is a channel, select the primary channel */
164 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
165
166 /* if we need to signal just this channel */
167 if (!all_channels) {
168 spin_lock(&server->srv_lock);
169 if (server->tcpStatus != CifsExiting)
170 server->tcpStatus = CifsNeedReconnect;
171 spin_unlock(&server->srv_lock);
172 return;
173 }
174
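	/* otherwise mark every channel of every session on the primary server */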
175 spin_lock(&cifs_tcp_ses_lock);
176 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
177 spin_lock(&ses->chan_lock);
178 for (i = 0; i < ses->chan_count; i++) {
179 if (!ses->chans[i].server)
180 continue;
181
182 spin_lock(&ses->chans[i].server->srv_lock);
183 if (ses->chans[i].server->tcpStatus != CifsExiting)
184 ses->chans[i].server->tcpStatus = CifsNeedReconnect;
185 spin_unlock(&ses->chans[i].server->srv_lock);
186 }
187 spin_unlock(&ses->chan_lock);
188 }
189 spin_unlock(&cifs_tcp_ses_lock);
190 }
191
192 /*
193 * Mark all sessions and tcons for reconnect.
 194  * IMPORTANT: make sure that this gets called only from
 195  * the cifsd thread. Any other thread should use
 196  * cifs_signal_cifsd_for_reconnect instead.
197 *
198 * @server: the tcp ses for which reconnect is needed
199 * @server needs to be previously set to CifsNeedReconnect.
200 * @mark_smb_session: whether even sessions need to be marked
201 */
202 void
203 cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
204 bool mark_smb_session)
205 {
206 struct TCP_Server_Info *pserver;
207 struct cifs_ses *ses, *nses;
208 struct cifs_tcon *tcon;
209
210 /*
211 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
212 * are not used until reconnected.
213 */
214 cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);
215
216 /* If server is a channel, select the primary channel */
217 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
218
219
220 spin_lock(&cifs_tcp_ses_lock);
221 list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
222 /*
 223  * If the channel has been marked for termination, there is nothing
 224  * to do for it; in fact, we cannot even find the channel for the
 225  * server, so it is safe to exit here.
226 */
227 if (server->terminate)
228 break;
229
230 /* check if iface is still active */
231 if (!cifs_chan_is_iface_active(ses, server))
232 cifs_chan_update_iface(ses, server);
233
234 spin_lock(&ses->chan_lock);
235 if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
236 spin_unlock(&ses->chan_lock);
237 continue;
238 }
239
240 if (mark_smb_session)
241 CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
242 else
243 cifs_chan_set_need_reconnect(ses, server);
244
245 cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
246 __func__, ses->chans_need_reconnect);
247
248 /* If all channels need reconnect, then tcon needs reconnect */
249 if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
250 spin_unlock(&ses->chan_lock);
251 continue;
252 }
253 spin_unlock(&ses->chan_lock);
254
255 spin_lock(&ses->ses_lock);
256 ses->ses_status = SES_NEED_RECON;
257 spin_unlock(&ses->ses_lock);
258
259 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
260 tcon->need_reconnect = true;
261 spin_lock(&tcon->tc_lock);
262 tcon->status = TID_NEED_RECON;
263 spin_unlock(&tcon->tc_lock);
264
265 cancel_delayed_work(&tcon->query_interfaces);
266 }
267 if (ses->tcon_ipc) {
268 ses->tcon_ipc->need_reconnect = true;
269 spin_lock(&ses->tcon_ipc->tc_lock);
270 ses->tcon_ipc->status = TID_NEED_RECON;
271 spin_unlock(&ses->tcon_ipc->tc_lock);
272 }
273 }
274 spin_unlock(&cifs_tcp_ses_lock);
275 }
276
277 static void
278 cifs_abort_connection(struct TCP_Server_Info *server)
279 {
280 struct mid_q_entry *mid, *nmid;
281 struct list_head retry_list;
282
283 server->maxBuf = 0;
284 server->max_read = 0;
285
286 /* do not want to be sending data on a socket we are freeing */
287 cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
288 cifs_server_lock(server);
289 if (server->ssocket) {
290 cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
291 server->ssocket->flags);
292 kernel_sock_shutdown(server->ssocket, SHUT_WR);
293 cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
294 server->ssocket->flags);
295 sock_release(server->ssocket);
296 server->ssocket = NULL;
297 }
298 server->sequence_number = 0;
299 server->session_estab = false;
300 kfree_sensitive(server->session_key.response);
301 server->session_key.response = NULL;
302 server->session_key.len = 0;
303 server->lstrp = jiffies;
304
305 /* mark submitted MIDs for retry and issue callback */
306 INIT_LIST_HEAD(&retry_list);
307 cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
308 spin_lock(&server->mid_lock);
309 list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
310 kref_get(&mid->refcount);
311 if (mid->mid_state == MID_REQUEST_SUBMITTED)
312 mid->mid_state = MID_RETRY_NEEDED;
313 list_move(&mid->qhead, &retry_list);
314 mid->mid_flags |= MID_DELETED;
315 }
316 spin_unlock(&server->mid_lock);
317 cifs_server_unlock(server);
318
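	/*
	 * The callbacks run without mid_lock held; the extra reference taken
	 * above keeps each mid alive until release_mid() drops it below.
	 */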
319 cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
320 list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
321 list_del_init(&mid->qhead);
322 mid->callback(mid);
323 release_mid(mid);
324 }
325
326 if (cifs_rdma_enabled(server)) {
327 cifs_server_lock(server);
328 smbd_destroy(server);
329 cifs_server_unlock(server);
330 }
331 }
332
333 static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
334 {
335 spin_lock(&server->srv_lock);
336 server->nr_targets = num_targets;
337 if (server->tcpStatus == CifsExiting) {
338 /* the demux thread will exit normally next time through the loop */
339 spin_unlock(&server->srv_lock);
340 wake_up(&server->response_q);
341 return false;
342 }
343
344 cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
345 trace_smb3_reconnect(server->CurrentMid, server->conn_id,
346 server->hostname);
347 server->tcpStatus = CifsNeedReconnect;
348
349 spin_unlock(&server->srv_lock);
350 return true;
351 }
352
353 /*
354 * cifs tcp session reconnection
355 *
 356  * mark tcp session as reconnecting, so it is temporarily locked
357 * mark all smb sessions as reconnecting for tcp session
358 * reconnect tcp session
359 * wake up waiters on reconnection? - (not needed currently)
360 *
361 * if mark_smb_session is passed as true, unconditionally mark
362 * the smb session (and tcon) for reconnect as well. This value
363 * doesn't really matter for non-multichannel scenario.
364 *
365 */
366 static int __cifs_reconnect(struct TCP_Server_Info *server,
367 bool mark_smb_session)
368 {
369 int rc = 0;
370
371 if (!cifs_tcp_ses_needs_reconnect(server, 1))
372 return 0;
373
374 cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
375
376 cifs_abort_connection(server);
377
378 do {
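		/*
		 * Keep retrying until the transport is re-established (or the
		 * server is exiting): refresh the destination address, redo the
		 * socket/RDMA connect and move to CifsNeedNegotiate, backing
		 * off 3 seconds after each failure.
		 */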
379 try_to_freeze();
380 cifs_server_lock(server);
381
382 if (!cifs_swn_set_server_dstaddr(server)) {
383 /* resolve the hostname again to make sure that IP address is up-to-date */
384 rc = reconn_set_ipaddr_from_hostname(server);
385 cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
386 }
387
388 if (cifs_rdma_enabled(server))
389 rc = smbd_reconnect(server);
390 else
391 rc = generic_ip_connect(server);
392 if (rc) {
393 cifs_server_unlock(server);
394 cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
395 msleep(3000);
396 } else {
397 atomic_inc(&tcpSesReconnectCount);
398 set_credits(server, 1);
399 spin_lock(&server->srv_lock);
400 if (server->tcpStatus != CifsExiting)
401 server->tcpStatus = CifsNeedNegotiate;
402 spin_unlock(&server->srv_lock);
403 cifs_swn_reset_server_dstaddr(server);
404 cifs_server_unlock(server);
405 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
406 }
407 } while (server->tcpStatus == CifsNeedReconnect);
408
409 spin_lock(&server->srv_lock);
410 if (server->tcpStatus == CifsNeedNegotiate)
411 mod_delayed_work(cifsiod_wq, &server->echo, 0);
412 spin_unlock(&server->srv_lock);
413
414 wake_up(&server->response_q);
415 return rc;
416 }
417
418 #ifdef CONFIG_CIFS_DFS_UPCALL
419 static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
420 {
421 int rc;
422 char *hostname;
423
424 if (!cifs_swn_set_server_dstaddr(server)) {
425 if (server->hostname != target) {
426 hostname = extract_hostname(target);
427 if (!IS_ERR(hostname)) {
428 spin_lock(&server->srv_lock);
429 kfree(server->hostname);
430 server->hostname = hostname;
431 spin_unlock(&server->srv_lock);
432 } else {
433 cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
434 __func__, PTR_ERR(hostname));
435 cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
436 server->hostname);
437 }
438 }
439 /* resolve the hostname again to make sure that IP address is up-to-date. */
440 rc = reconn_set_ipaddr_from_hostname(server);
441 cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
442 }
443 /* Reconnect the socket */
444 if (cifs_rdma_enabled(server))
445 rc = smbd_reconnect(server);
446 else
447 rc = generic_ip_connect(server);
448
449 return rc;
450 }
451
452 static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
453 struct dfs_cache_tgt_iterator **target_hint)
454 {
455 int rc;
456 struct dfs_cache_tgt_iterator *tit;
457
458 *target_hint = NULL;
459
460 /* If dfs target list is empty, then reconnect to last server */
461 tit = dfs_cache_get_tgt_iterator(tl);
462 if (!tit)
463 return __reconnect_target_unlocked(server, server->hostname);
464
465 /* Otherwise, try every dfs target in @tl */
466 for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
467 rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
468 if (!rc) {
469 *target_hint = tit;
470 break;
471 }
472 }
473 return rc;
474 }
475
476 static int reconnect_dfs_server(struct TCP_Server_Info *server)
477 {
478 struct dfs_cache_tgt_iterator *target_hint = NULL;
479 DFS_CACHE_TGT_LIST(tl);
480 int num_targets = 0;
481 int rc = 0;
482
483 /*
484 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
485 *
486 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
487 * targets (server->nr_targets). It's also possible that the cached referral was cleared
488 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
489 * refreshing the referral, so, in this case, default it to 1.
490 */
491 mutex_lock(&server->refpath_lock);
492 if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
493 num_targets = dfs_cache_get_nr_tgts(&tl);
494 mutex_unlock(&server->refpath_lock);
495 if (!num_targets)
496 num_targets = 1;
497
498 if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
499 return 0;
500
501 /*
502 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
503 * different server or share during failover. It could be improved by adding some logic to
504 * only do that in case it connects to a different server or share, though.
505 */
506 cifs_mark_tcp_ses_conns_for_reconnect(server, true);
507
508 cifs_abort_connection(server);
509
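	/*
	 * Try each cached DFS target in turn until one accepts the
	 * connection, sleeping 3 seconds between failed attempts.
	 */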
510 do {
511 try_to_freeze();
512 cifs_server_lock(server);
513
514 rc = reconnect_target_unlocked(server, &tl, &target_hint);
515 if (rc) {
516 /* Failed to reconnect socket */
517 cifs_server_unlock(server);
518 cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
519 msleep(3000);
520 continue;
521 }
522 /*
523 * Socket was created. Update tcp session status to CifsNeedNegotiate so that a
524 * process waiting for reconnect will know it needs to re-establish session and tcon
525 * through the reconnected target server.
526 */
527 atomic_inc(&tcpSesReconnectCount);
528 set_credits(server, 1);
529 spin_lock(&server->srv_lock);
530 if (server->tcpStatus != CifsExiting)
531 server->tcpStatus = CifsNeedNegotiate;
532 spin_unlock(&server->srv_lock);
533 cifs_swn_reset_server_dstaddr(server);
534 cifs_server_unlock(server);
535 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
536 } while (server->tcpStatus == CifsNeedReconnect);
537
538 mutex_lock(&server->refpath_lock);
539 dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
540 mutex_unlock(&server->refpath_lock);
541 dfs_cache_free_tgts(&tl);
542
543 /* Need to set up echo worker again once connection has been established */
544 spin_lock(&server->srv_lock);
545 if (server->tcpStatus == CifsNeedNegotiate)
546 mod_delayed_work(cifsiod_wq, &server->echo, 0);
547 spin_unlock(&server->srv_lock);
548
549 wake_up(&server->response_q);
550 return rc;
551 }
552
553 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
554 {
555 mutex_lock(&server->refpath_lock);
556 if (!server->leaf_fullpath) {
557 mutex_unlock(&server->refpath_lock);
558 return __cifs_reconnect(server, mark_smb_session);
559 }
560 mutex_unlock(&server->refpath_lock);
561
562 return reconnect_dfs_server(server);
563 }
564 #else
565 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
566 {
567 return __cifs_reconnect(server, mark_smb_session);
568 }
569 #endif
570
571 static void
572 cifs_echo_request(struct work_struct *work)
573 {
574 int rc;
575 struct TCP_Server_Info *server = container_of(work,
576 struct TCP_Server_Info, echo.work);
577
578 /*
579 * We cannot send an echo if it is disabled.
580 * Also, no need to ping if we got a response recently.
581 */
582
583 if (server->tcpStatus == CifsNeedReconnect ||
584 server->tcpStatus == CifsExiting ||
585 server->tcpStatus == CifsNew ||
586 (server->ops->can_echo && !server->ops->can_echo(server)) ||
587 time_before(jiffies, server->lstrp + server->echo_interval - HZ))
588 goto requeue_echo;
589
590 rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
591 cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
592
593 /* Check witness registrations */
594 cifs_swn_check();
595
596 requeue_echo:
597 queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
598 }
599
600 static bool
601 allocate_buffers(struct TCP_Server_Info *server)
602 {
603 if (!server->bigbuf) {
604 server->bigbuf = (char *)cifs_buf_get();
605 if (!server->bigbuf) {
606 cifs_server_dbg(VFS, "No memory for large SMB response\n");
607 msleep(3000);
608 /* retry will check if exiting */
609 return false;
610 }
611 } else if (server->large_buf) {
612 /* we are reusing a dirty large buf, clear its start */
613 memset(server->bigbuf, 0, HEADER_SIZE(server));
614 }
615
616 if (!server->smallbuf) {
617 server->smallbuf = (char *)cifs_small_buf_get();
618 if (!server->smallbuf) {
619 cifs_server_dbg(VFS, "No memory for SMB response\n");
620 msleep(1000);
621 /* retry will check if exiting */
622 return false;
623 }
624 /* beginning of smb buffer is cleared in our buf_get */
625 } else {
626 /* if existing small buf clear beginning */
627 memset(server->smallbuf, 0, HEADER_SIZE(server));
628 }
629
630 return true;
631 }
632
633 static bool
634 server_unresponsive(struct TCP_Server_Info *server)
635 {
636 /*
637 * We need to wait 3 echo intervals to make sure we handle such
638 * situations right:
639 * 1s client sends a normal SMB request
640 * 2s client gets a response
641 * 30s echo workqueue job pops, and decides we got a response recently
642 * and don't need to send another
643 * ...
644 * 65s kernel_recvmsg times out, and we see that we haven't gotten
645 * a response in >60s.
646 */
647 spin_lock(&server->srv_lock);
648 if ((server->tcpStatus == CifsGood ||
649 server->tcpStatus == CifsNeedNegotiate) &&
650 (!server->ops->can_echo || server->ops->can_echo(server)) &&
651 time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
652 spin_unlock(&server->srv_lock);
653 cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
654 (3 * server->echo_interval) / HZ);
655 cifs_reconnect(server, false);
656 return true;
657 }
658 spin_unlock(&server->srv_lock);
659
660 return false;
661 }
662
663 static inline bool
664 zero_credits(struct TCP_Server_Info *server)
665 {
666 int val;
667
668 spin_lock(&server->req_lock);
669 val = server->credits + server->echo_credits + server->oplock_credits;
670 if (server->in_flight == 0 && val == 0) {
671 spin_unlock(&server->req_lock);
672 return true;
673 }
674 spin_unlock(&server->req_lock);
675 return false;
676 }
677
678 static int
679 cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
680 {
681 int length = 0;
682 int total_read;
683
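	/*
	 * Keep reading until the whole msg_iter is satisfied; transient
	 * errors (-EAGAIN/-EINTR/-ERESTARTSYS) are retried after a short
	 * sleep, while fatal ones trigger a reconnect.
	 */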
684 for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
685 try_to_freeze();
686
687 /* reconnect if no credits and no requests in flight */
688 if (zero_credits(server)) {
689 cifs_reconnect(server, false);
690 return -ECONNABORTED;
691 }
692
693 if (server_unresponsive(server))
694 return -ECONNABORTED;
695 if (cifs_rdma_enabled(server) && server->smbd_conn)
696 length = smbd_recv(server->smbd_conn, smb_msg);
697 else
698 length = sock_recvmsg(server->ssocket, smb_msg, 0);
699
700 spin_lock(&server->srv_lock);
701 if (server->tcpStatus == CifsExiting) {
702 spin_unlock(&server->srv_lock);
703 return -ESHUTDOWN;
704 }
705
706 if (server->tcpStatus == CifsNeedReconnect) {
707 spin_unlock(&server->srv_lock);
708 cifs_reconnect(server, false);
709 return -ECONNABORTED;
710 }
711 spin_unlock(&server->srv_lock);
712
713 if (length == -ERESTARTSYS ||
714 length == -EAGAIN ||
715 length == -EINTR) {
716 /*
717 * Minimum sleep to prevent looping, allowing socket
718 * to clear and app threads to set tcpStatus
719 * CifsNeedReconnect if server hung.
720 */
721 usleep_range(1000, 2000);
722 length = 0;
723 continue;
724 }
725
726 if (length <= 0) {
727 cifs_dbg(FYI, "Received no data or error: %d\n", length);
728 cifs_reconnect(server, false);
729 return -ECONNABORTED;
730 }
731 }
732 return total_read;
733 }
734
735 int
736 cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
737 unsigned int to_read)
738 {
739 struct msghdr smb_msg = {};
740 struct kvec iov = {.iov_base = buf, .iov_len = to_read};
741 iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
742
743 return cifs_readv_from_socket(server, &smb_msg);
744 }
745
746 ssize_t
747 cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
748 {
749 struct msghdr smb_msg = {};
750
751 /*
 752  * iov_iter_discard already sets smb_msg.type, count and iov_offset,
 753  * and cifs_readv_from_socket sets msg_control and msg_controllen,
 754  * so there is little left to initialize in struct msghdr.
755 */
756 iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);
757
758 return cifs_readv_from_socket(server, &smb_msg);
759 }
760
761 int
762 cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
763 unsigned int page_offset, unsigned int to_read)
764 {
765 struct msghdr smb_msg = {};
766 struct bio_vec bv;
767
768 bvec_set_page(&bv, page, to_read, page_offset);
769 iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
770 return cifs_readv_from_socket(server, &smb_msg);
771 }
772
773 int
774 cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter,
775 unsigned int to_read)
776 {
777 struct msghdr smb_msg = { .msg_iter = *iter };
778 int ret;
779
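	/* read at most @to_read bytes; advance the caller's iterator only by what was actually received */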
780 iov_iter_truncate(&smb_msg.msg_iter, to_read);
781 ret = cifs_readv_from_socket(server, &smb_msg);
782 if (ret > 0)
783 iov_iter_advance(iter, ret);
784 return ret;
785 }
786
787 static bool
788 is_smb_response(struct TCP_Server_Info *server, unsigned char type)
789 {
790 /*
 791  * The first byte of the big-endian RFC1002 length field is
 792  * actually not part of the length but the frame type, with the
 793  * most common value, zero, indicating a regular data message.
794 */
795 switch (type) {
796 case RFC1002_SESSION_MESSAGE:
797 /* Regular SMB response */
798 return true;
799 case RFC1002_SESSION_KEEP_ALIVE:
800 cifs_dbg(FYI, "RFC 1002 session keep alive\n");
801 break;
802 case RFC1002_POSITIVE_SESSION_RESPONSE:
803 cifs_dbg(FYI, "RFC 1002 positive session response\n");
804 break;
805 case RFC1002_NEGATIVE_SESSION_RESPONSE:
806 /*
807 * We get this from Windows 98 instead of an error on
808 * SMB negprot response.
809 */
810 cifs_dbg(FYI, "RFC 1002 negative session response\n");
811 /* give server a second to clean up */
812 msleep(1000);
813 /*
 814  * Always try port 445 first on reconnect since some servers
 815  * NACK us if we ever connected to port 139 (the NACK occurs
 816  * because we do not begin with an RFC1001 session
 817  * initialize frame).
818 */
819 cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
820 cifs_reconnect(server, true);
821 break;
822 default:
823 cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
824 cifs_reconnect(server, true);
825 }
826
827 return false;
828 }
829
830 void
831 dequeue_mid(struct mid_q_entry *mid, bool malformed)
832 {
833 #ifdef CONFIG_CIFS_STATS2
834 mid->when_received = jiffies;
835 #endif
836 spin_lock(&mid->server->mid_lock);
837 if (!malformed)
838 mid->mid_state = MID_RESPONSE_RECEIVED;
839 else
840 mid->mid_state = MID_RESPONSE_MALFORMED;
841 /*
842 * Trying to handle/dequeue a mid after the send_recv()
843 * function has finished processing it is a bug.
844 */
845 if (mid->mid_flags & MID_DELETED) {
846 spin_unlock(&mid->server->mid_lock);
847 pr_warn_once("trying to dequeue a deleted mid\n");
848 } else {
849 list_del_init(&mid->qhead);
850 mid->mid_flags |= MID_DELETED;
851 spin_unlock(&mid->server->mid_lock);
852 }
853 }
854
855 static unsigned int
856 smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
857 {
858 struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
859
860 /*
861 * SMB1 does not use credits.
862 */
863 if (is_smb1(server))
864 return 0;
865
866 return le16_to_cpu(shdr->CreditRequest);
867 }
868
869 static void
870 handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
871 char *buf, int malformed)
872 {
873 if (server->ops->check_trans2 &&
874 server->ops->check_trans2(mid, server, buf, malformed))
875 return;
876 mid->credits_received = smb2_get_credits_from_hdr(buf, server);
877 mid->resp_buf = buf;
878 mid->large_buf = server->large_buf;
879 /* Was previous buf put in mpx struct for multi-rsp? */
880 if (!mid->multiRsp) {
881 /* smb buffer will be freed by user thread */
882 if (server->large_buf)
883 server->bigbuf = NULL;
884 else
885 server->smallbuf = NULL;
886 }
887 dequeue_mid(mid, malformed);
888 }
889
890 int
891 cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
892 {
893 bool srv_sign_required = server->sec_mode & server->vals->signing_required;
894 bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
895 bool mnt_sign_enabled;
896
897 /*
898 * Is signing required by mnt options? If not then check
899 * global_secflags to see if it is there.
900 */
901 if (!mnt_sign_required)
902 mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
903 CIFSSEC_MUST_SIGN);
904
905 /*
906 * If signing is required then it's automatically enabled too,
907 * otherwise, check to see if the secflags allow it.
908 */
909 mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
910 (global_secflags & CIFSSEC_MAY_SIGN);
911
912 /* If server requires signing, does client allow it? */
913 if (srv_sign_required) {
914 if (!mnt_sign_enabled) {
915 cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
916 return -EOPNOTSUPP;
917 }
918 server->sign = true;
919 }
920
921 /* If client requires signing, does server allow it? */
922 if (mnt_sign_required) {
923 if (!srv_sign_enabled) {
924 cifs_dbg(VFS, "Server does not support signing!\n");
925 return -EOPNOTSUPP;
926 }
927 server->sign = true;
928 }
929
930 if (cifs_rdma_enabled(server) && server->sign)
931 cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
932
933 return 0;
934 }
935
936 static noinline_for_stack void
937 clean_demultiplex_info(struct TCP_Server_Info *server)
938 {
939 int length;
940
941 /* take it off the list, if it's not already */
942 spin_lock(&server->srv_lock);
943 list_del_init(&server->tcp_ses_list);
944 spin_unlock(&server->srv_lock);
945
946 cancel_delayed_work_sync(&server->echo);
947
948 spin_lock(&server->srv_lock);
949 server->tcpStatus = CifsExiting;
950 spin_unlock(&server->srv_lock);
951 wake_up_all(&server->response_q);
952
953 /* check if we have blocked requests that need to free */
954 spin_lock(&server->req_lock);
955 if (server->credits <= 0)
956 server->credits = 1;
957 spin_unlock(&server->req_lock);
958 /*
959 * Although there should not be any requests blocked on this queue it
 960  * cannot hurt to be paranoid and try to wake up requests that may
 961  * have been blocked when more than 50 at a time were on the wire to the
962 * same server - they now will see the session is in exit state and get
963 * out of SendReceive.
964 */
965 wake_up_all(&server->request_q);
966 /* give those requests time to exit */
967 msleep(125);
968 if (cifs_rdma_enabled(server))
969 smbd_destroy(server);
970 if (server->ssocket) {
971 sock_release(server->ssocket);
972 server->ssocket = NULL;
973 }
974
975 if (!list_empty(&server->pending_mid_q)) {
976 struct list_head dispose_list;
977 struct mid_q_entry *mid_entry;
978 struct list_head *tmp, *tmp2;
979
980 INIT_LIST_HEAD(&dispose_list);
981 spin_lock(&server->mid_lock);
982 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
983 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
984 cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
985 kref_get(&mid_entry->refcount);
986 mid_entry->mid_state = MID_SHUTDOWN;
987 list_move(&mid_entry->qhead, &dispose_list);
988 mid_entry->mid_flags |= MID_DELETED;
989 }
990 spin_unlock(&server->mid_lock);
991
992 /* now walk dispose list and issue callbacks */
993 list_for_each_safe(tmp, tmp2, &dispose_list) {
994 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
995 cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
996 list_del_init(&mid_entry->qhead);
997 mid_entry->callback(mid_entry);
998 release_mid(mid_entry);
999 }
1000 /* 1/8th of sec is more than enough time for them to exit */
1001 msleep(125);
1002 }
1003
1004 if (!list_empty(&server->pending_mid_q)) {
1005 /*
 1006  * mpx threads have not exited yet, so give them at least the smb
 1007  * send timeout time for long ops.
1008 *
1009 * Due to delays on oplock break requests, we need to wait at
1010 * least 45 seconds before giving up on a request getting a
1011 * response and going ahead and killing cifsd.
1012 */
1013 cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
1014 msleep(46000);
1015 /*
 1016  * If threads still have not exited, they are probably never
 1017  * coming home; not much else we can do but free the memory.
1018 */
1019 }
1020
1021 kfree(server->leaf_fullpath);
1022 kfree(server);
1023
1024 length = atomic_dec_return(&tcpSesAllocCount);
1025 if (length > 0)
1026 mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1027 }
1028
1029 static int
1030 standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1031 {
1032 int length;
1033 char *buf = server->smallbuf;
1034 unsigned int pdu_length = server->pdu_size;
1035
1036 /* make sure this will fit in a large buffer */
1037 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
1038 HEADER_PREAMBLE_SIZE(server)) {
1039 cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
1040 cifs_reconnect(server, true);
1041 return -ECONNABORTED;
1042 }
1043
1044 /* switch to large buffer if too big for a small one */
1045 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
1046 server->large_buf = true;
1047 memcpy(server->bigbuf, buf, server->total_read);
1048 buf = server->bigbuf;
1049 }
1050
1051 /* now read the rest */
1052 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
1053 pdu_length - MID_HEADER_SIZE(server));
1054
1055 if (length < 0)
1056 return length;
1057 server->total_read += length;
1058
1059 dump_smb(buf, server->total_read);
1060
1061 return cifs_handle_standard(server, mid);
1062 }
1063
1064 int
1065 cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1066 {
1067 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
1068 int rc;
1069
1070 /*
1071 * We know that we received enough to get to the MID as we
1072 * checked the pdu_length earlier. Now check to see
1073 * if the rest of the header is OK.
1074 *
1075 * 48 bytes is enough to display the header and a little bit
1076 * into the payload for debugging purposes.
1077 */
1078 rc = server->ops->check_message(buf, server->total_read, server);
1079 if (rc)
1080 cifs_dump_mem("Bad SMB: ", buf,
1081 min_t(unsigned int, server->total_read, 48));
1082
1083 if (server->ops->is_session_expired &&
1084 server->ops->is_session_expired(buf)) {
1085 cifs_reconnect(server, true);
1086 return -1;
1087 }
1088
1089 if (server->ops->is_status_pending &&
1090 server->ops->is_status_pending(buf, server))
1091 return -1;
1092
1093 if (!mid)
1094 return rc;
1095
1096 handle_mid(mid, server, buf, rc);
1097 return 0;
1098 }
1099
1100 static void
1101 smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
1102 {
1103 struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
1104 int scredits, in_flight;
1105
1106 /*
1107 * SMB1 does not use credits.
1108 */
1109 if (is_smb1(server))
1110 return;
1111
1112 if (shdr->CreditRequest) {
1113 spin_lock(&server->req_lock);
1114 server->credits += le16_to_cpu(shdr->CreditRequest);
1115 scredits = server->credits;
1116 in_flight = server->in_flight;
1117 spin_unlock(&server->req_lock);
1118 wake_up(&server->request_q);
1119
1120 trace_smb3_hdr_credits(server->CurrentMid,
1121 server->conn_id, server->hostname, scredits,
1122 le16_to_cpu(shdr->CreditRequest), in_flight);
1123 cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
1124 __func__, le16_to_cpu(shdr->CreditRequest),
1125 scredits);
1126 }
1127 }
1128
1129
1130 static int
1131 cifs_demultiplex_thread(void *p)
1132 {
1133 int i, num_mids, length;
1134 struct TCP_Server_Info *server = p;
1135 unsigned int pdu_length;
1136 unsigned int next_offset;
1137 char *buf = NULL;
1138 struct task_struct *task_to_wake = NULL;
1139 struct mid_q_entry *mids[MAX_COMPOUND];
1140 char *bufs[MAX_COMPOUND];
1141 unsigned int noreclaim_flag, num_io_timeout = 0;
1142 bool pending_reconnect = false;
1143
1144 noreclaim_flag = memalloc_noreclaim_save();
1145 cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
1146
1147 length = atomic_inc_return(&tcpSesAllocCount);
1148 if (length > 1)
1149 mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1150
1151 set_freezable();
1152 allow_kernel_signal(SIGKILL);
1153 while (server->tcpStatus != CifsExiting) {
1154 if (try_to_freeze())
1155 continue;
1156
1157 if (!allocate_buffers(server))
1158 continue;
1159
1160 server->large_buf = false;
1161 buf = server->smallbuf;
1162 pdu_length = 4; /* enough to get RFC1001 header */
1163
1164 length = cifs_read_from_socket(server, buf, pdu_length);
1165 if (length < 0)
1166 continue;
1167
1168 if (is_smb1(server))
1169 server->total_read = length;
1170 else
1171 server->total_read = 0;
1172
1173 /*
1174 * The right amount was read from socket - 4 bytes,
1175 * so we can now interpret the length field.
1176 */
1177 pdu_length = get_rfc1002_length(buf);
1178
1179 cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
1180 if (!is_smb_response(server, buf[0]))
1181 continue;
1182
1183 pending_reconnect = false;
1184 next_pdu:
1185 server->pdu_size = pdu_length;
1186
1187 /* make sure we have enough to get to the MID */
1188 if (server->pdu_size < MID_HEADER_SIZE(server)) {
1189 cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
1190 server->pdu_size);
1191 cifs_reconnect(server, true);
1192 continue;
1193 }
1194
1195 /* read down to the MID */
1196 length = cifs_read_from_socket(server,
1197 buf + HEADER_PREAMBLE_SIZE(server),
1198 MID_HEADER_SIZE(server));
1199 if (length < 0)
1200 continue;
1201 server->total_read += length;
1202
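		/*
		 * For SMB3, next_header() reports where the next PDU of a
		 * compounded or encrypted frame starts, so only the current
		 * PDU is processed in this pass.
		 */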
1203 if (server->ops->next_header) {
1204 if (server->ops->next_header(server, buf, &next_offset)) {
1205 cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
1206 __func__, next_offset);
1207 cifs_reconnect(server, true);
1208 continue;
1209 }
1210 if (next_offset)
1211 server->pdu_size = next_offset;
1212 }
1213
1214 memset(mids, 0, sizeof(mids));
1215 memset(bufs, 0, sizeof(bufs));
1216 num_mids = 0;
1217
1218 if (server->ops->is_transform_hdr &&
1219 server->ops->receive_transform &&
1220 server->ops->is_transform_hdr(buf)) {
1221 length = server->ops->receive_transform(server,
1222 mids,
1223 bufs,
1224 &num_mids);
1225 } else {
1226 mids[0] = server->ops->find_mid(server, buf);
1227 bufs[0] = buf;
1228 num_mids = 1;
1229
1230 if (!mids[0] || !mids[0]->receive)
1231 length = standard_receive3(server, mids[0]);
1232 else
1233 length = mids[0]->receive(server, mids[0]);
1234 }
1235
1236 if (length < 0) {
1237 for (i = 0; i < num_mids; i++)
1238 if (mids[i])
1239 release_mid(mids[i]);
1240 continue;
1241 }
1242
1243 if (server->ops->is_status_io_timeout &&
1244 server->ops->is_status_io_timeout(buf)) {
1245 num_io_timeout++;
1246 if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
1247 cifs_server_dbg(VFS,
1248 "Number of request timeouts exceeded %d. Reconnecting",
1249 MAX_STATUS_IO_TIMEOUT);
1250
1251 pending_reconnect = true;
1252 num_io_timeout = 0;
1253 }
1254 }
1255
1256 server->lstrp = jiffies;
1257
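		/*
		 * Dispatch the decoded responses: invoke each mid's callback
		 * (unless more responses are still expected for it), and handle
		 * unmatched frames such as oplock breaks.
		 */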
1258 for (i = 0; i < num_mids; i++) {
1259 if (mids[i] != NULL) {
1260 mids[i]->resp_buf_size = server->pdu_size;
1261
1262 if (bufs[i] != NULL) {
1263 if (server->ops->is_network_name_deleted &&
1264 server->ops->is_network_name_deleted(bufs[i],
1265 server)) {
1266 cifs_server_dbg(FYI,
1267 "Share deleted. Reconnect needed");
1268 }
1269 }
1270
1271 if (!mids[i]->multiRsp || mids[i]->multiEnd)
1272 mids[i]->callback(mids[i]);
1273
1274 release_mid(mids[i]);
1275 } else if (server->ops->is_oplock_break &&
1276 server->ops->is_oplock_break(bufs[i],
1277 server)) {
1278 smb2_add_credits_from_hdr(bufs[i], server);
1279 cifs_dbg(FYI, "Received oplock break\n");
1280 } else {
1281 cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
1282 atomic_read(&mid_count));
1283 cifs_dump_mem("Received Data is: ", bufs[i],
1284 HEADER_SIZE(server));
1285 smb2_add_credits_from_hdr(bufs[i], server);
1286 #ifdef CONFIG_CIFS_DEBUG2
1287 if (server->ops->dump_detail)
1288 server->ops->dump_detail(bufs[i],
1289 server);
1290 cifs_dump_mids(server);
1291 #endif /* CIFS_DEBUG2 */
1292 }
1293 }
1294
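		/*
		 * If the RFC1002 frame contains more data than the PDU just
		 * handled (compounded responses), subtract what was consumed
		 * and loop back to parse the next PDU.
		 */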
1295 if (pdu_length > server->pdu_size) {
1296 if (!allocate_buffers(server))
1297 continue;
1298 pdu_length -= server->pdu_size;
1299 server->total_read = 0;
1300 server->large_buf = false;
1301 buf = server->smallbuf;
1302 goto next_pdu;
1303 }
1304
1305 /* do this reconnect at the very end after processing all MIDs */
1306 if (pending_reconnect)
1307 cifs_reconnect(server, true);
1308
1309 } /* end while !EXITING */
1310
1311 /* buffer usually freed in free_mid - need to free it here on exit */
1312 cifs_buf_release(server->bigbuf);
1313 if (server->smallbuf) /* no sense logging a debug message if NULL */
1314 cifs_small_buf_release(server->smallbuf);
1315
1316 task_to_wake = xchg(&server->tsk, NULL);
1317 clean_demultiplex_info(server);
1318
1319 /* if server->tsk was NULL then wait for a signal before exiting */
1320 if (!task_to_wake) {
1321 set_current_state(TASK_INTERRUPTIBLE);
1322 while (!signal_pending(current)) {
1323 schedule();
1324 set_current_state(TASK_INTERRUPTIBLE);
1325 }
1326 set_current_state(TASK_RUNNING);
1327 }
1328
1329 memalloc_noreclaim_restore(noreclaim_flag);
1330 module_put_and_kthread_exit(0);
1331 }
1332
1333 int
1334 cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs)
1335 {
1336 struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1337 struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1338 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1339 struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1340
1341 switch (srcaddr->sa_family) {
1342 case AF_UNSPEC:
1343 switch (rhs->sa_family) {
1344 case AF_UNSPEC:
1345 return 0;
1346 case AF_INET:
1347 case AF_INET6:
1348 return 1;
1349 default:
1350 return -1;
1351 }
1352 case AF_INET: {
1353 switch (rhs->sa_family) {
1354 case AF_UNSPEC:
1355 return -1;
1356 case AF_INET:
1357 return memcmp(saddr4, vaddr4,
1358 sizeof(struct sockaddr_in));
1359 case AF_INET6:
1360 return 1;
1361 default:
1362 return -1;
1363 }
1364 }
1365 case AF_INET6: {
1366 switch (rhs->sa_family) {
1367 case AF_UNSPEC:
1368 case AF_INET:
1369 return -1;
1370 case AF_INET6:
1371 return memcmp(saddr6,
1372 vaddr6,
1373 sizeof(struct sockaddr_in6));
1374 default:
1375 return -1;
1376 }
1377 }
1378 default:
1379 return -1; /* don't expect to be here */
1380 }
1381 }
1382
1383 /*
1384 * Returns true if srcaddr isn't specified and rhs isn't specified, or
1385 * if srcaddr is specified and matches the IP address of the rhs argument
1386 */
1387 bool
1388 cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
1389 {
1390 switch (srcaddr->sa_family) {
1391 case AF_UNSPEC:
1392 return (rhs->sa_family == AF_UNSPEC);
1393 case AF_INET: {
1394 struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1395 struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1396 return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
1397 }
1398 case AF_INET6: {
1399 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1400 struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1401 return (ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr)
1402 && saddr6->sin6_scope_id == vaddr6->sin6_scope_id);
1403 }
1404 default:
1405 WARN_ON(1);
1406 return false; /* don't expect to be here */
1407 }
1408 }
1409
1410 /*
 1411  * If no port is specified in the addr structure, we try to match port 445,
 1412  * and if that fails, port 139. This should be called only if the address
 1413  * families of server and addr are equal.
1414 */
1415 static bool
1416 match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
1417 {
1418 __be16 port, *sport;
1419
1420 /* SMBDirect manages its own ports, don't match it here */
1421 if (server->rdma)
1422 return true;
1423
1424 switch (addr->sa_family) {
1425 case AF_INET:
1426 sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
1427 port = ((struct sockaddr_in *) addr)->sin_port;
1428 break;
1429 case AF_INET6:
1430 sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
1431 port = ((struct sockaddr_in6 *) addr)->sin6_port;
1432 break;
1433 default:
1434 WARN_ON(1);
1435 return false;
1436 }
1437
1438 if (!port) {
1439 port = htons(CIFS_PORT);
1440 if (port == *sport)
1441 return true;
1442
1443 port = htons(RFC1001_PORT);
1444 }
1445
1446 return port == *sport;
1447 }
1448
1449 static bool match_server_address(struct TCP_Server_Info *server, struct sockaddr *addr)
1450 {
1451 if (!cifs_match_ipaddr(addr, (struct sockaddr *)&server->dstaddr))
1452 return false;
1453
1454 return true;
1455 }
1456
1457 static bool
1458 match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1459 {
1460 /*
1461 * The select_sectype function should either return the ctx->sectype
1462 * that was specified, or "Unspecified" if that sectype was not
1463 * compatible with the given NEGOTIATE request.
1464 */
1465 if (server->ops->select_sectype(server, ctx->sectype)
1466 == Unspecified)
1467 return false;
1468
1469 /*
1470 * Now check if signing mode is acceptable. No need to check
1471 * global_secflags at this point since if MUST_SIGN is set then
1472 * the server->sign had better be too.
1473 */
1474 if (ctx->sign && !server->sign)
1475 return false;
1476
1477 return true;
1478 }
1479
1480 /* this function must be called with srv_lock held */
1481 static int match_server(struct TCP_Server_Info *server,
1482 struct smb3_fs_context *ctx,
1483 bool match_super)
1484 {
1485 struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
1486
1487 lockdep_assert_held(&server->srv_lock);
1488
1489 if (ctx->nosharesock)
1490 return 0;
1491
1492 /* this server does not share socket */
1493 if (server->nosharesock)
1494 return 0;
1495
 1496          /* If multidialect negotiation, see if existing sessions match one */
1497 if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
1498 if (server->vals->protocol_id < SMB30_PROT_ID)
1499 return 0;
1500 } else if (strcmp(ctx->vals->version_string,
1501 SMBDEFAULT_VERSION_STRING) == 0) {
1502 if (server->vals->protocol_id < SMB21_PROT_ID)
1503 return 0;
1504 } else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
1505 return 0;
1506
1507 if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
1508 return 0;
1509
1510 if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr,
1511 (struct sockaddr *)&server->srcaddr))
1512 return 0;
1513 /*
1514 * When matching cifs.ko superblocks (@match_super == true), we can't
1515 * really match either @server->leaf_fullpath or @server->dstaddr
1516 * directly since this @server might belong to a completely different
1517 * server -- in case of domain-based DFS referrals or DFS links -- as
1518 * provided earlier by mount(2) through 'source' and 'ip' options.
1519 *
1520 * Otherwise, match the DFS referral in @server->leaf_fullpath or the
1521 * destination address in @server->dstaddr.
1522 *
1523 * When using 'nodfs' mount option, we avoid sharing it with DFS
1524 * connections as they might failover.
1525 */
1526 if (!match_super) {
1527 if (!ctx->nodfs) {
1528 if (server->leaf_fullpath) {
1529 if (!ctx->leaf_fullpath ||
1530 strcasecmp(server->leaf_fullpath,
1531 ctx->leaf_fullpath))
1532 return 0;
1533 } else if (ctx->leaf_fullpath) {
1534 return 0;
1535 }
1536 } else if (server->leaf_fullpath) {
1537 return 0;
1538 }
1539 }
1540
1541 /*
1542 * Match for a regular connection (address/hostname/port) which has no
1543 * DFS referrals set.
1544 */
1545 if (!server->leaf_fullpath &&
1546 (strcasecmp(server->hostname, ctx->server_hostname) ||
1547 !match_server_address(server, addr) ||
1548 !match_port(server, addr)))
1549 return 0;
1550
1551 if (!match_security(server, ctx))
1552 return 0;
1553
1554 if (server->echo_interval != ctx->echo_interval * HZ)
1555 return 0;
1556
1557 if (server->rdma != ctx->rdma)
1558 return 0;
1559
1560 if (server->ignore_signature != ctx->ignore_signature)
1561 return 0;
1562
1563 if (server->min_offload != ctx->min_offload)
1564 return 0;
1565
1566 return 1;
1567 }
1568
1569 struct TCP_Server_Info *
1570 cifs_find_tcp_session(struct smb3_fs_context *ctx)
1571 {
1572 struct TCP_Server_Info *server;
1573
1574 spin_lock(&cifs_tcp_ses_lock);
1575 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
1576 spin_lock(&server->srv_lock);
1577 /*
1578 * Skip ses channels since they're only handled in lower layers
1579 * (e.g. cifs_send_recv).
1580 */
1581 if (SERVER_IS_CHAN(server) ||
1582 !match_server(server, ctx, false)) {
1583 spin_unlock(&server->srv_lock);
1584 continue;
1585 }
1586 spin_unlock(&server->srv_lock);
1587
1588 ++server->srv_count;
1589 spin_unlock(&cifs_tcp_ses_lock);
1590 cifs_dbg(FYI, "Existing tcp session with server found\n");
1591 return server;
1592 }
1593 spin_unlock(&cifs_tcp_ses_lock);
1594 return NULL;
1595 }
1596
1597 void
1598 cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
1599 {
1600 struct task_struct *task;
1601
1602 spin_lock(&cifs_tcp_ses_lock);
1603 if (--server->srv_count > 0) {
1604 spin_unlock(&cifs_tcp_ses_lock);
1605 return;
1606 }
1607
1608 /* srv_count can never go negative */
1609 WARN_ON(server->srv_count < 0);
1610
1611 put_net(cifs_net_ns(server));
1612
1613 list_del_init(&server->tcp_ses_list);
1614 spin_unlock(&cifs_tcp_ses_lock);
1615
1616 cancel_delayed_work_sync(&server->echo);
1617
1618 if (from_reconnect)
1619 /*
1620 * Avoid deadlock here: reconnect work calls
1621 * cifs_put_tcp_session() at its end. Need to be sure
1622 * that reconnect work does nothing with server pointer after
1623 * that step.
1624 */
1625 cancel_delayed_work(&server->reconnect);
1626 else
1627 cancel_delayed_work_sync(&server->reconnect);
1628
1629 /* For secondary channels, we pick up ref-count on the primary server */
1630 if (SERVER_IS_CHAN(server))
1631 cifs_put_tcp_session(server->primary_server, from_reconnect);
1632
1633 spin_lock(&server->srv_lock);
1634 server->tcpStatus = CifsExiting;
1635 spin_unlock(&server->srv_lock);
1636
1637 cifs_crypto_secmech_release(server);
1638
1639 kfree_sensitive(server->session_key.response);
1640 server->session_key.response = NULL;
1641 server->session_key.len = 0;
1642 kfree(server->hostname);
1643 server->hostname = NULL;
1644
1645 task = xchg(&server->tsk, NULL);
1646 if (task)
1647 send_sig(SIGKILL, task, 1);
1648 }
1649
1650 struct TCP_Server_Info *
1651 cifs_get_tcp_session(struct smb3_fs_context *ctx,
1652 struct TCP_Server_Info *primary_server)
1653 {
1654 struct TCP_Server_Info *tcp_ses = NULL;
1655 int rc;
1656
1657 cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);
1658
1659 /* see if we already have a matching tcp_ses */
1660 tcp_ses = cifs_find_tcp_session(ctx);
1661 if (tcp_ses)
1662 return tcp_ses;
1663
1664 tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
1665 if (!tcp_ses) {
1666 rc = -ENOMEM;
1667 goto out_err;
1668 }
1669
1670 tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
1671 if (!tcp_ses->hostname) {
1672 rc = -ENOMEM;
1673 goto out_err;
1674 }
1675
1676 if (ctx->leaf_fullpath) {
1677 tcp_ses->leaf_fullpath = kstrdup(ctx->leaf_fullpath, GFP_KERNEL);
1678 if (!tcp_ses->leaf_fullpath) {
1679 rc = -ENOMEM;
1680 goto out_err;
1681 }
1682 }
1683
1684 if (ctx->nosharesock)
1685 tcp_ses->nosharesock = true;
1686
1687 tcp_ses->ops = ctx->ops;
1688 tcp_ses->vals = ctx->vals;
1689 cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
1690
1691 tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
1692 tcp_ses->noblockcnt = ctx->rootfs;
1693 tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
1694 tcp_ses->noautotune = ctx->noautotune;
1695 tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
1696 tcp_ses->rdma = ctx->rdma;
1697 tcp_ses->in_flight = 0;
1698 tcp_ses->max_in_flight = 0;
1699 tcp_ses->credits = 1;
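	/* a secondary channel keeps a reference on its primary server for its whole lifetime */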
1700 if (primary_server) {
1701 spin_lock(&cifs_tcp_ses_lock);
1702 ++primary_server->srv_count;
1703 spin_unlock(&cifs_tcp_ses_lock);
1704 tcp_ses->primary_server = primary_server;
1705 }
1706 init_waitqueue_head(&tcp_ses->response_q);
1707 init_waitqueue_head(&tcp_ses->request_q);
1708 INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
1709 mutex_init(&tcp_ses->_srv_mutex);
1710 memcpy(tcp_ses->workstation_RFC1001_name,
1711 ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1712 memcpy(tcp_ses->server_RFC1001_name,
1713 ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
1714 tcp_ses->session_estab = false;
1715 tcp_ses->sequence_number = 0;
1716 tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
1717 tcp_ses->reconnect_instance = 1;
1718 tcp_ses->lstrp = jiffies;
1719 tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
1720 spin_lock_init(&tcp_ses->req_lock);
1721 spin_lock_init(&tcp_ses->srv_lock);
1722 spin_lock_init(&tcp_ses->mid_lock);
1723 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
1724 INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
1725 INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
1726 INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
1727 mutex_init(&tcp_ses->reconnect_mutex);
1728 #ifdef CONFIG_CIFS_DFS_UPCALL
1729 mutex_init(&tcp_ses->refpath_lock);
1730 #endif
1731 memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
1732 sizeof(tcp_ses->srcaddr));
1733 memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
1734 sizeof(tcp_ses->dstaddr));
1735 if (ctx->use_client_guid)
1736 memcpy(tcp_ses->client_guid, ctx->client_guid,
1737 SMB2_CLIENT_GUID_SIZE);
1738 else
1739 generate_random_uuid(tcp_ses->client_guid);
1740 /*
 1741  * at this point we are the only ones with a pointer
 1742  * to the struct since the kernel thread is not created yet;
 1743  * no need to spinlock this init of tcpStatus or srv_count
1744 */
1745 tcp_ses->tcpStatus = CifsNew;
1746 ++tcp_ses->srv_count;
1747
1748 if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
1749 ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
1750 tcp_ses->echo_interval = ctx->echo_interval * HZ;
1751 else
1752 tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
1753 if (tcp_ses->rdma) {
1754 #ifndef CONFIG_CIFS_SMB_DIRECT
1755 cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
1756 rc = -ENOENT;
1757 goto out_err_crypto_release;
1758 #endif
1759 tcp_ses->smbd_conn = smbd_get_connection(
1760 tcp_ses, (struct sockaddr *)&ctx->dstaddr);
1761 if (tcp_ses->smbd_conn) {
1762 cifs_dbg(VFS, "RDMA transport established\n");
1763 rc = 0;
1764 goto smbd_connected;
1765 } else {
1766 rc = -ENOENT;
1767 goto out_err_crypto_release;
1768 }
1769 }
1770 rc = ip_connect(tcp_ses);
1771 if (rc < 0) {
1772 cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
1773 goto out_err_crypto_release;
1774 }
1775 smbd_connected:
1776 /*
1777 * since we're in a cifs function already, we know that
1778 * this will succeed. No need for try_module_get().
1779 */
1780 __module_get(THIS_MODULE);
1781 tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
1782 tcp_ses, "cifsd");
1783 if (IS_ERR(tcp_ses->tsk)) {
1784 rc = PTR_ERR(tcp_ses->tsk);
1785 cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
1786 module_put(THIS_MODULE);
1787 goto out_err_crypto_release;
1788 }
1789 tcp_ses->min_offload = ctx->min_offload;
1790 /*
 1791  * the demultiplex thread has been spawned above, so update
 1792  * tcpStatus under srv_lock to keep it consistent with the
 1793  * reader in cifsd
1794 */
1795 spin_lock(&tcp_ses->srv_lock);
1796 tcp_ses->tcpStatus = CifsNeedNegotiate;
1797 spin_unlock(&tcp_ses->srv_lock);
1798
1799 if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
1800 tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
1801 else
1802 tcp_ses->max_credits = ctx->max_credits;
1803
1804 tcp_ses->nr_targets = 1;
1805 tcp_ses->ignore_signature = ctx->ignore_signature;
1806 /* thread spawned, put it on the list */
1807 spin_lock(&cifs_tcp_ses_lock);
1808 list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
1809 spin_unlock(&cifs_tcp_ses_lock);
1810
1811 /* queue echo request delayed work */
1812 queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
1813
1814 return tcp_ses;
1815
1816 out_err_crypto_release:
1817 cifs_crypto_secmech_release(tcp_ses);
1818
1819 put_net(cifs_net_ns(tcp_ses));
1820
1821 out_err:
1822 if (tcp_ses) {
1823 if (SERVER_IS_CHAN(tcp_ses))
1824 cifs_put_tcp_session(tcp_ses->primary_server, false);
1825 kfree(tcp_ses->hostname);
1826 kfree(tcp_ses->leaf_fullpath);
1827 if (tcp_ses->ssocket)
1828 sock_release(tcp_ses->ssocket);
1829 kfree(tcp_ses);
1830 }
1831 return ERR_PTR(rc);
1832 }
1833
1834 /* this function must be called with ses_lock and chan_lock held */
1835 static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
1836 {
1837 if (ctx->sectype != Unspecified &&
1838 ctx->sectype != ses->sectype)
1839 return 0;
1840
1841 /*
 1842  * If an existing session is limited to fewer channels than
 1843  * requested, it should not be reused
1844 */
1845 if (ses->chan_max < ctx->max_channels)
1846 return 0;
1847
1848 switch (ses->sectype) {
1849 case Kerberos:
1850 if (!uid_eq(ctx->cred_uid, ses->cred_uid))
1851 return 0;
1852 break;
1853 default:
1854 /* NULL username means anonymous session */
1855 if (ses->user_name == NULL) {
1856 if (!ctx->nullauth)
1857 return 0;
1858 break;
1859 }
1860
1861 /* anything else takes username/password */
1862 if (strncmp(ses->user_name,
1863 ctx->username ? ctx->username : "",
1864 CIFS_MAX_USERNAME_LEN))
1865 return 0;
1866 if ((ctx->username && strlen(ctx->username) != 0) &&
1867 ses->password != NULL &&
1868 strncmp(ses->password,
1869 ctx->password ? ctx->password : "",
1870 CIFS_MAX_PASSWORD_LEN))
1871 return 0;
1872 }
1873
1874 if (strcmp(ctx->local_nls->charset, ses->local_nls->charset))
1875 return 0;
1876
1877 return 1;
1878 }
1879
1880 /**
1881 * cifs_setup_ipc - helper to setup the IPC tcon for the session
1882 * @ses: smb session to issue the request on
1883 * @ctx: the superblock configuration context to use for building the
1884 * new tree connection for the IPC (interprocess communication RPC)
1885 *
1886 * A new IPC connection is made and stored in the session
1887 * tcon_ipc. The IPC tcon has the same lifetime as the session.
1888 */
1889 static int
1890 cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
1891 {
1892 int rc = 0, xid;
1893 struct cifs_tcon *tcon;
1894 char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
1895 bool seal = false;
1896 struct TCP_Server_Info *server = ses->server;
1897
1898 /*
1899 * If the mount request that resulted in the creation of the
1900 * session requires encryption, force IPC to be encrypted too.
1901 */
1902 if (ctx->seal) {
1903 if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
1904 seal = true;
1905 else {
1906 cifs_server_dbg(VFS,
1907 "IPC: server doesn't support encryption\n");
1908 return -EOPNOTSUPP;
1909 }
1910 }
1911
1912 /* no need to set up directory caching on an IPC share, so pass in false */
1913 tcon = tcon_info_alloc(false);
1914 if (tcon == NULL)
1915 return -ENOMEM;
1916
1917 spin_lock(&server->srv_lock);
1918 scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
1919 spin_unlock(&server->srv_lock);
1920
1921 xid = get_xid();
1922 tcon->ses = ses;
1923 tcon->ipc = true;
1924 tcon->seal = seal;
1925 rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
1926 free_xid(xid);
1927
1928 if (rc) {
1929 cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
1930 tconInfoFree(tcon);
1931 goto out;
1932 }
1933
1934 cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
1935
1936 spin_lock(&tcon->tc_lock);
1937 tcon->status = TID_GOOD;
1938 spin_unlock(&tcon->tc_lock);
1939 ses->tcon_ipc = tcon;
1940 out:
1941 return rc;
1942 }
1943
1944 /**
1945 * cifs_free_ipc - helper to release the session IPC tcon
1946 * @ses: smb session to unmount the IPC from
1947 *
1948 * Needs to be called every time a session is destroyed.
1949 *
1950 * On session close, the IPC is closed and the server must release all tcons of the session.
1951 * No need to send a tree disconnect here.
1952 *
1953 * Skipping the tree disconnect also keeps the server from closing durable and resilient files
1954 * on session close, as specified in MS-SMB2 3.3.5.6 "Receiving an SMB2 LOGOFF Request".
1955 */
1956 static int
1957 cifs_free_ipc(struct cifs_ses *ses)
1958 {
1959 struct cifs_tcon *tcon = ses->tcon_ipc;
1960
1961 if (tcon == NULL)
1962 return 0;
1963
1964 tconInfoFree(tcon);
1965 ses->tcon_ipc = NULL;
1966 return 0;
1967 }
1968
1969 static struct cifs_ses *
1970 cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1971 {
1972 struct cifs_ses *ses, *ret = NULL;
1973
1974 spin_lock(&cifs_tcp_ses_lock);
1975 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1976 spin_lock(&ses->ses_lock);
1977 if (ses->ses_status == SES_EXITING) {
1978 spin_unlock(&ses->ses_lock);
1979 continue;
1980 }
1981 spin_lock(&ses->chan_lock);
1982 if (match_session(ses, ctx)) {
1983 spin_unlock(&ses->chan_lock);
1984 spin_unlock(&ses->ses_lock);
1985 ret = ses;
1986 break;
1987 }
1988 spin_unlock(&ses->chan_lock);
1989 spin_unlock(&ses->ses_lock);
1990 }
1991 if (ret)
1992 cifs_smb_ses_inc_refcount(ret);
1993 spin_unlock(&cifs_tcp_ses_lock);
1994 return ret;
1995 }
1996
1997 void __cifs_put_smb_ses(struct cifs_ses *ses)
1998 {
1999 struct TCP_Server_Info *server = ses->server;
2000 unsigned int xid;
2001 size_t i;
2002 int rc;
2003
2004 spin_lock(&ses->ses_lock);
2005 if (ses->ses_status == SES_EXITING) {
2006 spin_unlock(&ses->ses_lock);
2007 return;
2008 }
2009 spin_unlock(&ses->ses_lock);
2010
2011 cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
2012 cifs_dbg(FYI,
2013 "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
2014
2015 spin_lock(&cifs_tcp_ses_lock);
2016 if (--ses->ses_count > 0) {
2017 spin_unlock(&cifs_tcp_ses_lock);
2018 return;
2019 }
2020 spin_lock(&ses->ses_lock);
2021 if (ses->ses_status == SES_GOOD)
2022 ses->ses_status = SES_EXITING;
2023 spin_unlock(&ses->ses_lock);
2024 spin_unlock(&cifs_tcp_ses_lock);
2025
2026 /* ses_count can never go negative */
2027 WARN_ON(ses->ses_count < 0);
2028
2029 spin_lock(&ses->ses_lock);
2030 if (ses->ses_status == SES_EXITING && server->ops->logoff) {
2031 spin_unlock(&ses->ses_lock);
2032 cifs_free_ipc(ses);
2033 xid = get_xid();
2034 rc = server->ops->logoff(xid, ses);
2035 if (rc)
2036 cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
2037 __func__, rc);
2038 _free_xid(xid);
2039 } else {
2040 spin_unlock(&ses->ses_lock);
2041 cifs_free_ipc(ses);
2042 }
2043
2044 spin_lock(&cifs_tcp_ses_lock);
2045 list_del_init(&ses->smb_ses_list);
2046 spin_unlock(&cifs_tcp_ses_lock);
2047
2048 /* close any extra channels */
2049 for (i = 1; i < ses->chan_count; i++) {
2050 if (ses->chans[i].iface) {
2051 kref_put(&ses->chans[i].iface->refcount, release_iface);
2052 ses->chans[i].iface = NULL;
2053 }
2054 cifs_put_tcp_session(ses->chans[i].server, 0);
2055 ses->chans[i].server = NULL;
2056 }
2057
2058 /* we now account for primary channel in iface->refcount */
2059 if (ses->chans[0].iface) {
2060 kref_put(&ses->chans[0].iface->refcount, release_iface);
2061 ses->chans[0].server = NULL;
2062 }
2063
2064 sesInfoFree(ses);
2065 cifs_put_tcp_session(server, 0);
2066 }
2067
2068 #ifdef CONFIG_KEYS
2069
2070 /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
2071 #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
2072
2073 /* Populate username and pw fields from keyring if possible */
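/*
 * Illustrative example (the values are hypothetical, not taken from this
 * file): a "logon" key with description "cifs:a:192.168.1.50" and payload
 * "jdoe:Secret123" - e.g. one added by the cifscreds(1) helper - would be
 * found by the address lookup below and parsed into ctx->username = "jdoe"
 * and ctx->password = "Secret123"; a "cifs:d:EXAMPLE" key would be tried
 * next as a domain-wide fallback.
 */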
2074 static int
2075 cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
2076 {
2077 int rc = 0;
2078 int is_domain = 0;
2079 const char *delim, *payload;
2080 char *desc;
2081 ssize_t len;
2082 struct key *key;
2083 struct TCP_Server_Info *server = ses->server;
2084 struct sockaddr_in *sa;
2085 struct sockaddr_in6 *sa6;
2086 const struct user_key_payload *upayload;
2087
2088 desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
2089 if (!desc)
2090 return -ENOMEM;
2091
2092 /* try to find an address key first */
2093 switch (server->dstaddr.ss_family) {
2094 case AF_INET:
2095 sa = (struct sockaddr_in *)&server->dstaddr;
2096 sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
2097 break;
2098 case AF_INET6:
2099 sa6 = (struct sockaddr_in6 *)&server->dstaddr;
2100 sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
2101 break;
2102 default:
2103 cifs_dbg(FYI, "Bad ss_family (%hu)\n",
2104 server->dstaddr.ss_family);
2105 rc = -EINVAL;
2106 goto out_err;
2107 }
2108
2109 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
2110 key = request_key(&key_type_logon, desc, "");
2111 if (IS_ERR(key)) {
2112 if (!ses->domainName) {
2113 cifs_dbg(FYI, "domainName is NULL\n");
2114 rc = PTR_ERR(key);
2115 goto out_err;
2116 }
2117
2118 /* didn't work, try to find a domain key */
2119 sprintf(desc, "cifs:d:%s", ses->domainName);
2120 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
2121 key = request_key(&key_type_logon, desc, "");
2122 if (IS_ERR(key)) {
2123 rc = PTR_ERR(key);
2124 goto out_err;
2125 }
2126 is_domain = 1;
2127 }
2128
2129 down_read(&key->sem);
2130 upayload = user_key_payload_locked(key);
2131 if (IS_ERR_OR_NULL(upayload)) {
2132 rc = upayload ? PTR_ERR(upayload) : -EINVAL;
2133 goto out_key_put;
2134 }
2135
2136 /* find first : in payload */
2137 payload = upayload->data;
2138 delim = strnchr(payload, upayload->datalen, ':');
2139 cifs_dbg(FYI, "payload=%s\n", payload);
2140 if (!delim) {
2141 cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
2142 upayload->datalen);
2143 rc = -EINVAL;
2144 goto out_key_put;
2145 }
2146
2147 len = delim - payload;
2148 if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
2149 cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
2150 len);
2151 rc = -EINVAL;
2152 goto out_key_put;
2153 }
2154
2155 ctx->username = kstrndup(payload, len, GFP_KERNEL);
2156 if (!ctx->username) {
2157 cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
2158 len);
2159 rc = -ENOMEM;
2160 goto out_key_put;
2161 }
2162 cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username);
2163
2164 len = key->datalen - (len + 1);
2165 if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
2166 cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
2167 rc = -EINVAL;
2168 kfree(ctx->username);
2169 ctx->username = NULL;
2170 goto out_key_put;
2171 }
2172
2173 ++delim;
2174 ctx->password = kstrndup(delim, len, GFP_KERNEL);
2175 if (!ctx->password) {
2176 cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
2177 len);
2178 rc = -ENOMEM;
2179 kfree(ctx->username);
2180 ctx->username = NULL;
2181 goto out_key_put;
2182 }
2183
2184 /*
2185 * If we have a domain key then we must set the domainName in the
2186 * ctx for the request.
2187 */
2188 if (is_domain && ses->domainName) {
2189 ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL);
2190 if (!ctx->domainname) {
2191 cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
2192 len);
2193 rc = -ENOMEM;
2194 kfree(ctx->username);
2195 ctx->username = NULL;
2196 kfree_sensitive(ctx->password);
2197 ctx->password = NULL;
2198 goto out_key_put;
2199 }
2200 }
2201
2202 strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name));
2203
2204 out_key_put:
2205 up_read(&key->sem);
2206 key_put(key);
2207 out_err:
2208 kfree(desc);
2209 cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
2210 return rc;
2211 }
2212 #else /* ! CONFIG_KEYS */
2213 static inline int
2214 cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
2215 struct cifs_ses *ses __attribute__((unused)))
2216 {
2217 return -ENOSYS;
2218 }
2219 #endif /* CONFIG_KEYS */
2220
2221 /**
2222 * cifs_get_smb_ses - get a session matching @ctx data from @server
2223 * @server: server to setup the session to
2224 * @ctx: superblock configuration context to use to setup the session
2225 *
2226 * This function assumes it is being called from cifs_mount() where we
2227 * already got a server reference (server refcount +1). See
2228 * cifs_get_tcon() for refcount explanations.
2229 */
2230 struct cifs_ses *
2231 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
2232 {
2233 int rc = 0;
2234 unsigned int xid;
2235 struct cifs_ses *ses;
2236 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
2237 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
2238
2239 xid = get_xid();
2240
2241 ses = cifs_find_smb_ses(server, ctx);
2242 if (ses) {
2243 cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
2244 ses->ses_status);
2245
2246 spin_lock(&ses->chan_lock);
2247 if (cifs_chan_needs_reconnect(ses, server)) {
2248 spin_unlock(&ses->chan_lock);
2249 cifs_dbg(FYI, "Session needs reconnect\n");
2250
2251 mutex_lock(&ses->session_mutex);
2252 rc = cifs_negotiate_protocol(xid, ses, server);
2253 if (rc) {
2254 mutex_unlock(&ses->session_mutex);
2255 /* problem -- put our ses reference */
2256 cifs_put_smb_ses(ses);
2257 free_xid(xid);
2258 return ERR_PTR(rc);
2259 }
2260
2261 rc = cifs_setup_session(xid, ses, server,
2262 ctx->local_nls);
2263 if (rc) {
2264 mutex_unlock(&ses->session_mutex);
2265 /* problem -- put our reference */
2266 cifs_put_smb_ses(ses);
2267 free_xid(xid);
2268 return ERR_PTR(rc);
2269 }
2270 mutex_unlock(&ses->session_mutex);
2271
2272 spin_lock(&ses->chan_lock);
2273 }
2274 spin_unlock(&ses->chan_lock);
2275
2276 /* existing SMB ses has a server reference already */
2277 cifs_put_tcp_session(server, 0);
2278 free_xid(xid);
2279 return ses;
2280 }
2281
2282 rc = -ENOMEM;
2283
2284 cifs_dbg(FYI, "Existing smb sess not found\n");
2285 ses = sesInfoAlloc();
2286 if (ses == NULL)
2287 goto get_ses_fail;
2288
2289 /* new SMB session uses our server ref */
2290 ses->server = server;
2291 if (server->dstaddr.ss_family == AF_INET6)
2292 sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
2293 else
2294 sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);
2295
2296 if (ctx->username) {
2297 ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
2298 if (!ses->user_name)
2299 goto get_ses_fail;
2300 }
2301
2302 /* ctx->password freed at unmount */
2303 if (ctx->password) {
2304 ses->password = kstrdup(ctx->password, GFP_KERNEL);
2305 if (!ses->password)
2306 goto get_ses_fail;
2307 }
2308 if (ctx->domainname) {
2309 ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
2310 if (!ses->domainName)
2311 goto get_ses_fail;
2312 }
2313
2314 strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));
2315
2316 if (ctx->domainauto)
2317 ses->domainAuto = ctx->domainauto;
2318 ses->cred_uid = ctx->cred_uid;
2319 ses->linux_uid = ctx->linux_uid;
2320
2321 ses->sectype = ctx->sectype;
2322 ses->sign = ctx->sign;
2323 ses->local_nls = load_nls(ctx->local_nls->charset);
2324
2325 /* add server as first channel */
2326 spin_lock(&ses->chan_lock);
2327 ses->chans[0].server = server;
2328 ses->chan_count = 1;
2329 ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
2330 ses->chans_need_reconnect = 1;
2331 spin_unlock(&ses->chan_lock);
2332
2333 mutex_lock(&ses->session_mutex);
2334 rc = cifs_negotiate_protocol(xid, ses, server);
2335 if (!rc)
2336 rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
2337 mutex_unlock(&ses->session_mutex);
2338
2339 /* each channel uses a different signing key */
2340 spin_lock(&ses->chan_lock);
2341 memcpy(ses->chans[0].signkey, ses->smb3signingkey,
2342 sizeof(ses->smb3signingkey));
2343 spin_unlock(&ses->chan_lock);
2344
2345 if (rc)
2346 goto get_ses_fail;
2347
2348 /*
2349 * Success: put it on the list and add it as the first channel.
2350 * Note: the session becomes active soon after this, so take the
2351 * appropriate locks before changing anything in the session.
2352 */
2353 spin_lock(&cifs_tcp_ses_lock);
2354 ses->dfs_root_ses = ctx->dfs_root_ses;
2355 if (ses->dfs_root_ses)
2356 ses->dfs_root_ses->ses_count++;
2357 list_add(&ses->smb_ses_list, &server->smb_ses_list);
2358 spin_unlock(&cifs_tcp_ses_lock);
2359
2360 cifs_setup_ipc(ses, ctx);
2361
2362 free_xid(xid);
2363
2364 return ses;
2365
2366 get_ses_fail:
2367 sesInfoFree(ses);
2368 free_xid(xid);
2369 return ERR_PTR(rc);
2370 }
2371
2372 /* this function must be called with tc_lock held */
2373 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
2374 {
2375 struct TCP_Server_Info *server = tcon->ses->server;
2376
2377 if (tcon->status == TID_EXITING)
2378 return 0;
2379
2380 if (tcon->origin_fullpath) {
2381 if (!ctx->source ||
2382 !dfs_src_pathname_equal(ctx->source,
2383 tcon->origin_fullpath))
2384 return 0;
2385 } else if (!server->leaf_fullpath &&
2386 strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) {
2387 return 0;
2388 }
2389 if (tcon->seal != ctx->seal)
2390 return 0;
2391 if (tcon->snapshot_time != ctx->snapshot_time)
2392 return 0;
2393 if (tcon->handle_timeout != ctx->handle_timeout)
2394 return 0;
2395 if (tcon->no_lease != ctx->no_lease)
2396 return 0;
2397 if (tcon->nodelete != ctx->nodelete)
2398 return 0;
2399 return 1;
2400 }
2401
2402 static struct cifs_tcon *
2403 cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
2404 {
2405 struct cifs_tcon *tcon;
2406
2407 spin_lock(&cifs_tcp_ses_lock);
2408 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
2409 spin_lock(&tcon->tc_lock);
2410 if (!match_tcon(tcon, ctx)) {
2411 spin_unlock(&tcon->tc_lock);
2412 continue;
2413 }
2414 ++tcon->tc_count;
2415 spin_unlock(&tcon->tc_lock);
2416 spin_unlock(&cifs_tcp_ses_lock);
2417 return tcon;
2418 }
2419 spin_unlock(&cifs_tcp_ses_lock);
2420 return NULL;
2421 }
2422
2423 void
2424 cifs_put_tcon(struct cifs_tcon *tcon)
2425 {
2426 unsigned int xid;
2427 struct cifs_ses *ses;
2428
2429 /*
2430 * IPC tcons share the lifetime of their session and are
2431 * destroyed in the session put function.
2432 */
2433 if (tcon == NULL || tcon->ipc)
2434 return;
2435
2436 ses = tcon->ses;
2437 cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
2438 spin_lock(&cifs_tcp_ses_lock);
2439 spin_lock(&tcon->tc_lock);
2440 if (--tcon->tc_count > 0) {
2441 spin_unlock(&tcon->tc_lock);
2442 spin_unlock(&cifs_tcp_ses_lock);
2443 return;
2444 }
2445
2446 /* tc_count can never go negative */
2447 WARN_ON(tcon->tc_count < 0);
2448
2449 list_del_init(&tcon->tcon_list);
2450 tcon->status = TID_EXITING;
2451 spin_unlock(&tcon->tc_lock);
2452 spin_unlock(&cifs_tcp_ses_lock);
2453
2454 /* cancel polling of interfaces */
2455 cancel_delayed_work_sync(&tcon->query_interfaces);
2456 #ifdef CONFIG_CIFS_DFS_UPCALL
2457 cancel_delayed_work_sync(&tcon->dfs_cache_work);
2458 #endif
2459
2460 if (tcon->use_witness) {
2461 int rc;
2462
2463 rc = cifs_swn_unregister(tcon);
2464 if (rc < 0) {
2465 cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
2466 __func__, rc);
2467 }
2468 }
2469
2470 xid = get_xid();
2471 if (ses->server->ops->tree_disconnect)
2472 ses->server->ops->tree_disconnect(xid, tcon);
2473 _free_xid(xid);
2474
2475 cifs_fscache_release_super_cookie(tcon);
2476 tconInfoFree(tcon);
2477 cifs_put_smb_ses(ses);
2478 }
2479
2480 /**
2481 * cifs_get_tcon - get a tcon matching @ctx data from @ses
2482 * @ses: smb session to issue the request on
2483 * @ctx: the superblock configuration context to use for building the new tcon
2484 *
2485 * - tcon refcount is the number of mount points using the tcon.
2486 * - ses refcount is the number of tcon using the session.
2487 *
2488 * 1. This function assumes it is being called from cifs_mount() where
2489 * we already got a session reference (ses refcount +1).
2490 *
2491 * 2. Since we're in the context of adding a mount point, the end
2492 * result should be either:
2493 *
2494 * a) a new tcon already allocated with refcount=1 (1 mount point) and
2495 * its session refcount incremented (1 new tcon). This +1 was
2496 * already done in (1).
2497 *
2498 * b) an existing tcon with refcount+1 (add a mount point to it) and
2499 * identical ses refcount (no new tcon). Because of (1) we need to
2500 * decrement the ses refcount.
2501 */
2502 static struct cifs_tcon *
2503 cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
2504 {
2505 struct cifs_tcon *tcon;
2506 bool nohandlecache;
2507 int rc, xid;
2508
2509 tcon = cifs_find_tcon(ses, ctx);
2510 if (tcon) {
2511 /*
2512 * tcon has refcount already incremented but we need to
2513 * decrement extra ses reference gotten by caller (case b)
2514 */
2515 cifs_dbg(FYI, "Found match on UNC path\n");
2516 cifs_put_smb_ses(ses);
2517 return tcon;
2518 }
2519
2520 if (!ses->server->ops->tree_connect) {
2521 rc = -ENOSYS;
2522 goto out_fail;
2523 }
2524
2525 if (ses->server->dialect >= SMB20_PROT_ID &&
2526 (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING))
2527 nohandlecache = ctx->nohandlecache;
2528 else
2529 nohandlecache = true;
2530 tcon = tcon_info_alloc(!nohandlecache);
2531 if (tcon == NULL) {
2532 rc = -ENOMEM;
2533 goto out_fail;
2534 }
2535 tcon->nohandlecache = nohandlecache;
2536
2537 if (ctx->snapshot_time) {
2538 if (ses->server->vals->protocol_id == 0) {
2539 cifs_dbg(VFS,
2540 "Use SMB2 or later for snapshot mount option\n");
2541 rc = -EOPNOTSUPP;
2542 goto out_fail;
2543 } else
2544 tcon->snapshot_time = ctx->snapshot_time;
2545 }
2546
2547 if (ctx->handle_timeout) {
2548 if (ses->server->vals->protocol_id == 0) {
2549 cifs_dbg(VFS,
2550 "Use SMB2.1 or later for handle timeout option\n");
2551 rc = -EOPNOTSUPP;
2552 goto out_fail;
2553 } else
2554 tcon->handle_timeout = ctx->handle_timeout;
2555 }
2556
2557 tcon->ses = ses;
2558 if (ctx->password) {
2559 tcon->password = kstrdup(ctx->password, GFP_KERNEL);
2560 if (!tcon->password) {
2561 rc = -ENOMEM;
2562 goto out_fail;
2563 }
2564 }
2565
2566 if (ctx->seal) {
2567 if (ses->server->vals->protocol_id == 0) {
2568 cifs_dbg(VFS,
2569 "SMB3 or later required for encryption\n");
2570 rc = -EOPNOTSUPP;
2571 goto out_fail;
2572 } else if (tcon->ses->server->capabilities &
2573 SMB2_GLOBAL_CAP_ENCRYPTION)
2574 tcon->seal = true;
2575 else {
2576 cifs_dbg(VFS, "Encryption is not supported on share\n");
2577 rc = -EOPNOTSUPP;
2578 goto out_fail;
2579 }
2580 }
2581
2582 if (ctx->linux_ext) {
2583 if (ses->server->posix_ext_supported) {
2584 tcon->posix_extensions = true;
2585 pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
2586 } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
2587 (strcmp(ses->server->vals->version_string,
2588 SMB3ANY_VERSION_STRING) == 0) ||
2589 (strcmp(ses->server->vals->version_string,
2590 SMBDEFAULT_VERSION_STRING) == 0)) {
2591 cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
2592 rc = -EOPNOTSUPP;
2593 goto out_fail;
2594 } else {
2595 cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
2596 "disabled but required for POSIX extensions\n");
2597 rc = -EOPNOTSUPP;
2598 goto out_fail;
2599 }
2600 }
2601
2602 xid = get_xid();
2603 rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
2604 ctx->local_nls);
2605 free_xid(xid);
2606 cifs_dbg(FYI, "Tcon rc = %d\n", rc);
2607 if (rc)
2608 goto out_fail;
2609
2610 tcon->use_persistent = false;
2611 /* check if SMB2 or later, CIFS does not support persistent handles */
2612 if (ctx->persistent) {
2613 if (ses->server->vals->protocol_id == 0) {
2614 cifs_dbg(VFS,
2615 "SMB3 or later required for persistent handles\n");
2616 rc = -EOPNOTSUPP;
2617 goto out_fail;
2618 } else if (ses->server->capabilities &
2619 SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
2620 tcon->use_persistent = true;
2621 else /* persistent handles requested but not supported */ {
2622 cifs_dbg(VFS,
2623 "Persistent handles not supported on share\n");
2624 rc = -EOPNOTSUPP;
2625 goto out_fail;
2626 }
2627 } else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
2628 && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
2629 && (ctx->nopersistent == false)) {
2630 cifs_dbg(FYI, "enabling persistent handles\n");
2631 tcon->use_persistent = true;
2632 } else if (ctx->resilient) {
2633 if (ses->server->vals->protocol_id == 0) {
2634 cifs_dbg(VFS,
2635 "SMB2.1 or later required for resilient handles\n");
2636 rc = -EOPNOTSUPP;
2637 goto out_fail;
2638 }
2639 tcon->use_resilient = true;
2640 }
2641
2642 tcon->use_witness = false;
2643 if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
2644 if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
2645 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
2646 /*
2647 * Set the witness-in-use flag first so that registration
2648 * can be retried from the echo task if it fails here
2649 */
2650 tcon->use_witness = true;
2651 /* And try to register immediately */
2652 rc = cifs_swn_register(tcon);
2653 if (rc < 0) {
2654 cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
2655 goto out_fail;
2656 }
2657 } else {
2658 /* TODO: try to extend for non-cluster uses (eg multichannel) */
2659 cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
2660 rc = -EOPNOTSUPP;
2661 goto out_fail;
2662 }
2663 } else {
2664 cifs_dbg(VFS, "SMB3 or later required for witness option\n");
2665 rc = -EOPNOTSUPP;
2666 goto out_fail;
2667 }
2668 }
2669
2670 /* If the user really knows what they are doing they can override */
2671 if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
2672 if (ctx->cache_ro)
2673 cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
2674 else if (ctx->cache_rw)
2675 cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
2676 }
2677
2678 if (ctx->no_lease) {
2679 if (ses->server->vals->protocol_id == 0) {
2680 cifs_dbg(VFS,
2681 "SMB2 or later required for nolease option\n");
2682 rc = -EOPNOTSUPP;
2683 goto out_fail;
2684 } else
2685 tcon->no_lease = ctx->no_lease;
2686 }
2687
2688 /*
2689 * We can have only one retry value per connection to a share, so for
2690 * resources mounted more than once to the same server share, the last
2691 * value passed in for the retry flag is used.
2692 */
2693 tcon->retry = ctx->retry;
2694 tcon->nocase = ctx->nocase;
2695 tcon->broken_sparse_sup = ctx->no_sparse;
2696 tcon->max_cached_dirs = ctx->max_cached_dirs;
2697 tcon->nodelete = ctx->nodelete;
2698 tcon->local_lease = ctx->local_lease;
2699 INIT_LIST_HEAD(&tcon->pending_opens);
2700 tcon->status = TID_GOOD;
2701
2702 INIT_DELAYED_WORK(&tcon->query_interfaces,
2703 smb2_query_server_interfaces);
2704 if (ses->server->dialect >= SMB30_PROT_ID &&
2705 (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
2706 /* schedule query interfaces poll */
2707 queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
2708 (SMB_INTERFACE_POLL_INTERVAL * HZ));
2709 }
2710 #ifdef CONFIG_CIFS_DFS_UPCALL
2711 INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
2712 #endif
2713 spin_lock(&cifs_tcp_ses_lock);
2714 list_add(&tcon->tcon_list, &ses->tcon_list);
2715 spin_unlock(&cifs_tcp_ses_lock);
2716
2717 return tcon;
2718
2719 out_fail:
2720 tconInfoFree(tcon);
2721 return ERR_PTR(rc);
2722 }
2723
2724 void
2725 cifs_put_tlink(struct tcon_link *tlink)
2726 {
2727 if (!tlink || IS_ERR(tlink))
2728 return;
2729
2730 if (!atomic_dec_and_test(&tlink->tl_count) ||
2731 test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
2732 tlink->tl_time = jiffies;
2733 return;
2734 }
2735
2736 if (!IS_ERR(tlink_tcon(tlink)))
2737 cifs_put_tcon(tlink_tcon(tlink));
2738 kfree(tlink);
2739 return;
2740 }
2741
2742 static int
2743 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2744 {
2745 struct cifs_sb_info *old = CIFS_SB(sb);
2746 struct cifs_sb_info *new = mnt_data->cifs_sb;
2747 unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
2748 unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
2749
2750 if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
2751 return 0;
2752
2753 if (old->mnt_cifs_serverino_autodisabled)
2754 newflags &= ~CIFS_MOUNT_SERVER_INUM;
2755
2756 if (oldflags != newflags)
2757 return 0;
2758
2759 /*
2760 * We only want to share the sb if no r/wsize was specified, or if the
2761 * specified r/wsize is greater than or equal to the existing one.
2762 */
2763 if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
2764 return 0;
2765
2766 if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
2767 return 0;
2768
2769 if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
2770 !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
2771 return 0;
2772
2773 if (old->ctx->file_mode != new->ctx->file_mode ||
2774 old->ctx->dir_mode != new->ctx->dir_mode)
2775 return 0;
2776
2777 if (strcmp(old->local_nls->charset, new->local_nls->charset))
2778 return 0;
2779
2780 if (old->ctx->acregmax != new->ctx->acregmax)
2781 return 0;
2782 if (old->ctx->acdirmax != new->ctx->acdirmax)
2783 return 0;
2784 if (old->ctx->closetimeo != new->ctx->closetimeo)
2785 return 0;
2786
2787 return 1;
2788 }
2789
2790 static int match_prepath(struct super_block *sb,
2791 struct cifs_tcon *tcon,
2792 struct cifs_mnt_data *mnt_data)
2793 {
2794 struct smb3_fs_context *ctx = mnt_data->ctx;
2795 struct cifs_sb_info *old = CIFS_SB(sb);
2796 struct cifs_sb_info *new = mnt_data->cifs_sb;
2797 bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2798 old->prepath;
2799 bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2800 new->prepath;
2801
2802 if (tcon->origin_fullpath &&
2803 dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source))
2804 return 1;
2805
2806 if (old_set && new_set && !strcmp(new->prepath, old->prepath))
2807 return 1;
2808 else if (!old_set && !new_set)
2809 return 1;
2810
2811 return 0;
2812 }
2813
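/*
 * Return 1 if the superblock @sb already matches the mount described by
 * @data (same server, session, tcon, prefix path and mount options), so
 * that it can be reused instead of creating a new superblock; return 0
 * otherwise.  It is presumably invoked as a superblock comparison
 * callback during mount (an assumption from its signature; the caller is
 * not shown in this file).
 */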
2814 int
2815 cifs_match_super(struct super_block *sb, void *data)
2816 {
2817 struct cifs_mnt_data *mnt_data = data;
2818 struct smb3_fs_context *ctx;
2819 struct cifs_sb_info *cifs_sb;
2820 struct TCP_Server_Info *tcp_srv;
2821 struct cifs_ses *ses;
2822 struct cifs_tcon *tcon;
2823 struct tcon_link *tlink;
2824 int rc = 0;
2825
2826 spin_lock(&cifs_tcp_ses_lock);
2827 cifs_sb = CIFS_SB(sb);
2828
2829 /* We do not want to use a superblock that has been shutdown */
2830 if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
2831 spin_unlock(&cifs_tcp_ses_lock);
2832 return 0;
2833 }
2834
2835 tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
2836 if (IS_ERR_OR_NULL(tlink)) {
2837 pr_warn_once("%s: skip super matching due to bad tlink(%p)\n",
2838 __func__, tlink);
2839 spin_unlock(&cifs_tcp_ses_lock);
2840 return 0;
2841 }
2842 tcon = tlink_tcon(tlink);
2843 ses = tcon->ses;
2844 tcp_srv = ses->server;
2845
2846 ctx = mnt_data->ctx;
2847
2848 spin_lock(&tcp_srv->srv_lock);
2849 spin_lock(&ses->ses_lock);
2850 spin_lock(&ses->chan_lock);
2851 spin_lock(&tcon->tc_lock);
2852 if (!match_server(tcp_srv, ctx, true) ||
2853 !match_session(ses, ctx) ||
2854 !match_tcon(tcon, ctx) ||
2855 !match_prepath(sb, tcon, mnt_data)) {
2856 rc = 0;
2857 goto out;
2858 }
2859
2860 rc = compare_mount_options(sb, mnt_data);
2861 out:
2862 spin_unlock(&tcon->tc_lock);
2863 spin_unlock(&ses->chan_lock);
2864 spin_unlock(&ses->ses_lock);
2865 spin_unlock(&tcp_srv->srv_lock);
2866
2867 spin_unlock(&cifs_tcp_ses_lock);
2868 cifs_put_tlink(tlink);
2869 return rc;
2870 }
2871
2872 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2873 static struct lock_class_key cifs_key[2];
2874 static struct lock_class_key cifs_slock_key[2];
2875
2876 static inline void
2877 cifs_reclassify_socket4(struct socket *sock)
2878 {
2879 struct sock *sk = sock->sk;
2880 BUG_ON(!sock_allow_reclassification(sk));
2881 sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
2882 &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
2883 }
2884
2885 static inline void
2886 cifs_reclassify_socket6(struct socket *sock)
2887 {
2888 struct sock *sk = sock->sk;
2889 BUG_ON(!sock_allow_reclassification(sk));
2890 sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
2891 &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
2892 }
2893 #else
2894 static inline void
2895 cifs_reclassify_socket4(struct socket *sock)
2896 {
2897 }
2898
2899 static inline void
2900 cifs_reclassify_socket6(struct socket *sock)
2901 {
2902 }
2903 #endif
2904
2905 /* See RFC1001 section 14 on representation of Netbios names */
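/*
 * Worked example of the half-ASCII encoding performed below: every
 * source byte becomes two capital letters, 'A' plus the high nibble and
 * 'A' plus the low nibble.  So 'F' (0x46) encodes to "EG", 'O' (0x4f)
 * encodes to "EP" and a padding space (0x20) encodes to "CA".
 */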
2906 static void rfc1002mangle(char *target, char *source, unsigned int length)
2907 {
2908 unsigned int i, j;
2909
2910 for (i = 0, j = 0; i < (length); i++) {
2911 /* mask a nibble at a time and encode */
2912 target[j] = 'A' + (0x0F & (source[i] >> 4));
2913 target[j+1] = 'A' + (0x0F & source[i]);
2914 j += 2;
2915 }
2916
2917 }
2918
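/*
 * If a source address was requested (srcaddr family != AF_UNSPEC),
 * bind the socket to it before connecting; otherwise do nothing.
 */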
2919 static int
2920 bind_socket(struct TCP_Server_Info *server)
2921 {
2922 int rc = 0;
2923 if (server->srcaddr.ss_family != AF_UNSPEC) {
2924 /* Bind to the specified local IP address */
2925 struct socket *socket = server->ssocket;
2926 rc = kernel_bind(socket,
2927 (struct sockaddr *) &server->srcaddr,
2928 sizeof(server->srcaddr));
2929 if (rc < 0) {
2930 struct sockaddr_in *saddr4;
2931 struct sockaddr_in6 *saddr6;
2932 saddr4 = (struct sockaddr_in *)&server->srcaddr;
2933 saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
2934 if (saddr6->sin6_family == AF_INET6)
2935 cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
2936 &saddr6->sin6_addr, rc);
2937 else
2938 cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
2939 &saddr4->sin_addr.s_addr, rc);
2940 }
2941 }
2942 return rc;
2943 }
2944
2945 static int
2946 ip_rfc1001_connect(struct TCP_Server_Info *server)
2947 {
2948 int rc = 0;
2949 /*
2950 * some servers require RFC1001 sessinit before sending
2951 * negprot - BB check reconnection in case where second
2952 * sessinit is sent but no second negprot
2953 */
2954 struct rfc1002_session_packet req = {};
2955 struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
2956 unsigned int len;
2957
2958 req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
2959
2960 if (server->server_RFC1001_name[0] != 0)
2961 rfc1002mangle(req.trailer.session_req.called_name,
2962 server->server_RFC1001_name,
2963 RFC1001_NAME_LEN_WITH_NULL);
2964 else
2965 rfc1002mangle(req.trailer.session_req.called_name,
2966 DEFAULT_CIFS_CALLED_NAME,
2967 RFC1001_NAME_LEN_WITH_NULL);
2968
2969 req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);
2970
2971 /* calling name ends in null (byte 16) from old smb convention */
2972 if (server->workstation_RFC1001_name[0] != 0)
2973 rfc1002mangle(req.trailer.session_req.calling_name,
2974 server->workstation_RFC1001_name,
2975 RFC1001_NAME_LEN_WITH_NULL);
2976 else
2977 rfc1002mangle(req.trailer.session_req.calling_name,
2978 "LINUX_CIFS_CLNT",
2979 RFC1001_NAME_LEN_WITH_NULL);
2980
2981 /*
2982 * As per rfc1002, @len must be the number of bytes that follow the
2983 * length field of a rfc1002 session request payload.
2984 */
2985 len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
2986
2987 smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
2988 rc = smb_send(server, smb_buf, len);
2989 /*
2990 * The RFC1001 layer in at least one server requires a very short break
2991 * before negprot, presumably because it does not expect negprot to follow so fast.
2992 * This is a simple workaround that avoids complicating the code and does
2993 * not noticeably slow down mounts for everyone else.
2994 */
2995 usleep_range(1000, 2000);
2996
2997 return rc;
2998 }
2999
3000 static int
3001 generic_ip_connect(struct TCP_Server_Info *server)
3002 {
3003 struct sockaddr *saddr;
3004 struct socket *socket;
3005 int slen, sfamily;
3006 __be16 sport;
3007 int rc = 0;
3008
3009 saddr = (struct sockaddr *) &server->dstaddr;
3010
3011 if (server->dstaddr.ss_family == AF_INET6) {
3012 struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;
3013
3014 sport = ipv6->sin6_port;
3015 slen = sizeof(struct sockaddr_in6);
3016 sfamily = AF_INET6;
3017 cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
3018 ntohs(sport));
3019 } else {
3020 struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;
3021
3022 sport = ipv4->sin_port;
3023 slen = sizeof(struct sockaddr_in);
3024 sfamily = AF_INET;
3025 cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
3026 ntohs(sport));
3027 }
3028
3029 if (server->ssocket) {
3030 socket = server->ssocket;
3031 } else {
3032 rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
3033 IPPROTO_TCP, &server->ssocket, 1);
3034 if (rc < 0) {
3035 cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
3036 return rc;
3037 }
3038
3039 /* BB other socket options to set KEEPALIVE, NODELAY? */
3040 cifs_dbg(FYI, "Socket created\n");
3041 socket = server->ssocket;
3042 socket->sk->sk_allocation = GFP_NOFS;
3043 socket->sk->sk_use_task_frag = false;
3044 if (sfamily == AF_INET6)
3045 cifs_reclassify_socket6(socket);
3046 else
3047 cifs_reclassify_socket4(socket);
3048 }
3049
3050 rc = bind_socket(server);
3051 if (rc < 0)
3052 return rc;
3053
3054 /*
3055 * Eventually check for other socket options to change from
3056 * the default. sock_setsockopt not used because it expects
3057 * user space buffer
3058 */
3059 socket->sk->sk_rcvtimeo = 7 * HZ;
3060 socket->sk->sk_sndtimeo = 5 * HZ;
3061
3062 /* make the bufsizes depend on wsize/rsize and max requests */
3063 if (server->noautotune) {
3064 if (socket->sk->sk_sndbuf < (200 * 1024))
3065 socket->sk->sk_sndbuf = 200 * 1024;
3066 if (socket->sk->sk_rcvbuf < (140 * 1024))
3067 socket->sk->sk_rcvbuf = 140 * 1024;
3068 }
3069
3070 if (server->tcp_nodelay)
3071 tcp_sock_set_nodelay(socket->sk);
3072
3073 cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
3074 socket->sk->sk_sndbuf,
3075 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
3076
3077 rc = kernel_connect(socket, saddr, slen,
3078 server->noblockcnt ? O_NONBLOCK : 0);
3079 /*
3080 * When mounting SMB root file systems, we do not want to block in
3081 * connect, so treat -EINPROGRESS as success here and let
3082 * cifs_reconnect() perform reconnect failover later - if possible.
3083 */
3084 if (server->noblockcnt && rc == -EINPROGRESS)
3085 rc = 0;
3086 if (rc < 0) {
3087 cifs_dbg(FYI, "Error %d connecting to server\n", rc);
3088 trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
3089 sock_release(socket);
3090 server->ssocket = NULL;
3091 return rc;
3092 }
3093 trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
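/*
 * The NetBIOS session service on port 139 expects an RFC1001 session
 * request before the SMB negotiate; direct SMB over port 445 does not.
 */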
3094 if (sport == htons(RFC1001_PORT))
3095 rc = ip_rfc1001_connect(server);
3096
3097 return rc;
3098 }
3099
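/*
 * Connect to the server.  If no port was specified in the mount, try the
 * direct SMB port 445 first and fall back to the NetBIOS session port 139
 * on failure; otherwise connect only to the requested port.
 */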
3100 static int
3101 ip_connect(struct TCP_Server_Info *server)
3102 {
3103 __be16 *sport;
3104 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
3105 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
3106
3107 if (server->dstaddr.ss_family == AF_INET6)
3108 sport = &addr6->sin6_port;
3109 else
3110 sport = &addr->sin_port;
3111
3112 if (*sport == 0) {
3113 int rc;
3114
3115 /* try with 445 port at first */
3116 *sport = htons(CIFS_PORT);
3117
3118 rc = generic_ip_connect(server);
3119 if (rc >= 0)
3120 return rc;
3121
3122 /* if it failed, try with 139 port */
3123 *sport = htons(RFC1001_PORT);
3124 }
3125
3126 return generic_ip_connect(server);
3127 }
3128
3129 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3130 void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
3131 struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3132 {
3133 /*
3134 * If we are reconnecting, should we check whether any requested
3135 * capabilities changed locally, e.g. via remount? We cannot do much
3136 * about it here even if they have (and even if we could detect it
3137 * with the checks below). Perhaps we could add a backpointer from the
3138 * tcon to an array of sbs, or, if we changed the code so that all sbs
3139 * for the same share used a single sb (as NFS does), we would only
3140 * need one backpointer to the sb. What if we wanted to mount the same
3141 * server share twice, once with and once without posixacls or posix
3142 * paths?
3143 */
3144 __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
3145
3146 if (ctx && ctx->no_linux_ext) {
3147 tcon->fsUnixInfo.Capability = 0;
3148 tcon->unix_ext = 0; /* Unix Extensions disabled */
3149 cifs_dbg(FYI, "Linux protocol extensions disabled\n");
3150 return;
3151 } else if (ctx)
3152 tcon->unix_ext = 1; /* Unix Extensions supported */
3153
3154 if (!tcon->unix_ext) {
3155 cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
3156 return;
3157 }
3158
3159 if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
3160 __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
3161 cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
3162 /*
3163 * check for reconnect case in which we do not
3164 * want to change the mount behavior if we can avoid it
3165 */
3166 if (ctx == NULL) {
3167 /*
3168 * turn off POSIX ACL and PATHNAMES if not set
3169 * originally at mount time
3170 */
3171 if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
3172 cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
3173 if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
3174 if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
3175 cifs_dbg(VFS, "POSIXPATH support change\n");
3176 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
3177 } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
3178 cifs_dbg(VFS, "possible reconnect error\n");
3179 cifs_dbg(VFS, "server disabled POSIX path support\n");
3180 }
3181 }
3182
3183 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
3184 cifs_dbg(VFS, "per-share encryption not supported yet\n");
3185
3186 cap &= CIFS_UNIX_CAP_MASK;
3187 if (ctx && ctx->no_psx_acl)
3188 cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
3189 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
3190 cifs_dbg(FYI, "negotiated posix acl support\n");
3191 if (cifs_sb)
3192 cifs_sb->mnt_cifs_flags |=
3193 CIFS_MOUNT_POSIXACL;
3194 }
3195
3196 if (ctx && ctx->posix_paths == 0)
3197 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
3198 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
3199 cifs_dbg(FYI, "negotiate posix pathnames\n");
3200 if (cifs_sb)
3201 cifs_sb->mnt_cifs_flags |=
3202 CIFS_MOUNT_POSIX_PATHS;
3203 }
3204
3205 cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
3206 #ifdef CONFIG_CIFS_DEBUG2
3207 if (cap & CIFS_UNIX_FCNTL_CAP)
3208 cifs_dbg(FYI, "FCNTL cap\n");
3209 if (cap & CIFS_UNIX_EXTATTR_CAP)
3210 cifs_dbg(FYI, "EXTATTR cap\n");
3211 if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
3212 cifs_dbg(FYI, "POSIX path cap\n");
3213 if (cap & CIFS_UNIX_XATTR_CAP)
3214 cifs_dbg(FYI, "XATTR cap\n");
3215 if (cap & CIFS_UNIX_POSIX_ACL_CAP)
3216 cifs_dbg(FYI, "POSIX ACL cap\n");
3217 if (cap & CIFS_UNIX_LARGE_READ_CAP)
3218 cifs_dbg(FYI, "very large read cap\n");
3219 if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
3220 cifs_dbg(FYI, "very large write cap\n");
3221 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
3222 cifs_dbg(FYI, "transport encryption cap\n");
3223 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
3224 cifs_dbg(FYI, "mandatory transport encryption cap\n");
3225 #endif /* CIFS_DEBUG2 */
3226 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
3227 if (ctx == NULL)
3228 cifs_dbg(FYI, "resetting capabilities failed\n");
3229 else
3230 cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");
3231
3232 }
3233 }
3234 }
3235 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
3236
3237 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
3238 {
3239 struct smb3_fs_context *ctx = cifs_sb->ctx;
3240
3241 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
3242
3243 spin_lock_init(&cifs_sb->tlink_tree_lock);
3244 cifs_sb->tlink_tree = RB_ROOT;
3245
3246 cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
3247 ctx->file_mode, ctx->dir_mode);
3248
3249 /* this is needed for ASCII codepage to Unicode conversions */
3250 if (ctx->iocharset == NULL) {
3251 /* load_nls_default cannot return null */
3252 cifs_sb->local_nls = load_nls_default();
3253 } else {
3254 cifs_sb->local_nls = load_nls(ctx->iocharset);
3255 if (cifs_sb->local_nls == NULL) {
3256 cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
3257 ctx->iocharset);
3258 return -ELIBACC;
3259 }
3260 }
3261 ctx->local_nls = cifs_sb->local_nls;
3262
3263 smb3_update_mnt_flags(cifs_sb);
3264
3265 if (ctx->direct_io)
3266 cifs_dbg(FYI, "mounting share using direct i/o\n");
3267 if (ctx->cache_ro) {
3268 cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
3269 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
3270 } else if (ctx->cache_rw) {
3271 cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
3272 cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
3273 CIFS_MOUNT_RW_CACHE);
3274 }
3275
3276 if ((ctx->cifs_acl) && (ctx->dynperm))
3277 cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
3278
3279 if (ctx->prepath) {
3280 cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
3281 if (cifs_sb->prepath == NULL)
3282 return -ENOMEM;
3283 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3284 }
3285
3286 return 0;
3287 }
3288
3289 /* Release all successfully acquired connections */
3290 void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
3291 {
3292 int rc = 0;
3293
3294 if (mnt_ctx->tcon)
3295 cifs_put_tcon(mnt_ctx->tcon);
3296 else if (mnt_ctx->ses)
3297 cifs_put_smb_ses(mnt_ctx->ses);
3298 else if (mnt_ctx->server)
3299 cifs_put_tcp_session(mnt_ctx->server, 0);
3300 mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
3301 free_xid(mnt_ctx->xid);
3302 }
3303
3304 int cifs_mount_get_session(struct cifs_mount_ctx *mnt_ctx)
3305 {
3306 struct TCP_Server_Info *server = NULL;
3307 struct smb3_fs_context *ctx;
3308 struct cifs_ses *ses = NULL;
3309 unsigned int xid;
3310 int rc = 0;
3311
3312 xid = get_xid();
3313
3314 if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->fs_ctx)) {
3315 rc = -EINVAL;
3316 goto out;
3317 }
3318 ctx = mnt_ctx->fs_ctx;
3319
3320 /* get a reference to a tcp session */
3321 server = cifs_get_tcp_session(ctx, NULL);
3322 if (IS_ERR(server)) {
3323 rc = PTR_ERR(server);
3324 server = NULL;
3325 goto out;
3326 }
3327
3328 /* get a reference to a SMB session */
3329 ses = cifs_get_smb_ses(server, ctx);
3330 if (IS_ERR(ses)) {
3331 rc = PTR_ERR(ses);
3332 ses = NULL;
3333 goto out;
3334 }
3335
3336 if ((ctx->persistent == true) && (!(ses->server->capabilities &
3337 SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
3338 cifs_server_dbg(VFS, "persistent handles not supported by server\n");
3339 rc = -EOPNOTSUPP;
3340 }
3341
3342 out:
3343 mnt_ctx->xid = xid;
3344 mnt_ctx->server = server;
3345 mnt_ctx->ses = ses;
3346 mnt_ctx->tcon = NULL;
3347
3348 return rc;
3349 }
3350
3351 int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
3352 {
3353 struct TCP_Server_Info *server;
3354 struct cifs_sb_info *cifs_sb;
3355 struct smb3_fs_context *ctx;
3356 struct cifs_tcon *tcon = NULL;
3357 int rc = 0;
3358
3359 if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->server || !mnt_ctx->ses || !mnt_ctx->fs_ctx ||
3360 !mnt_ctx->cifs_sb)) {
3361 rc = -EINVAL;
3362 goto out;
3363 }
3364 server = mnt_ctx->server;
3365 ctx = mnt_ctx->fs_ctx;
3366 cifs_sb = mnt_ctx->cifs_sb;
3367
3368 /* search for existing tcon to this server share */
3369 tcon = cifs_get_tcon(mnt_ctx->ses, ctx);
3370 if (IS_ERR(tcon)) {
3371 rc = PTR_ERR(tcon);
3372 tcon = NULL;
3373 goto out;
3374 }
3375
3376 /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
3377 if (tcon->posix_extensions)
3378 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
3379
3380 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3381 /* tell server which Unix caps we support */
3382 if (cap_unix(tcon->ses)) {
3383 /*
3384 * The caps reset checks the mount options to see if unix extensions
3385 * were disabled for just this mount.
3386 */
3387 reset_cifs_unix_caps(mnt_ctx->xid, tcon, cifs_sb, ctx);
3388 spin_lock(&tcon->ses->server->srv_lock);
3389 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
3390 (le64_to_cpu(tcon->fsUnixInfo.Capability) &
3391 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
3392 spin_unlock(&tcon->ses->server->srv_lock);
3393 rc = -EACCES;
3394 goto out;
3395 }
3396 spin_unlock(&tcon->ses->server->srv_lock);
3397 } else
3398 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
3399 tcon->unix_ext = 0; /* server does not support them */
3400
3401 /* do not care whether the following call succeeds - it is informational only */
3402 if (!tcon->pipe && server->ops->qfs_tcon) {
3403 server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb);
3404 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
3405 if (tcon->fsDevInfo.DeviceCharacteristics &
3406 cpu_to_le32(FILE_READ_ONLY_DEVICE))
3407 cifs_dbg(VFS, "mounted to read only share\n");
3408 else if ((cifs_sb->mnt_cifs_flags &
3409 CIFS_MOUNT_RW_CACHE) == 0)
3410 cifs_dbg(VFS, "read only mount of RW share\n");
3411 /* no need to log a RW mount of a typical RW share */
3412 }
3413 }
3414
3415 /*
3416 * Clamp the rsize/wsize mount arguments if they are too big for the server
3417 * and set the rsize/wsize to the negotiated values if not passed in by
3418 * the user on mount
3419 */
3420 if ((cifs_sb->ctx->wsize == 0) ||
3421 (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx)))
3422 cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx);
3423 if ((cifs_sb->ctx->rsize == 0) ||
3424 (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
3425 cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
3426
3427 /*
3428 * The cookie is initialized from volume info returned above.
3429 * Inside cifs_fscache_get_super_cookie it checks
3430 * that we do not get the super cookie twice.
3431 */
3432 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
3433 cifs_fscache_get_super_cookie(tcon);
3434
3435 out:
3436 mnt_ctx->tcon = tcon;
3437 return rc;
3438 }
3439
3440 static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
3441 struct cifs_tcon *tcon)
3442 {
3443 struct tcon_link *tlink;
3444
3445 /* hang the tcon off of the superblock */
3446 tlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
3447 if (tlink == NULL)
3448 return -ENOMEM;
3449
3450 tlink->tl_uid = ses->linux_uid;
3451 tlink->tl_tcon = tcon;
3452 tlink->tl_time = jiffies;
3453 set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
3454 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3455
3456 cifs_sb->master_tlink = tlink;
3457 spin_lock(&cifs_sb->tlink_tree_lock);
3458 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
3459 spin_unlock(&cifs_sb->tlink_tree_lock);
3460
3461 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
3462 TLINK_IDLE_EXPIRE);
3463 return 0;
3464 }
3465
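/*
 * Walk @full_path one component at a time and verify that every prefix
 * is accessible on the share.  For example (illustrative only), with a
 * separator of '\' and full_path "\dir1\dir2" the checks issued are "",
 * "\dir1" and "\dir1\dir2"; when @added_treename is set, the first
 * component (the tree name) is skipped.
 */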
3466 static int
3467 cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
3468 unsigned int xid,
3469 struct cifs_tcon *tcon,
3470 struct cifs_sb_info *cifs_sb,
3471 char *full_path,
3472 int added_treename)
3473 {
3474 int rc;
3475 char *s;
3476 char sep, tmp;
3477 int skip = added_treename ? 1 : 0;
3478
3479 sep = CIFS_DIR_SEP(cifs_sb);
3480 s = full_path;
3481
3482 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
3483 while (rc == 0) {
3484 /* skip separators */
3485 while (*s == sep)
3486 s++;
3487 if (!*s)
3488 break;
3489 /* next separator */
3490 while (*s && *s != sep)
3491 s++;
3492 /*
3493 * if the tree name was prepended to the path, skip its component
3494 * (the first part within the separators) without checking it
3495 */
3496 if (skip) {
3497 skip = 0;
3498 continue;
3499 }
3500 /*
3501 * temporarily null-terminate the path at the end of
3502 * the current component
3503 */
3504 tmp = *s;
3505 *s = 0;
3506 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3507 full_path);
3508 *s = tmp;
3509 }
3510 return rc;
3511 }
3512
3513 /*
3514 * Check if path is remote (i.e. a DFS share).
3515 *
3516 * Return -EREMOTE if it is, otherwise 0 or -errno.
3517 */
3518 int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx)
3519 {
3520 int rc;
3521 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3522 struct TCP_Server_Info *server = mnt_ctx->server;
3523 unsigned int xid = mnt_ctx->xid;
3524 struct cifs_tcon *tcon = mnt_ctx->tcon;
3525 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3526 char *full_path;
3527
3528 if (!server->ops->is_path_accessible)
3529 return -EOPNOTSUPP;
3530
3531 /*
3532 * cifs_build_path_to_root works only when we have a valid tcon
3533 */
3534 full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
3535 tcon->Flags & SMB_SHARE_IS_IN_DFS);
3536 if (full_path == NULL)
3537 return -ENOMEM;
3538
3539 cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
3540
3541 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3542 full_path);
3543 if (rc != 0 && rc != -EREMOTE)
3544 goto out;
3545
3546 if (rc != -EREMOTE) {
3547 rc = cifs_are_all_path_components_accessible(server, xid, tcon,
3548 cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
3549 if (rc != 0) {
3550 cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
3551 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3552 rc = 0;
3553 }
3554 }
3555
3556 out:
3557 kfree(full_path);
3558 return rc;
3559 }
3560
3561 #ifdef CONFIG_CIFS_DFS_UPCALL
3562 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3563 {
3564 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3565 bool isdfs;
3566 int rc;
3567
3568 INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
3569
3570 rc = dfs_mount_share(&mnt_ctx, &isdfs);
3571 if (rc)
3572 goto error;
3573 if (!isdfs)
3574 goto out;
3575
3576 /*
3577 * After reconnecting to a different server, unique ids won't match anymore, so we disable
3578 * serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
3579 */
3580 cifs_autodisable_serverino(cifs_sb);
3581 /*
3582 * Force the use of prefix path to support failover on DFS paths that resolve to targets
3583 * that have different prefix paths.
3584 */
3585 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3586 kfree(cifs_sb->prepath);
3587 cifs_sb->prepath = ctx->prepath;
3588 ctx->prepath = NULL;
3589
3590 out:
3591 cifs_try_adding_channels(mnt_ctx.ses);
3592 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3593 if (rc)
3594 goto error;
3595
3596 free_xid(mnt_ctx.xid);
3597 return rc;
3598
3599 error:
3600 dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
3601 cifs_mount_put_conns(&mnt_ctx);
3602 return rc;
3603 }
3604 #else
3605 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3606 {
3607 int rc = 0;
3608 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3609
3610 rc = cifs_mount_get_session(&mnt_ctx);
3611 if (rc)
3612 goto error;
3613
3614 rc = cifs_mount_get_tcon(&mnt_ctx);
3615 if (rc)
3616 goto error;
3617
3618 rc = cifs_is_path_remote(&mnt_ctx);
3619 if (rc == -EREMOTE)
3620 rc = -EOPNOTSUPP;
3621 if (rc)
3622 goto error;
3623
3624 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3625 if (rc)
3626 goto error;
3627
3628 free_xid(mnt_ctx.xid);
3629 return rc;
3630
3631 error:
3632 cifs_mount_put_conns(&mnt_ctx);
3633 return rc;
3634 }
3635 #endif
3636
3637 /*
3638 * Issue a TREE_CONNECT request.
3639 */
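/*
 * This is the legacy SMB1 (CIFS) TREE_CONNECT_ANDX implementation; for
 * SMB2/SMB3 mounts the tree connect is issued through
 * server->ops->tree_connect instead (presumably SMB2_tcon, which lives
 * outside this file).
 */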
3640 int
3641 CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
3642 const char *tree, struct cifs_tcon *tcon,
3643 const struct nls_table *nls_codepage)
3644 {
3645 struct smb_hdr *smb_buffer;
3646 struct smb_hdr *smb_buffer_response;
3647 TCONX_REQ *pSMB;
3648 TCONX_RSP *pSMBr;
3649 unsigned char *bcc_ptr;
3650 int rc = 0;
3651 int length;
3652 __u16 bytes_left, count;
3653
3654 if (ses == NULL)
3655 return -EIO;
3656
3657 smb_buffer = cifs_buf_get();
3658 if (smb_buffer == NULL)
3659 return -ENOMEM;
3660
3661 smb_buffer_response = smb_buffer;
3662
3663 header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
3664 NULL /*no tid */ , 4 /*wct */ );
3665
3666 smb_buffer->Mid = get_next_mid(ses->server);
3667 smb_buffer->Uid = ses->Suid;
3668 pSMB = (TCONX_REQ *) smb_buffer;
3669 pSMBr = (TCONX_RSP *) smb_buffer_response;
3670
3671 pSMB->AndXCommand = 0xFF;
3672 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
3673 bcc_ptr = &pSMB->Password[0];
3674
3675 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
3676 *bcc_ptr = 0; /* password is null byte */
3677 bcc_ptr++; /* skip password */
3678 /* already aligned so no need to do it below */
3679
3680 if (ses->server->sign)
3681 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
3682
3683 if (ses->capabilities & CAP_STATUS32) {
3684 smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
3685 }
3686 if (ses->capabilities & CAP_DFS) {
3687 smb_buffer->Flags2 |= SMBFLG2_DFS;
3688 }
3689 if (ses->capabilities & CAP_UNICODE) {
3690 smb_buffer->Flags2 |= SMBFLG2_UNICODE;
3691 length =
3692 cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
3693 6 /* max utf8 char length in bytes */ *
3694 (/* server len*/ + 256 /* share len */), nls_codepage);
3695 bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */
3696 bcc_ptr += 2; /* skip trailing null */
3697 } else { /* ASCII */
3698 strcpy(bcc_ptr, tree);
3699 bcc_ptr += strlen(tree) + 1;
3700 }
3701 strcpy(bcc_ptr, "?????");
3702 bcc_ptr += strlen("?????");
3703 bcc_ptr += 1;
3704 count = bcc_ptr - &pSMB->Password[0];
3705 be32_add_cpu(&pSMB->hdr.smb_buf_length, count);
3706 pSMB->ByteCount = cpu_to_le16(count);
3707
3708 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
3709 0);
3710
3711 /* above now done in SendReceive */
3712 if (rc == 0) {
3713 bool is_unicode;
3714
3715 tcon->tid = smb_buffer_response->Tid;
3716 bcc_ptr = pByteArea(smb_buffer_response);
3717 bytes_left = get_bcc(smb_buffer_response);
3718 length = strnlen(bcc_ptr, bytes_left - 2);
3719 if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
3720 is_unicode = true;
3721 else
3722 is_unicode = false;
3723
3724
3725 /* skip service field (NB: this field is always ASCII) */
3726 if (length == 3) {
3727 if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
3728 (bcc_ptr[2] == 'C')) {
3729 cifs_dbg(FYI, "IPC connection\n");
3730 tcon->ipc = true;
3731 tcon->pipe = true;
3732 }
3733 } else if (length == 2) {
3734 if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
3735 /* the most common case */
3736 cifs_dbg(FYI, "disk share connection\n");
3737 }
3738 }
3739 bcc_ptr += length + 1;
3740 bytes_left -= (length + 1);
3741 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
3742
3743 /* mostly informational -- no need to fail on error here */
3744 kfree(tcon->nativeFileSystem);
3745 tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
3746 bytes_left, is_unicode,
3747 nls_codepage);
3748
3749 cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);
3750
3751 if ((smb_buffer_response->WordCount == 3) ||
3752 (smb_buffer_response->WordCount == 7))
3753 /* field is in same location */
3754 tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
3755 else
3756 tcon->Flags = 0;
3757 cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
3758 }
3759
3760 cifs_buf_release(smb_buffer);
3761 return rc;
3762 }
3763
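/*
 * RCU callback for cifs_umount(): unload the nls table, free the mount
 * context and then the cifs_sb itself once all readers are done with it.
 */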
3764 static void delayed_free(struct rcu_head *p)
3765 {
3766 struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
3767
3768 unload_nls(cifs_sb->local_nls);
3769 smb3_cleanup_fs_context(cifs_sb->ctx);
3770 kfree(cifs_sb);
3771 }
3772
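/*
 * Tear down a cifs superblock: cancel the tlink pruning work, put every
 * tcon_link in the tree and free the cifs_sb after an RCU grace period.
 */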
3773 void
3774 cifs_umount(struct cifs_sb_info *cifs_sb)
3775 {
3776 struct rb_root *root = &cifs_sb->tlink_tree;
3777 struct rb_node *node;
3778 struct tcon_link *tlink;
3779
3780 cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
3781
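	/* detach each tlink from the tree, dropping the lock while it is put */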
3782 spin_lock(&cifs_sb->tlink_tree_lock);
3783 while ((node = rb_first(root))) {
3784 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
3785 cifs_get_tlink(tlink);
3786 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3787 rb_erase(node, root);
3788
3789 spin_unlock(&cifs_sb->tlink_tree_lock);
3790 cifs_put_tlink(tlink);
3791 spin_lock(&cifs_sb->tlink_tree_lock);
3792 }
3793 spin_unlock(&cifs_sb->tlink_tree_lock);
3794
3795 kfree(cifs_sb->prepath);
3796 call_rcu(&cifs_sb->rcu, delayed_free);
3797 }
3798
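/*
 * Negotiate the protocol dialect with the server (sent once per connect)
 * and update server->tcpStatus to reflect the outcome.
 */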
3799 int
3800 cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
3801 struct TCP_Server_Info *server)
3802 {
3803 int rc = 0;
3804
3805 if (!server->ops->need_neg || !server->ops->negotiate)
3806 return -ENOSYS;
3807
3808 /* only send once per connect */
3809 spin_lock(&server->srv_lock);
3810 if (server->tcpStatus != CifsGood &&
3811 server->tcpStatus != CifsNew &&
3812 server->tcpStatus != CifsNeedNegotiate) {
3813 spin_unlock(&server->srv_lock);
3814 return -EHOSTDOWN;
3815 }
3816
3817 if (!server->ops->need_neg(server) &&
3818 server->tcpStatus == CifsGood) {
3819 spin_unlock(&server->srv_lock);
3820 return 0;
3821 }
3822
3823 server->tcpStatus = CifsInNegotiate;
3824 spin_unlock(&server->srv_lock);
3825
3826 rc = server->ops->negotiate(xid, ses, server);
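	/*
	 * Promote the connection to CifsGood only if no other thread changed
	 * the state while we were negotiating; on failure, flag it so that
	 * negotiation is retried on the next reconnect.
	 */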
3827 if (rc == 0) {
3828 spin_lock(&server->srv_lock);
3829 if (server->tcpStatus == CifsInNegotiate)
3830 server->tcpStatus = CifsGood;
3831 else
3832 rc = -EHOSTDOWN;
3833 spin_unlock(&server->srv_lock);
3834 } else {
3835 spin_lock(&server->srv_lock);
3836 if (server->tcpStatus == CifsInNegotiate)
3837 server->tcpStatus = CifsNeedNegotiate;
3838 spin_unlock(&server->srv_lock);
3839 }
3840
3841 return rc;
3842 }
3843
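/*
 * Establish an SMB session on the given server connection, or bind an
 * additional channel to an existing session, sending the session setup
 * request at most once per connect.
 */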
3844 int
3845 cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
3846 struct TCP_Server_Info *server,
3847 struct nls_table *nls_info)
3848 {
3849 int rc = -ENOSYS;
3850 struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
3851 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
3852 struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
3853 bool is_binding = false;
3854
3855 spin_lock(&ses->ses_lock);
3856 cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
3857 __func__, ses->chans_need_reconnect);
3858
3859 if (ses->ses_status != SES_GOOD &&
3860 ses->ses_status != SES_NEW &&
3861 ses->ses_status != SES_NEED_RECON) {
3862 spin_unlock(&ses->ses_lock);
3863 return -EHOSTDOWN;
3864 }
3865
3866 /* only send once per connect */
3867 spin_lock(&ses->chan_lock);
3868 if (CIFS_ALL_CHANS_GOOD(ses)) {
3869 if (ses->ses_status == SES_NEED_RECON)
3870 ses->ses_status = SES_GOOD;
3871 spin_unlock(&ses->chan_lock);
3872 spin_unlock(&ses->ses_lock);
3873 return 0;
3874 }
3875
3876 cifs_chan_set_in_reconnect(ses, server);
3877 is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
3878 spin_unlock(&ses->chan_lock);
3879
3880 if (!is_binding) {
3881 ses->ses_status = SES_IN_SETUP;
3882
3883 /* force iface_list refresh */
3884 ses->iface_last_update = 0;
3885 }
3886 spin_unlock(&ses->ses_lock);
3887
3888 /* update ses ip_addr only for primary chan */
3889 if (server == pserver) {
3890 if (server->dstaddr.ss_family == AF_INET6)
3891 scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
3892 else
3893 scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
3894 }
3895
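	/*
	 * For a new session (as opposed to binding an extra channel), adopt
	 * the server's capabilities and drop any stale auth key before setup.
	 */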
3896 if (!is_binding) {
3897 ses->capabilities = server->capabilities;
3898 if (!linuxExtEnabled)
3899 ses->capabilities &= (~server->vals->cap_unix);
3900
3901 if (ses->auth_key.response) {
3902 cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
3903 ses->auth_key.response);
3904 kfree_sensitive(ses->auth_key.response);
3905 ses->auth_key.response = NULL;
3906 ses->auth_key.len = 0;
3907 }
3908 }
3909
3910 cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
3911 server->sec_mode, server->capabilities, server->timeAdj);
3912
3913 if (server->ops->sess_setup)
3914 rc = server->ops->sess_setup(xid, ses, server, nls_info);
3915
3916 if (rc) {
3917 cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
3918 spin_lock(&ses->ses_lock);
3919 if (ses->ses_status == SES_IN_SETUP)
3920 ses->ses_status = SES_NEED_RECON;
3921 spin_lock(&ses->chan_lock);
3922 cifs_chan_clear_in_reconnect(ses, server);
3923 spin_unlock(&ses->chan_lock);
3924 spin_unlock(&ses->ses_lock);
3925 } else {
3926 spin_lock(&ses->ses_lock);
3927 if (ses->ses_status == SES_IN_SETUP)
3928 ses->ses_status = SES_GOOD;
3929 spin_lock(&ses->chan_lock);
3930 cifs_chan_clear_in_reconnect(ses, server);
3931 cifs_chan_clear_need_reconnect(ses, server);
3932 spin_unlock(&ses->chan_lock);
3933 spin_unlock(&ses->ses_lock);
3934 }
3935
3936 return rc;
3937 }
3938
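/* copy the authentication settings of an existing session into the fs context */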
3939 static int
3940 cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
3941 {
3942 ctx->sectype = ses->sectype;
3943
3944 /* krb5 is special, since we don't need username or pw */
3945 if (ctx->sectype == Kerberos)
3946 return 0;
3947
3948 return cifs_set_cifscreds(ctx, ses);
3949 }
3950
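/*
 * Build a tcon for the given fsuid on a multiuser mount: reuse the master
 * tcon's mount options and TCP session, but authenticate with the user's
 * own credentials.
 */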
3951 static struct cifs_tcon *
3952 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
3953 {
3954 int rc;
3955 struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
3956 struct cifs_ses *ses;
3957 struct cifs_tcon *tcon = NULL;
3958 struct smb3_fs_context *ctx;
3959
3960 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3961 if (ctx == NULL)
3962 return ERR_PTR(-ENOMEM);
3963
3964 ctx->local_nls = cifs_sb->local_nls;
3965 ctx->linux_uid = fsuid;
3966 ctx->cred_uid = fsuid;
3967 ctx->UNC = master_tcon->tree_name;
3968 ctx->retry = master_tcon->retry;
3969 ctx->nocase = master_tcon->nocase;
3970 ctx->nohandlecache = master_tcon->nohandlecache;
3971 ctx->local_lease = master_tcon->local_lease;
3972 ctx->no_lease = master_tcon->no_lease;
3973 ctx->resilient = master_tcon->use_resilient;
3974 ctx->persistent = master_tcon->use_persistent;
3975 ctx->handle_timeout = master_tcon->handle_timeout;
3976 ctx->no_linux_ext = !master_tcon->unix_ext;
3977 ctx->linux_ext = master_tcon->posix_extensions;
3978 ctx->sectype = master_tcon->ses->sectype;
3979 ctx->sign = master_tcon->ses->sign;
3980 ctx->seal = master_tcon->seal;
3981 ctx->witness = master_tcon->use_witness;
3982
3983 rc = cifs_set_vol_auth(ctx, master_tcon->ses);
3984 if (rc) {
3985 tcon = ERR_PTR(rc);
3986 goto out;
3987 }
3988
3989 /* get a reference for the same TCP session */
3990 spin_lock(&cifs_tcp_ses_lock);
3991 ++master_tcon->ses->server->srv_count;
3992 spin_unlock(&cifs_tcp_ses_lock);
3993
3994 ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
3995 if (IS_ERR(ses)) {
3996 tcon = (struct cifs_tcon *)ses;
3997 cifs_put_tcp_session(master_tcon->ses->server, 0);
3998 goto out;
3999 }
4000
4001 tcon = cifs_get_tcon(ses, ctx);
4002 if (IS_ERR(tcon)) {
4003 cifs_put_smb_ses(ses);
4004 goto out;
4005 }
4006
4007 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
4008 if (cap_unix(ses))
4009 reset_cifs_unix_caps(0, tcon, NULL, ctx);
4010 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
4011
4012 out:
4013 kfree(ctx->username);
4014 kfree_sensitive(ctx->password);
4015 kfree(ctx);
4016
4017 return tcon;
4018 }
4019
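/* return the tcon behind the superblock's master tlink */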
4020 struct cifs_tcon *
4021 cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
4022 {
4023 return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
4024 }
4025
4026 /* find and return a tlink with given uid */
4027 static struct tcon_link *
4028 tlink_rb_search(struct rb_root *root, kuid_t uid)
4029 {
4030 struct rb_node *node = root->rb_node;
4031 struct tcon_link *tlink;
4032
4033 while (node) {
4034 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
4035
4036 if (uid_gt(tlink->tl_uid, uid))
4037 node = node->rb_left;
4038 else if (uid_lt(tlink->tl_uid, uid))
4039 node = node->rb_right;
4040 else
4041 return tlink;
4042 }
4043 return NULL;
4044 }
4045
4046 /* insert a tcon_link into the tree */
4047 static void
4048 tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
4049 {
4050 struct rb_node **new = &(root->rb_node), *parent = NULL;
4051 struct tcon_link *tlink;
4052
4053 while (*new) {
4054 tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
4055 parent = *new;
4056
4057 if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
4058 new = &((*new)->rb_left);
4059 else
4060 new = &((*new)->rb_right);
4061 }
4062
4063 rb_link_node(&new_tlink->tl_rbnode, parent, new);
4064 rb_insert_color(&new_tlink->tl_rbnode, root);
4065 }
4066
4067 /*
4068 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
4069 * current task.
4070 *
4071 * If the superblock doesn't refer to a multiuser mount, then just return
4072 * the master tcon for the mount.
4073 *
4074 * First, search the rbtree for an existing tcon for this fsuid. If one
4075 * exists, then check to see if it's pending construction. If it is then wait
4076 * for construction to complete. Once it's no longer pending, check to see if
4077  * it failed and either return an error or retry construction, depending on
4078  * whether the failed attempt is still within TLINK_ERROR_EXPIRE.
4079 *
4080 * If one doesn't exist then insert a new tcon_link struct into the tree and
4081 * try to construct a new one.
4082 */
4083 struct tcon_link *
4084 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
4085 {
4086 int ret;
4087 kuid_t fsuid = current_fsuid();
4088 struct tcon_link *tlink, *newtlink;
4089
4090 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
4091 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
4092
4093 spin_lock(&cifs_sb->tlink_tree_lock);
4094 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4095 if (tlink)
4096 cifs_get_tlink(tlink);
4097 spin_unlock(&cifs_sb->tlink_tree_lock);
4098
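	/* no tlink for this fsuid yet: allocate one, mark it pending and insert it */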
4099 if (tlink == NULL) {
4100 newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
4101 if (newtlink == NULL)
4102 return ERR_PTR(-ENOMEM);
4103 newtlink->tl_uid = fsuid;
4104 newtlink->tl_tcon = ERR_PTR(-EACCES);
4105 set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
4106 set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
4107 cifs_get_tlink(newtlink);
4108
4109 spin_lock(&cifs_sb->tlink_tree_lock);
4110 /* was one inserted after previous search? */
4111 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4112 if (tlink) {
4113 cifs_get_tlink(tlink);
4114 spin_unlock(&cifs_sb->tlink_tree_lock);
4115 kfree(newtlink);
4116 goto wait_for_construction;
4117 }
4118 tlink = newtlink;
4119 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
4120 spin_unlock(&cifs_sb->tlink_tree_lock);
4121 } else {
4122 wait_for_construction:
4123 ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
4124 TASK_INTERRUPTIBLE);
4125 if (ret) {
4126 cifs_put_tlink(tlink);
4127 return ERR_PTR(-ERESTARTSYS);
4128 }
4129
4130 /* if it's good, return it */
4131 if (!IS_ERR(tlink->tl_tcon))
4132 return tlink;
4133
4134 /* return error if we tried this already recently */
4135 if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
4136 cifs_put_tlink(tlink);
4137 return ERR_PTR(-EACCES);
4138 }
4139
4140 if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
4141 goto wait_for_construction;
4142 }
4143
4144 tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
4145 clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
4146 wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
4147
4148 if (IS_ERR(tlink->tl_tcon)) {
4149 cifs_put_tlink(tlink);
4150 return ERR_PTR(-EACCES);
4151 }
4152
4153 return tlink;
4154 }
4155
4156 /*
4157  * Periodic workqueue job that scans a superblock's tlink_tree and puts
4158  * tcon_links that have been idle for longer than TLINK_IDLE_EXPIRE.
4159  */
4160 static void
4161 cifs_prune_tlinks(struct work_struct *work)
4162 {
4163 struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
4164 prune_tlinks.work);
4165 struct rb_root *root = &cifs_sb->tlink_tree;
4166 struct rb_node *node;
4167 struct rb_node *tmp;
4168 struct tcon_link *tlink;
4169
4170 /*
4171  * Because we drop the spinlock in the loop in order to put the tlink,
4172  * the walk is not guarded against removal of links from the tree. The only
4173 * places that remove entries from the tree are this function and
4174 * umounts. Because this function is non-reentrant and is canceled
4175 * before umount can proceed, this is safe.
4176 */
4177 spin_lock(&cifs_sb->tlink_tree_lock);
4178 node = rb_first(root);
4179 while (node != NULL) {
4180 tmp = node;
4181 node = rb_next(tmp);
4182 tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
4183
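		/* keep the master tlink, tlinks still in use, and recently used ones */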
4184 if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
4185 atomic_read(&tlink->tl_count) != 0 ||
4186 time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
4187 continue;
4188
4189 cifs_get_tlink(tlink);
4190 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
4191 rb_erase(tmp, root);
4192
4193 spin_unlock(&cifs_sb->tlink_tree_lock);
4194 cifs_put_tlink(tlink);
4195 spin_lock(&cifs_sb->tlink_tree_lock);
4196 }
4197 spin_unlock(&cifs_sb->tlink_tree_lock);
4198
4199 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
4200 TLINK_IDLE_EXPIRE);
4201 }
4202
4203 #ifndef CONFIG_CIFS_DFS_UPCALL
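/*
 * Non-DFS variant: (re)issue the tree connect for this tcon, sending the
 * request at most once per connect and updating tcon->status accordingly.
 */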
4204 int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
4205 {
4206 int rc;
4207 const struct smb_version_operations *ops = tcon->ses->server->ops;
4208
4209 /* only send once per connect */
4210 spin_lock(&tcon->tc_lock);
4211 if (tcon->status == TID_GOOD) {
4212 spin_unlock(&tcon->tc_lock);
4213 return 0;
4214 }
4215
4216 if (tcon->status != TID_NEW &&
4217 tcon->status != TID_NEED_TCON) {
4218 spin_unlock(&tcon->tc_lock);
4219 return -EHOSTDOWN;
4220 }
4221
4222 tcon->status = TID_IN_TCON;
4223 spin_unlock(&tcon->tc_lock);
4224
4225 rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc);
4226 if (rc) {
4227 spin_lock(&tcon->tc_lock);
4228 if (tcon->status == TID_IN_TCON)
4229 tcon->status = TID_NEED_TCON;
4230 spin_unlock(&tcon->tc_lock);
4231 } else {
4232 spin_lock(&tcon->tc_lock);
4233 if (tcon->status == TID_IN_TCON)
4234 tcon->status = TID_GOOD;
4235 tcon->need_reconnect = false;
4236 spin_unlock(&tcon->tc_lock);
4237 }
4238
4239 return rc;
4240 }
4241 #endif