]> git.ipfire.org Git - people/ms/linux.git/blob - fs/cifs/smb2ops.c
Merge tag 'xtensa-20220626' of https://github.com/jcmvbkbc/linux-xtensa
[people/ms/linux.git] / fs / cifs / smb2ops.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
6 */
7
8 #include <linux/pagemap.h>
9 #include <linux/vfs.h>
10 #include <linux/falloc.h>
11 #include <linux/scatterlist.h>
12 #include <linux/uuid.h>
13 #include <linux/sort.h>
14 #include <crypto/aead.h>
15 #include <linux/fiemap.h>
16 #include <uapi/linux/magic.h>
17 #include "cifsfs.h"
18 #include "cifsglob.h"
19 #include "smb2pdu.h"
20 #include "smb2proto.h"
21 #include "cifsproto.h"
22 #include "cifs_debug.h"
23 #include "cifs_unicode.h"
24 #include "smb2status.h"
25 #include "smb2glob.h"
26 #include "cifs_ioctl.h"
27 #include "smbdirect.h"
28 #include "fscache.h"
29 #include "fs_context.h"
30
31 /* Change credits for different ops and return the total number of credits */
32 static int
33 change_conf(struct TCP_Server_Info *server)
34 {
35 server->credits += server->echo_credits + server->oplock_credits;
36 server->oplock_credits = server->echo_credits = 0;
37 switch (server->credits) {
38 case 0:
39 return 0;
40 case 1:
41 server->echoes = false;
42 server->oplocks = false;
43 break;
44 case 2:
45 server->echoes = true;
46 server->oplocks = false;
47 server->echo_credits = 1;
48 break;
49 default:
50 server->echoes = true;
51 if (enable_oplocks) {
52 server->oplocks = true;
53 server->oplock_credits = 1;
54 } else
55 server->oplocks = false;
56
57 server->echo_credits = 1;
58 }
59 server->credits -= server->echo_credits + server->oplock_credits;
60 return server->credits + server->echo_credits + server->oplock_credits;
61 }
62
63 static void
64 smb2_add_credits(struct TCP_Server_Info *server,
65 const struct cifs_credits *credits, const int optype)
66 {
67 int *val, rc = -1;
68 int scredits, in_flight;
69 unsigned int add = credits->value;
70 unsigned int instance = credits->instance;
71 bool reconnect_detected = false;
72 bool reconnect_with_invalid_credits = false;
73
74 spin_lock(&server->req_lock);
75 val = server->ops->get_credits_field(server, optype);
76
77 /* eg found case where write overlapping reconnect messed up credits */
78 if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
79 reconnect_with_invalid_credits = true;
80
81 if ((instance == 0) || (instance == server->reconnect_instance))
82 *val += add;
83 else
84 reconnect_detected = true;
85
86 if (*val > 65000) {
87 *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
88 pr_warn_once("server overflowed SMB3 credits\n");
89 trace_smb3_overflow_credits(server->CurrentMid,
90 server->conn_id, server->hostname, *val,
91 add, server->in_flight);
92 }
93 server->in_flight--;
94 if (server->in_flight == 0 &&
95 ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
96 ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
97 rc = change_conf(server);
98 /*
99 * Sometimes server returns 0 credits on oplock break ack - we need to
100 * rebalance credits in this case.
101 */
102 else if (server->in_flight > 0 && server->oplock_credits == 0 &&
103 server->oplocks) {
104 if (server->credits > 1) {
105 server->credits--;
106 server->oplock_credits++;
107 }
108 }
109 scredits = *val;
110 in_flight = server->in_flight;
111 spin_unlock(&server->req_lock);
112 wake_up(&server->request_q);
113
114 if (reconnect_detected) {
115 trace_smb3_reconnect_detected(server->CurrentMid,
116 server->conn_id, server->hostname, scredits, add, in_flight);
117
118 cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
119 add, instance);
120 }
121
122 if (reconnect_with_invalid_credits) {
123 trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
124 server->conn_id, server->hostname, scredits, add, in_flight);
125 cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
126 optype, scredits, add);
127 }
128
129 spin_lock(&cifs_tcp_ses_lock);
130 if (server->tcpStatus == CifsNeedReconnect
131 || server->tcpStatus == CifsExiting) {
132 spin_unlock(&cifs_tcp_ses_lock);
133 return;
134 }
135 spin_unlock(&cifs_tcp_ses_lock);
136
137 switch (rc) {
138 case -1:
139 /* change_conf hasn't been executed */
140 break;
141 case 0:
142 cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
143 break;
144 case 1:
145 cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
146 break;
147 case 2:
148 cifs_dbg(FYI, "disabling oplocks\n");
149 break;
150 default:
151 /* change_conf rebalanced credits for different types */
152 break;
153 }
154
155 trace_smb3_add_credits(server->CurrentMid,
156 server->conn_id, server->hostname, scredits, add, in_flight);
157 cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
158 }
159
160 static void
161 smb2_set_credits(struct TCP_Server_Info *server, const int val)
162 {
163 int scredits, in_flight;
164
165 spin_lock(&server->req_lock);
166 server->credits = val;
167 if (val == 1)
168 server->reconnect_instance++;
169 scredits = server->credits;
170 in_flight = server->in_flight;
171 spin_unlock(&server->req_lock);
172
173 trace_smb3_set_credits(server->CurrentMid,
174 server->conn_id, server->hostname, scredits, val, in_flight);
175 cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
176
177 /* don't log while holding the lock */
178 if (val == 1)
179 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
180 }
181
182 static int *
183 smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
184 {
185 switch (optype) {
186 case CIFS_ECHO_OP:
187 return &server->echo_credits;
188 case CIFS_OBREAK_OP:
189 return &server->oplock_credits;
190 default:
191 return &server->credits;
192 }
193 }
194
195 static unsigned int
196 smb2_get_credits(struct mid_q_entry *mid)
197 {
198 return mid->credits_received;
199 }
200
201 static int
202 smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
203 unsigned int *num, struct cifs_credits *credits)
204 {
205 int rc = 0;
206 unsigned int scredits, in_flight;
207
208 spin_lock(&server->req_lock);
209 while (1) {
210 if (server->credits <= 0) {
211 spin_unlock(&server->req_lock);
212 cifs_num_waiters_inc(server);
213 rc = wait_event_killable(server->request_q,
214 has_credits(server, &server->credits, 1));
215 cifs_num_waiters_dec(server);
216 if (rc)
217 return rc;
218 spin_lock(&server->req_lock);
219 } else {
220 spin_unlock(&server->req_lock);
221 spin_lock(&cifs_tcp_ses_lock);
222 if (server->tcpStatus == CifsExiting) {
223 spin_unlock(&cifs_tcp_ses_lock);
224 return -ENOENT;
225 }
226 spin_unlock(&cifs_tcp_ses_lock);
227
228 spin_lock(&server->req_lock);
229 scredits = server->credits;
230 /* can deadlock with reopen */
231 if (scredits <= 8) {
232 *num = SMB2_MAX_BUFFER_SIZE;
233 credits->value = 0;
234 credits->instance = 0;
235 break;
236 }
237
238 /* leave some credits for reopen and other ops */
239 scredits -= 8;
240 *num = min_t(unsigned int, size,
241 scredits * SMB2_MAX_BUFFER_SIZE);
242
243 credits->value =
244 DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
245 credits->instance = server->reconnect_instance;
246 server->credits -= credits->value;
247 server->in_flight++;
248 if (server->in_flight > server->max_in_flight)
249 server->max_in_flight = server->in_flight;
250 break;
251 }
252 }
253 scredits = server->credits;
254 in_flight = server->in_flight;
255 spin_unlock(&server->req_lock);
256
257 trace_smb3_wait_credits(server->CurrentMid,
258 server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
259 cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
260 __func__, credits->value, scredits);
261
262 return rc;
263 }
264
265 static int
266 smb2_adjust_credits(struct TCP_Server_Info *server,
267 struct cifs_credits *credits,
268 const unsigned int payload_size)
269 {
270 int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
271 int scredits, in_flight;
272
273 if (!credits->value || credits->value == new_val)
274 return 0;
275
276 if (credits->value < new_val) {
277 trace_smb3_too_many_credits(server->CurrentMid,
278 server->conn_id, server->hostname, 0, credits->value - new_val, 0);
279 cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
280 credits->value, new_val);
281
282 return -ENOTSUPP;
283 }
284
285 spin_lock(&server->req_lock);
286
287 if (server->reconnect_instance != credits->instance) {
288 scredits = server->credits;
289 in_flight = server->in_flight;
290 spin_unlock(&server->req_lock);
291
292 trace_smb3_reconnect_detected(server->CurrentMid,
293 server->conn_id, server->hostname, scredits,
294 credits->value - new_val, in_flight);
295 cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
296 credits->value - new_val);
297 return -EAGAIN;
298 }
299
300 server->credits += credits->value - new_val;
301 scredits = server->credits;
302 in_flight = server->in_flight;
303 spin_unlock(&server->req_lock);
304 wake_up(&server->request_q);
305
306 trace_smb3_adj_credits(server->CurrentMid,
307 server->conn_id, server->hostname, scredits,
308 credits->value - new_val, in_flight);
309 cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
310 __func__, credits->value - new_val, scredits);
311
312 credits->value = new_val;
313
314 return 0;
315 }
316
317 static __u64
318 smb2_get_next_mid(struct TCP_Server_Info *server)
319 {
320 __u64 mid;
321 /* for SMB2 we need the current value */
322 spin_lock(&GlobalMid_Lock);
323 mid = server->CurrentMid++;
324 spin_unlock(&GlobalMid_Lock);
325 return mid;
326 }
327
328 static void
329 smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
330 {
331 spin_lock(&GlobalMid_Lock);
332 if (server->CurrentMid >= val)
333 server->CurrentMid -= val;
334 spin_unlock(&GlobalMid_Lock);
335 }
336
337 static struct mid_q_entry *
338 __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
339 {
340 struct mid_q_entry *mid;
341 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
342 __u64 wire_mid = le64_to_cpu(shdr->MessageId);
343
344 if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
345 cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
346 return NULL;
347 }
348
349 spin_lock(&GlobalMid_Lock);
350 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
351 if ((mid->mid == wire_mid) &&
352 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
353 (mid->command == shdr->Command)) {
354 kref_get(&mid->refcount);
355 if (dequeue) {
356 list_del_init(&mid->qhead);
357 mid->mid_flags |= MID_DELETED;
358 }
359 spin_unlock(&GlobalMid_Lock);
360 return mid;
361 }
362 }
363 spin_unlock(&GlobalMid_Lock);
364 return NULL;
365 }
366
367 static struct mid_q_entry *
368 smb2_find_mid(struct TCP_Server_Info *server, char *buf)
369 {
370 return __smb2_find_mid(server, buf, false);
371 }
372
373 static struct mid_q_entry *
374 smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
375 {
376 return __smb2_find_mid(server, buf, true);
377 }
378
379 static void
380 smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
381 {
382 #ifdef CONFIG_CIFS_DEBUG2
383 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
384
385 cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
386 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
387 shdr->Id.SyncId.ProcessId);
388 cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
389 server->ops->calc_smb_size(buf, server));
390 #endif
391 }
392
393 static bool
394 smb2_need_neg(struct TCP_Server_Info *server)
395 {
396 return server->max_read == 0;
397 }
398
399 static int
400 smb2_negotiate(const unsigned int xid,
401 struct cifs_ses *ses,
402 struct TCP_Server_Info *server)
403 {
404 int rc;
405
406 spin_lock(&GlobalMid_Lock);
407 server->CurrentMid = 0;
408 spin_unlock(&GlobalMid_Lock);
409 rc = SMB2_negotiate(xid, ses, server);
410 /* BB we probably don't need to retry with modern servers */
411 if (rc == -EAGAIN)
412 rc = -EHOSTDOWN;
413 return rc;
414 }
415
416 static unsigned int
417 smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
418 {
419 struct TCP_Server_Info *server = tcon->ses->server;
420 unsigned int wsize;
421
422 /* start with specified wsize, or default */
423 wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE;
424 wsize = min_t(unsigned int, wsize, server->max_write);
425 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
426 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
427
428 return wsize;
429 }
430
431 static unsigned int
432 smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
433 {
434 struct TCP_Server_Info *server = tcon->ses->server;
435 unsigned int wsize;
436
437 /* start with specified wsize, or default */
438 wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE;
439 wsize = min_t(unsigned int, wsize, server->max_write);
440 #ifdef CONFIG_CIFS_SMB_DIRECT
441 if (server->rdma) {
442 if (server->sign)
443 /*
444 * Account for SMB2 data transfer packet header and
445 * possible encryption header
446 */
447 wsize = min_t(unsigned int,
448 wsize,
449 server->smbd_conn->max_fragmented_send_size -
450 SMB2_READWRITE_PDU_HEADER_SIZE -
451 sizeof(struct smb2_transform_hdr));
452 else
453 wsize = min_t(unsigned int,
454 wsize, server->smbd_conn->max_readwrite_size);
455 }
456 #endif
457 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
458 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
459
460 return wsize;
461 }
462
463 static unsigned int
464 smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
465 {
466 struct TCP_Server_Info *server = tcon->ses->server;
467 unsigned int rsize;
468
469 /* start with specified rsize, or default */
470 rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
471 rsize = min_t(unsigned int, rsize, server->max_read);
472
473 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
474 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
475
476 return rsize;
477 }
478
479 static unsigned int
480 smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
481 {
482 struct TCP_Server_Info *server = tcon->ses->server;
483 unsigned int rsize;
484
485 /* start with specified rsize, or default */
486 rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE;
487 rsize = min_t(unsigned int, rsize, server->max_read);
488 #ifdef CONFIG_CIFS_SMB_DIRECT
489 if (server->rdma) {
490 if (server->sign)
491 /*
492 * Account for SMB2 data transfer packet header and
493 * possible encryption header
494 */
495 rsize = min_t(unsigned int,
496 rsize,
497 server->smbd_conn->max_fragmented_recv_size -
498 SMB2_READWRITE_PDU_HEADER_SIZE -
499 sizeof(struct smb2_transform_hdr));
500 else
501 rsize = min_t(unsigned int,
502 rsize, server->smbd_conn->max_readwrite_size);
503 }
504 #endif
505
506 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
507 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
508
509 return rsize;
510 }
511
512 static int
513 parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
514 size_t buf_len,
515 struct cifs_ses *ses)
516 {
517 struct network_interface_info_ioctl_rsp *p;
518 struct sockaddr_in *addr4;
519 struct sockaddr_in6 *addr6;
520 struct iface_info_ipv4 *p4;
521 struct iface_info_ipv6 *p6;
522 struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
523 struct cifs_server_iface tmp_iface;
524 ssize_t bytes_left;
525 size_t next = 0;
526 int nb_iface = 0;
527 int rc = 0, ret = 0;
528
529 bytes_left = buf_len;
530 p = buf;
531
532 spin_lock(&ses->iface_lock);
533 /*
534 * Go through iface_list and do kref_put to remove
535 * any unused ifaces. ifaces in use will be removed
536 * when the last user calls a kref_put on it
537 */
538 list_for_each_entry_safe(iface, niface, &ses->iface_list,
539 iface_head) {
540 iface->is_active = 0;
541 kref_put(&iface->refcount, release_iface);
542 }
543 spin_unlock(&ses->iface_lock);
544
545 while (bytes_left >= sizeof(*p)) {
546 memset(&tmp_iface, 0, sizeof(tmp_iface));
547 tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
548 tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
549 tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
550
551 switch (p->Family) {
552 /*
553 * The kernel and wire socket structures have the same
554 * layout and use network byte order but make the
555 * conversion explicit in case either one changes.
556 */
557 case INTERNETWORK:
558 addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
559 p4 = (struct iface_info_ipv4 *)p->Buffer;
560 addr4->sin_family = AF_INET;
561 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
562
563 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
564 addr4->sin_port = cpu_to_be16(CIFS_PORT);
565
566 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
567 &addr4->sin_addr);
568 break;
569 case INTERNETWORKV6:
570 addr6 = (struct sockaddr_in6 *)&tmp_iface.sockaddr;
571 p6 = (struct iface_info_ipv6 *)p->Buffer;
572 addr6->sin6_family = AF_INET6;
573 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
574
575 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
576 addr6->sin6_flowinfo = 0;
577 addr6->sin6_scope_id = 0;
578 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
579
580 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
581 &addr6->sin6_addr);
582 break;
583 default:
584 cifs_dbg(VFS,
585 "%s: skipping unsupported socket family\n",
586 __func__);
587 goto next_iface;
588 }
589
590 /*
591 * The iface_list is assumed to be sorted by speed.
592 * Check if the new interface exists in that list.
593 * NEVER change iface. it could be in use.
594 * Add a new one instead
595 */
596 spin_lock(&ses->iface_lock);
597 iface = niface = NULL;
598 list_for_each_entry_safe(iface, niface, &ses->iface_list,
599 iface_head) {
600 ret = iface_cmp(iface, &tmp_iface);
601 if (!ret) {
602 /* just get a ref so that it doesn't get picked/freed */
603 iface->is_active = 1;
604 kref_get(&iface->refcount);
605 spin_unlock(&ses->iface_lock);
606 goto next_iface;
607 } else if (ret < 0) {
608 /* all remaining ifaces are slower */
609 kref_get(&iface->refcount);
610 break;
611 }
612 }
613 spin_unlock(&ses->iface_lock);
614
615 /* no match. insert the entry in the list */
616 info = kmalloc(sizeof(struct cifs_server_iface),
617 GFP_KERNEL);
618 if (!info) {
619 rc = -ENOMEM;
620 goto out;
621 }
622 memcpy(info, &tmp_iface, sizeof(tmp_iface));
623
624 /* add this new entry to the list */
625 kref_init(&info->refcount);
626 info->is_active = 1;
627
628 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count);
629 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
630 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
631 le32_to_cpu(p->Capability));
632
633 spin_lock(&ses->iface_lock);
634 if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
635 list_add_tail(&info->iface_head, &iface->iface_head);
636 kref_put(&iface->refcount, release_iface);
637 } else
638 list_add_tail(&info->iface_head, &ses->iface_list);
639 spin_unlock(&ses->iface_lock);
640
641 ses->iface_count++;
642 ses->iface_last_update = jiffies;
643 next_iface:
644 nb_iface++;
645 next = le32_to_cpu(p->Next);
646 if (!next) {
647 bytes_left -= sizeof(*p);
648 break;
649 }
650 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
651 bytes_left -= next;
652 }
653
654 if (!nb_iface) {
655 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
656 rc = -EINVAL;
657 goto out;
658 }
659
660 /* Azure rounds the buffer size up 8, to a 16 byte boundary */
661 if ((bytes_left > 8) || p->Next)
662 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
663
664
665 if (!ses->iface_count) {
666 rc = -EINVAL;
667 goto out;
668 }
669
670 out:
671 return rc;
672 }
673
674 int
675 SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
676 {
677 int rc;
678 unsigned int ret_data_len = 0;
679 struct network_interface_info_ioctl_rsp *out_buf = NULL;
680 struct cifs_ses *ses = tcon->ses;
681
682 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
683 FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
684 NULL /* no data input */, 0 /* no data input */,
685 CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
686 if (rc == -EOPNOTSUPP) {
687 cifs_dbg(FYI,
688 "server does not support query network interfaces\n");
689 goto out;
690 } else if (rc != 0) {
691 cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
692 goto out;
693 }
694
695 rc = parse_server_interfaces(out_buf, ret_data_len, ses);
696 if (rc)
697 goto out;
698
699 out:
700 kfree(out_buf);
701 return rc;
702 }
703
704 static void
705 smb2_close_cached_fid(struct kref *ref)
706 {
707 struct cached_fid *cfid = container_of(ref, struct cached_fid,
708 refcount);
709 struct cached_dirent *dirent, *q;
710
711 if (cfid->is_valid) {
712 cifs_dbg(FYI, "clear cached root file handle\n");
713 SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
714 cfid->fid->volatile_fid);
715 }
716
717 /*
718 * We only check validity above to send SMB2_close,
719 * but we still need to invalidate these entries
720 * when this function is called
721 */
722 cfid->is_valid = false;
723 cfid->file_all_info_is_valid = false;
724 cfid->has_lease = false;
725 if (cfid->dentry) {
726 dput(cfid->dentry);
727 cfid->dentry = NULL;
728 }
729 /*
730 * Delete all cached dirent names
731 */
732 mutex_lock(&cfid->dirents.de_mutex);
733 list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
734 list_del(&dirent->entry);
735 kfree(dirent->name);
736 kfree(dirent);
737 }
738 cfid->dirents.is_valid = 0;
739 cfid->dirents.is_failed = 0;
740 cfid->dirents.ctx = NULL;
741 cfid->dirents.pos = 0;
742 mutex_unlock(&cfid->dirents.de_mutex);
743
744 }
745
746 void close_cached_dir(struct cached_fid *cfid)
747 {
748 mutex_lock(&cfid->fid_mutex);
749 kref_put(&cfid->refcount, smb2_close_cached_fid);
750 mutex_unlock(&cfid->fid_mutex);
751 }
752
753 void close_cached_dir_lease_locked(struct cached_fid *cfid)
754 {
755 if (cfid->has_lease) {
756 cfid->has_lease = false;
757 kref_put(&cfid->refcount, smb2_close_cached_fid);
758 }
759 }
760
761 void close_cached_dir_lease(struct cached_fid *cfid)
762 {
763 mutex_lock(&cfid->fid_mutex);
764 close_cached_dir_lease_locked(cfid);
765 mutex_unlock(&cfid->fid_mutex);
766 }
767
768 void
769 smb2_cached_lease_break(struct work_struct *work)
770 {
771 struct cached_fid *cfid = container_of(work,
772 struct cached_fid, lease_break);
773
774 close_cached_dir_lease(cfid);
775 }
776
777 /*
778 * Open the and cache a directory handle.
779 * Only supported for the root handle.
780 * If error then *cfid is not initialized.
781 */
782 int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
783 const char *path,
784 struct cifs_sb_info *cifs_sb,
785 struct cached_fid **cfid)
786 {
787 struct cifs_ses *ses;
788 struct TCP_Server_Info *server;
789 struct cifs_open_parms oparms;
790 struct smb2_create_rsp *o_rsp = NULL;
791 struct smb2_query_info_rsp *qi_rsp = NULL;
792 int resp_buftype[2];
793 struct smb_rqst rqst[2];
794 struct kvec rsp_iov[2];
795 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
796 struct kvec qi_iov[1];
797 int rc, flags = 0;
798 __le16 utf16_path = 0; /* Null - since an open of top of share */
799 u8 oplock = SMB2_OPLOCK_LEVEL_II;
800 struct cifs_fid *pfid;
801 struct dentry *dentry;
802
803 if (tcon == NULL || tcon->nohandlecache ||
804 is_smb1_server(tcon->ses->server))
805 return -ENOTSUPP;
806
807 ses = tcon->ses;
808 server = ses->server;
809
810 if (cifs_sb->root == NULL)
811 return -ENOENT;
812
813 if (strlen(path))
814 return -ENOENT;
815
816 dentry = cifs_sb->root;
817
818 mutex_lock(&tcon->crfid.fid_mutex);
819 if (tcon->crfid.is_valid) {
820 cifs_dbg(FYI, "found a cached root file handle\n");
821 *cfid = &tcon->crfid;
822 kref_get(&tcon->crfid.refcount);
823 mutex_unlock(&tcon->crfid.fid_mutex);
824 return 0;
825 }
826
827 /*
828 * We do not hold the lock for the open because in case
829 * SMB2_open needs to reconnect, it will end up calling
830 * cifs_mark_open_files_invalid() which takes the lock again
831 * thus causing a deadlock
832 */
833
834 mutex_unlock(&tcon->crfid.fid_mutex);
835
836 if (smb3_encryption_required(tcon))
837 flags |= CIFS_TRANSFORM_REQ;
838
839 if (!server->ops->new_lease_key)
840 return -EIO;
841
842 pfid = tcon->crfid.fid;
843 server->ops->new_lease_key(pfid);
844
845 memset(rqst, 0, sizeof(rqst));
846 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
847 memset(rsp_iov, 0, sizeof(rsp_iov));
848
849 /* Open */
850 memset(&open_iov, 0, sizeof(open_iov));
851 rqst[0].rq_iov = open_iov;
852 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
853
854 oparms.tcon = tcon;
855 oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE);
856 oparms.desired_access = FILE_READ_ATTRIBUTES;
857 oparms.disposition = FILE_OPEN;
858 oparms.fid = pfid;
859 oparms.reconnect = false;
860
861 rc = SMB2_open_init(tcon, server,
862 &rqst[0], &oplock, &oparms, &utf16_path);
863 if (rc)
864 goto oshr_free;
865 smb2_set_next_command(tcon, &rqst[0]);
866
867 memset(&qi_iov, 0, sizeof(qi_iov));
868 rqst[1].rq_iov = qi_iov;
869 rqst[1].rq_nvec = 1;
870
871 rc = SMB2_query_info_init(tcon, server,
872 &rqst[1], COMPOUND_FID,
873 COMPOUND_FID, FILE_ALL_INFORMATION,
874 SMB2_O_INFO_FILE, 0,
875 sizeof(struct smb2_file_all_info) +
876 PATH_MAX * 2, 0, NULL);
877 if (rc)
878 goto oshr_free;
879
880 smb2_set_related(&rqst[1]);
881
882 rc = compound_send_recv(xid, ses, server,
883 flags, 2, rqst,
884 resp_buftype, rsp_iov);
885 mutex_lock(&tcon->crfid.fid_mutex);
886
887 /*
888 * Now we need to check again as the cached root might have
889 * been successfully re-opened from a concurrent process
890 */
891
892 if (tcon->crfid.is_valid) {
893 /* work was already done */
894
895 /* stash fids for close() later */
896 struct cifs_fid fid = {
897 .persistent_fid = pfid->persistent_fid,
898 .volatile_fid = pfid->volatile_fid,
899 };
900
901 /*
902 * caller expects this func to set the fid in crfid to valid
903 * cached root, so increment the refcount.
904 */
905 kref_get(&tcon->crfid.refcount);
906
907 mutex_unlock(&tcon->crfid.fid_mutex);
908
909 if (rc == 0) {
910 /* close extra handle outside of crit sec */
911 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
912 }
913 rc = 0;
914 goto oshr_free;
915 }
916
917 /* Cached root is still invalid, continue normaly */
918
919 if (rc) {
920 if (rc == -EREMCHG) {
921 tcon->need_reconnect = true;
922 pr_warn_once("server share %s deleted\n",
923 tcon->treeName);
924 }
925 goto oshr_exit;
926 }
927
928 atomic_inc(&tcon->num_remote_opens);
929
930 o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
931 oparms.fid->persistent_fid = o_rsp->PersistentFileId;
932 oparms.fid->volatile_fid = o_rsp->VolatileFileId;
933 #ifdef CONFIG_CIFS_DEBUG2
934 oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
935 #endif /* CIFS_DEBUG2 */
936
937 tcon->crfid.tcon = tcon;
938 tcon->crfid.is_valid = true;
939 tcon->crfid.dentry = dentry;
940 dget(dentry);
941 kref_init(&tcon->crfid.refcount);
942
943 /* BB TBD check to see if oplock level check can be removed below */
944 if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
945 /*
946 * See commit 2f94a3125b87. Increment the refcount when we
947 * get a lease for root, release it if lease break occurs
948 */
949 kref_get(&tcon->crfid.refcount);
950 tcon->crfid.has_lease = true;
951 smb2_parse_contexts(server, o_rsp,
952 &oparms.fid->epoch,
953 oparms.fid->lease_key, &oplock,
954 NULL, NULL);
955 } else
956 goto oshr_exit;
957
958 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
959 if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
960 goto oshr_exit;
961 if (!smb2_validate_and_copy_iov(
962 le16_to_cpu(qi_rsp->OutputBufferOffset),
963 sizeof(struct smb2_file_all_info),
964 &rsp_iov[1], sizeof(struct smb2_file_all_info),
965 (char *)&tcon->crfid.file_all_info))
966 tcon->crfid.file_all_info_is_valid = true;
967 tcon->crfid.time = jiffies;
968
969
970 oshr_exit:
971 mutex_unlock(&tcon->crfid.fid_mutex);
972 oshr_free:
973 SMB2_open_free(&rqst[0]);
974 SMB2_query_info_free(&rqst[1]);
975 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
976 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
977 if (rc == 0)
978 *cfid = &tcon->crfid;
979 return rc;
980 }
981
982 int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
983 struct dentry *dentry,
984 struct cached_fid **cfid)
985 {
986 mutex_lock(&tcon->crfid.fid_mutex);
987 if (tcon->crfid.dentry == dentry) {
988 cifs_dbg(FYI, "found a cached root file handle by dentry\n");
989 *cfid = &tcon->crfid;
990 kref_get(&tcon->crfid.refcount);
991 mutex_unlock(&tcon->crfid.fid_mutex);
992 return 0;
993 }
994 mutex_unlock(&tcon->crfid.fid_mutex);
995 return -ENOENT;
996 }
997
998 static void
999 smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
1000 struct cifs_sb_info *cifs_sb)
1001 {
1002 int rc;
1003 __le16 srch_path = 0; /* Null - open root of share */
1004 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1005 struct cifs_open_parms oparms;
1006 struct cifs_fid fid;
1007 struct cached_fid *cfid = NULL;
1008
1009 oparms.tcon = tcon;
1010 oparms.desired_access = FILE_READ_ATTRIBUTES;
1011 oparms.disposition = FILE_OPEN;
1012 oparms.create_options = cifs_create_options(cifs_sb, 0);
1013 oparms.fid = &fid;
1014 oparms.reconnect = false;
1015
1016 rc = open_cached_dir(xid, tcon, "", cifs_sb, &cfid);
1017 if (rc == 0)
1018 memcpy(&fid, cfid->fid, sizeof(struct cifs_fid));
1019 else
1020 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
1021 NULL, NULL);
1022 if (rc)
1023 return;
1024
1025 SMB3_request_interfaces(xid, tcon);
1026
1027 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1028 FS_ATTRIBUTE_INFORMATION);
1029 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1030 FS_DEVICE_INFORMATION);
1031 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1032 FS_VOLUME_INFORMATION);
1033 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1034 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
1035 if (cfid == NULL)
1036 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
1037 else
1038 close_cached_dir(cfid);
1039 }
1040
1041 static void
1042 smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
1043 struct cifs_sb_info *cifs_sb)
1044 {
1045 int rc;
1046 __le16 srch_path = 0; /* Null - open root of share */
1047 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1048 struct cifs_open_parms oparms;
1049 struct cifs_fid fid;
1050
1051 oparms.tcon = tcon;
1052 oparms.desired_access = FILE_READ_ATTRIBUTES;
1053 oparms.disposition = FILE_OPEN;
1054 oparms.create_options = cifs_create_options(cifs_sb, 0);
1055 oparms.fid = &fid;
1056 oparms.reconnect = false;
1057
1058 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
1059 NULL, NULL);
1060 if (rc)
1061 return;
1062
1063 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1064 FS_ATTRIBUTE_INFORMATION);
1065 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
1066 FS_DEVICE_INFORMATION);
1067 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
1068 }
1069
1070 static int
1071 smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
1072 struct cifs_sb_info *cifs_sb, const char *full_path)
1073 {
1074 int rc;
1075 __le16 *utf16_path;
1076 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1077 struct cifs_open_parms oparms;
1078 struct cifs_fid fid;
1079
1080 if ((*full_path == 0) && tcon->crfid.is_valid)
1081 return 0;
1082
1083 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
1084 if (!utf16_path)
1085 return -ENOMEM;
1086
1087 oparms.tcon = tcon;
1088 oparms.desired_access = FILE_READ_ATTRIBUTES;
1089 oparms.disposition = FILE_OPEN;
1090 oparms.create_options = cifs_create_options(cifs_sb, 0);
1091 oparms.fid = &fid;
1092 oparms.reconnect = false;
1093
1094 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
1095 NULL);
1096 if (rc) {
1097 kfree(utf16_path);
1098 return rc;
1099 }
1100
1101 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
1102 kfree(utf16_path);
1103 return rc;
1104 }
1105
1106 static int
1107 smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
1108 struct cifs_sb_info *cifs_sb, const char *full_path,
1109 u64 *uniqueid, FILE_ALL_INFO *data)
1110 {
1111 *uniqueid = le64_to_cpu(data->IndexNumber);
1112 return 0;
1113 }
1114
1115 static int
1116 smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
1117 struct cifs_fid *fid, FILE_ALL_INFO *data)
1118 {
1119 int rc;
1120 struct smb2_file_all_info *smb2_data;
1121
1122 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
1123 GFP_KERNEL);
1124 if (smb2_data == NULL)
1125 return -ENOMEM;
1126
1127 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
1128 smb2_data);
1129 if (!rc)
1130 move_smb2_info_to_cifs(data, smb2_data);
1131 kfree(smb2_data);
1132 return rc;
1133 }
1134
1135 #ifdef CONFIG_CIFS_XATTR
1136 static ssize_t
1137 move_smb2_ea_to_cifs(char *dst, size_t dst_size,
1138 struct smb2_file_full_ea_info *src, size_t src_size,
1139 const unsigned char *ea_name)
1140 {
1141 int rc = 0;
1142 unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
1143 char *name, *value;
1144 size_t buf_size = dst_size;
1145 size_t name_len, value_len, user_name_len;
1146
1147 while (src_size > 0) {
1148 name = &src->ea_data[0];
1149 name_len = (size_t)src->ea_name_length;
1150 value = &src->ea_data[src->ea_name_length + 1];
1151 value_len = (size_t)le16_to_cpu(src->ea_value_length);
1152
1153 if (name_len == 0)
1154 break;
1155
1156 if (src_size < 8 + name_len + 1 + value_len) {
1157 cifs_dbg(FYI, "EA entry goes beyond length of list\n");
1158 rc = -EIO;
1159 goto out;
1160 }
1161
1162 if (ea_name) {
1163 if (ea_name_len == name_len &&
1164 memcmp(ea_name, name, name_len) == 0) {
1165 rc = value_len;
1166 if (dst_size == 0)
1167 goto out;
1168 if (dst_size < value_len) {
1169 rc = -ERANGE;
1170 goto out;
1171 }
1172 memcpy(dst, value, value_len);
1173 goto out;
1174 }
1175 } else {
1176 /* 'user.' plus a terminating null */
1177 user_name_len = 5 + 1 + name_len;
1178
1179 if (buf_size == 0) {
1180 /* skip copy - calc size only */
1181 rc += user_name_len;
1182 } else if (dst_size >= user_name_len) {
1183 dst_size -= user_name_len;
1184 memcpy(dst, "user.", 5);
1185 dst += 5;
1186 memcpy(dst, src->ea_data, name_len);
1187 dst += name_len;
1188 *dst = 0;
1189 ++dst;
1190 rc += user_name_len;
1191 } else {
1192 /* stop before overrun buffer */
1193 rc = -ERANGE;
1194 break;
1195 }
1196 }
1197
1198 if (!src->next_entry_offset)
1199 break;
1200
1201 if (src_size < le32_to_cpu(src->next_entry_offset)) {
1202 /* stop before overrun buffer */
1203 rc = -ERANGE;
1204 break;
1205 }
1206 src_size -= le32_to_cpu(src->next_entry_offset);
1207 src = (void *)((char *)src +
1208 le32_to_cpu(src->next_entry_offset));
1209 }
1210
1211 /* didn't find the named attribute */
1212 if (ea_name)
1213 rc = -ENODATA;
1214
1215 out:
1216 return (ssize_t)rc;
1217 }
1218
1219 static ssize_t
1220 smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
1221 const unsigned char *path, const unsigned char *ea_name,
1222 char *ea_data, size_t buf_size,
1223 struct cifs_sb_info *cifs_sb)
1224 {
1225 int rc;
1226 struct kvec rsp_iov = {NULL, 0};
1227 int buftype = CIFS_NO_BUFFER;
1228 struct smb2_query_info_rsp *rsp;
1229 struct smb2_file_full_ea_info *info = NULL;
1230
1231 rc = smb2_query_info_compound(xid, tcon, path,
1232 FILE_READ_EA,
1233 FILE_FULL_EA_INFORMATION,
1234 SMB2_O_INFO_FILE,
1235 CIFSMaxBufSize -
1236 MAX_SMB2_CREATE_RESPONSE_SIZE -
1237 MAX_SMB2_CLOSE_RESPONSE_SIZE,
1238 &rsp_iov, &buftype, cifs_sb);
1239 if (rc) {
1240 /*
1241 * If ea_name is NULL (listxattr) and there are no EAs,
1242 * return 0 as it's not an error. Otherwise, the specified
1243 * ea_name was not found.
1244 */
1245 if (!ea_name && rc == -ENODATA)
1246 rc = 0;
1247 goto qeas_exit;
1248 }
1249
1250 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
1251 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
1252 le32_to_cpu(rsp->OutputBufferLength),
1253 &rsp_iov,
1254 sizeof(struct smb2_file_full_ea_info));
1255 if (rc)
1256 goto qeas_exit;
1257
1258 info = (struct smb2_file_full_ea_info *)(
1259 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
1260 rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
1261 le32_to_cpu(rsp->OutputBufferLength), ea_name);
1262
1263 qeas_exit:
1264 free_rsp_buf(buftype, rsp_iov.iov_base);
1265 return rc;
1266 }
1267
1268
1269 static int
1270 smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1271 const char *path, const char *ea_name, const void *ea_value,
1272 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1273 struct cifs_sb_info *cifs_sb)
1274 {
1275 struct cifs_ses *ses = tcon->ses;
1276 struct TCP_Server_Info *server = cifs_pick_channel(ses);
1277 __le16 *utf16_path = NULL;
1278 int ea_name_len = strlen(ea_name);
1279 int flags = CIFS_CP_CREATE_CLOSE_OP;
1280 int len;
1281 struct smb_rqst rqst[3];
1282 int resp_buftype[3];
1283 struct kvec rsp_iov[3];
1284 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1285 struct cifs_open_parms oparms;
1286 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1287 struct cifs_fid fid;
1288 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1289 unsigned int size[1];
1290 void *data[1];
1291 struct smb2_file_full_ea_info *ea = NULL;
1292 struct kvec close_iov[1];
1293 struct smb2_query_info_rsp *rsp;
1294 int rc, used_len = 0;
1295
1296 if (smb3_encryption_required(tcon))
1297 flags |= CIFS_TRANSFORM_REQ;
1298
1299 if (ea_name_len > 255)
1300 return -EINVAL;
1301
1302 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1303 if (!utf16_path)
1304 return -ENOMEM;
1305
1306 memset(rqst, 0, sizeof(rqst));
1307 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1308 memset(rsp_iov, 0, sizeof(rsp_iov));
1309
1310 if (ses->server->ops->query_all_EAs) {
1311 if (!ea_value) {
1312 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1313 ea_name, NULL, 0,
1314 cifs_sb);
1315 if (rc == -ENODATA)
1316 goto sea_exit;
1317 } else {
1318 /* If we are adding a attribute we should first check
1319 * if there will be enough space available to store
1320 * the new EA. If not we should not add it since we
1321 * would not be able to even read the EAs back.
1322 */
1323 rc = smb2_query_info_compound(xid, tcon, path,
1324 FILE_READ_EA,
1325 FILE_FULL_EA_INFORMATION,
1326 SMB2_O_INFO_FILE,
1327 CIFSMaxBufSize -
1328 MAX_SMB2_CREATE_RESPONSE_SIZE -
1329 MAX_SMB2_CLOSE_RESPONSE_SIZE,
1330 &rsp_iov[1], &resp_buftype[1], cifs_sb);
1331 if (rc == 0) {
1332 rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1333 used_len = le32_to_cpu(rsp->OutputBufferLength);
1334 }
1335 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1336 resp_buftype[1] = CIFS_NO_BUFFER;
1337 memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
1338 rc = 0;
1339
1340 /* Use a fudge factor of 256 bytes in case we collide
1341 * with a different set_EAs command.
1342 */
1343 if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1344 MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1345 used_len + ea_name_len + ea_value_len + 1) {
1346 rc = -ENOSPC;
1347 goto sea_exit;
1348 }
1349 }
1350 }
1351
1352 /* Open */
1353 memset(&open_iov, 0, sizeof(open_iov));
1354 rqst[0].rq_iov = open_iov;
1355 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1356
1357 memset(&oparms, 0, sizeof(oparms));
1358 oparms.tcon = tcon;
1359 oparms.desired_access = FILE_WRITE_EA;
1360 oparms.disposition = FILE_OPEN;
1361 oparms.create_options = cifs_create_options(cifs_sb, 0);
1362 oparms.fid = &fid;
1363 oparms.reconnect = false;
1364
1365 rc = SMB2_open_init(tcon, server,
1366 &rqst[0], &oplock, &oparms, utf16_path);
1367 if (rc)
1368 goto sea_exit;
1369 smb2_set_next_command(tcon, &rqst[0]);
1370
1371
1372 /* Set Info */
1373 memset(&si_iov, 0, sizeof(si_iov));
1374 rqst[1].rq_iov = si_iov;
1375 rqst[1].rq_nvec = 1;
1376
1377 len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
1378 ea = kzalloc(len, GFP_KERNEL);
1379 if (ea == NULL) {
1380 rc = -ENOMEM;
1381 goto sea_exit;
1382 }
1383
1384 ea->ea_name_length = ea_name_len;
1385 ea->ea_value_length = cpu_to_le16(ea_value_len);
1386 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1387 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1388
1389 size[0] = len;
1390 data[0] = ea;
1391
1392 rc = SMB2_set_info_init(tcon, server,
1393 &rqst[1], COMPOUND_FID,
1394 COMPOUND_FID, current->tgid,
1395 FILE_FULL_EA_INFORMATION,
1396 SMB2_O_INFO_FILE, 0, data, size);
1397 smb2_set_next_command(tcon, &rqst[1]);
1398 smb2_set_related(&rqst[1]);
1399
1400
1401 /* Close */
1402 memset(&close_iov, 0, sizeof(close_iov));
1403 rqst[2].rq_iov = close_iov;
1404 rqst[2].rq_nvec = 1;
1405 rc = SMB2_close_init(tcon, server,
1406 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
1407 smb2_set_related(&rqst[2]);
1408
1409 rc = compound_send_recv(xid, ses, server,
1410 flags, 3, rqst,
1411 resp_buftype, rsp_iov);
1412 /* no need to bump num_remote_opens because handle immediately closed */
1413
1414 sea_exit:
1415 kfree(ea);
1416 kfree(utf16_path);
1417 SMB2_open_free(&rqst[0]);
1418 SMB2_set_info_free(&rqst[1]);
1419 SMB2_close_free(&rqst[2]);
1420 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1421 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1422 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
1423 return rc;
1424 }
1425 #endif
1426
1427 static bool
1428 smb2_can_echo(struct TCP_Server_Info *server)
1429 {
1430 return server->echoes;
1431 }
1432
1433 static void
1434 smb2_clear_stats(struct cifs_tcon *tcon)
1435 {
1436 int i;
1437
1438 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1439 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1440 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1441 }
1442 }
1443
1444 static void
1445 smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
1446 {
1447 seq_puts(m, "\n\tShare Capabilities:");
1448 if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
1449 seq_puts(m, " DFS,");
1450 if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
1451 seq_puts(m, " CONTINUOUS AVAILABILITY,");
1452 if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
1453 seq_puts(m, " SCALEOUT,");
1454 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
1455 seq_puts(m, " CLUSTER,");
1456 if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
1457 seq_puts(m, " ASYMMETRIC,");
1458 if (tcon->capabilities == 0)
1459 seq_puts(m, " None");
1460 if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
1461 seq_puts(m, " Aligned,");
1462 if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
1463 seq_puts(m, " Partition Aligned,");
1464 if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
1465 seq_puts(m, " SSD,");
1466 if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
1467 seq_puts(m, " TRIM-support,");
1468
1469 seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
1470 seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
1471 if (tcon->perf_sector_size)
1472 seq_printf(m, "\tOptimal sector size: 0x%x",
1473 tcon->perf_sector_size);
1474 seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
1475 }
1476
1477 static void
1478 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1479 {
1480 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1481 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
1482
1483 /*
1484 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1485 * totals (requests sent) since those SMBs are per-session not per tcon
1486 */
1487 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1488 (long long)(tcon->bytes_read),
1489 (long long)(tcon->bytes_written));
1490 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1491 atomic_read(&tcon->num_local_opens),
1492 atomic_read(&tcon->num_remote_opens));
1493 seq_printf(m, "\nTreeConnects: %d total %d failed",
1494 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1495 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
1496 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
1497 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1498 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
1499 seq_printf(m, "\nCreates: %d total %d failed",
1500 atomic_read(&sent[SMB2_CREATE_HE]),
1501 atomic_read(&failed[SMB2_CREATE_HE]));
1502 seq_printf(m, "\nCloses: %d total %d failed",
1503 atomic_read(&sent[SMB2_CLOSE_HE]),
1504 atomic_read(&failed[SMB2_CLOSE_HE]));
1505 seq_printf(m, "\nFlushes: %d total %d failed",
1506 atomic_read(&sent[SMB2_FLUSH_HE]),
1507 atomic_read(&failed[SMB2_FLUSH_HE]));
1508 seq_printf(m, "\nReads: %d total %d failed",
1509 atomic_read(&sent[SMB2_READ_HE]),
1510 atomic_read(&failed[SMB2_READ_HE]));
1511 seq_printf(m, "\nWrites: %d total %d failed",
1512 atomic_read(&sent[SMB2_WRITE_HE]),
1513 atomic_read(&failed[SMB2_WRITE_HE]));
1514 seq_printf(m, "\nLocks: %d total %d failed",
1515 atomic_read(&sent[SMB2_LOCK_HE]),
1516 atomic_read(&failed[SMB2_LOCK_HE]));
1517 seq_printf(m, "\nIOCTLs: %d total %d failed",
1518 atomic_read(&sent[SMB2_IOCTL_HE]),
1519 atomic_read(&failed[SMB2_IOCTL_HE]));
1520 seq_printf(m, "\nQueryDirectories: %d total %d failed",
1521 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1522 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
1523 seq_printf(m, "\nChangeNotifies: %d total %d failed",
1524 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1525 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
1526 seq_printf(m, "\nQueryInfos: %d total %d failed",
1527 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1528 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
1529 seq_printf(m, "\nSetInfos: %d total %d failed",
1530 atomic_read(&sent[SMB2_SET_INFO_HE]),
1531 atomic_read(&failed[SMB2_SET_INFO_HE]));
1532 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1533 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1534 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
1535 }
1536
1537 static void
1538 smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
1539 {
1540 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1541 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1542
1543 cfile->fid.persistent_fid = fid->persistent_fid;
1544 cfile->fid.volatile_fid = fid->volatile_fid;
1545 cfile->fid.access = fid->access;
1546 #ifdef CONFIG_CIFS_DEBUG2
1547 cfile->fid.mid = fid->mid;
1548 #endif /* CIFS_DEBUG2 */
1549 server->ops->set_oplock_level(cinode, oplock, fid->epoch,
1550 &fid->purge_cache);
1551 cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
1552 memcpy(cfile->fid.create_guid, fid->create_guid, 16);
1553 }
1554
1555 static void
1556 smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1557 struct cifs_fid *fid)
1558 {
1559 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1560 }
1561
1562 static void
1563 smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
1564 struct cifsFileInfo *cfile)
1565 {
1566 struct smb2_file_network_open_info file_inf;
1567 struct inode *inode;
1568 int rc;
1569
1570 rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
1571 cfile->fid.volatile_fid, &file_inf);
1572 if (rc)
1573 return;
1574
1575 inode = d_inode(cfile->dentry);
1576
1577 spin_lock(&inode->i_lock);
1578 CIFS_I(inode)->time = jiffies;
1579
1580 /* Creation time should not need to be updated on close */
1581 if (file_inf.LastWriteTime)
1582 inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
1583 if (file_inf.ChangeTime)
1584 inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
1585 if (file_inf.LastAccessTime)
1586 inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);
1587
1588 /*
1589 * i_blocks is not related to (i_size / i_blksize),
1590 * but instead 512 byte (2**9) size is required for
1591 * calculating num blocks.
1592 */
1593 if (le64_to_cpu(file_inf.AllocationSize) > 4096)
1594 inode->i_blocks =
1595 (512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;
1596
1597 /* End of file and Attributes should not have to be updated on close */
1598 spin_unlock(&inode->i_lock);
1599 }
1600
1601 static int
1602 SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1603 u64 persistent_fid, u64 volatile_fid,
1604 struct copychunk_ioctl *pcchunk)
1605 {
1606 int rc;
1607 unsigned int ret_data_len;
1608 struct resume_key_req *res_key;
1609
1610 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1611 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
1612 NULL, 0 /* no input */, CIFSMaxBufSize,
1613 (char **)&res_key, &ret_data_len);
1614
1615 if (rc == -EOPNOTSUPP) {
1616 pr_warn_once("Server share %s does not support copy range\n", tcon->treeName);
1617 goto req_res_key_exit;
1618 } else if (rc) {
1619 cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
1620 goto req_res_key_exit;
1621 }
1622 if (ret_data_len < sizeof(struct resume_key_req)) {
1623 cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
1624 rc = -EINVAL;
1625 goto req_res_key_exit;
1626 }
1627 memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
1628
1629 req_res_key_exit:
1630 kfree(res_key);
1631 return rc;
1632 }
1633
1634 struct iqi_vars {
1635 struct smb_rqst rqst[3];
1636 struct kvec rsp_iov[3];
1637 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1638 struct kvec qi_iov[1];
1639 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
1640 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1641 struct kvec close_iov[1];
1642 };
1643
1644 static int
1645 smb2_ioctl_query_info(const unsigned int xid,
1646 struct cifs_tcon *tcon,
1647 struct cifs_sb_info *cifs_sb,
1648 __le16 *path, int is_dir,
1649 unsigned long p)
1650 {
1651 struct iqi_vars *vars;
1652 struct smb_rqst *rqst;
1653 struct kvec *rsp_iov;
1654 struct cifs_ses *ses = tcon->ses;
1655 struct TCP_Server_Info *server = cifs_pick_channel(ses);
1656 char __user *arg = (char __user *)p;
1657 struct smb_query_info qi;
1658 struct smb_query_info __user *pqi;
1659 int rc = 0;
1660 int flags = CIFS_CP_CREATE_CLOSE_OP;
1661 struct smb2_query_info_rsp *qi_rsp = NULL;
1662 struct smb2_ioctl_rsp *io_rsp = NULL;
1663 void *buffer = NULL;
1664 int resp_buftype[3];
1665 struct cifs_open_parms oparms;
1666 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1667 struct cifs_fid fid;
1668 unsigned int size[2];
1669 void *data[2];
1670 int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
1671 void (*free_req1_func)(struct smb_rqst *r);
1672
1673 vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
1674 if (vars == NULL)
1675 return -ENOMEM;
1676 rqst = &vars->rqst[0];
1677 rsp_iov = &vars->rsp_iov[0];
1678
1679 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1680
1681 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
1682 rc = -EFAULT;
1683 goto free_vars;
1684 }
1685 if (qi.output_buffer_length > 1024) {
1686 rc = -EINVAL;
1687 goto free_vars;
1688 }
1689
1690 if (!ses || !server) {
1691 rc = -EIO;
1692 goto free_vars;
1693 }
1694
1695 if (smb3_encryption_required(tcon))
1696 flags |= CIFS_TRANSFORM_REQ;
1697
1698 if (qi.output_buffer_length) {
1699 buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
1700 if (IS_ERR(buffer)) {
1701 rc = PTR_ERR(buffer);
1702 goto free_vars;
1703 }
1704 }
1705
1706 /* Open */
1707 rqst[0].rq_iov = &vars->open_iov[0];
1708 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1709
1710 memset(&oparms, 0, sizeof(oparms));
1711 oparms.tcon = tcon;
1712 oparms.disposition = FILE_OPEN;
1713 oparms.create_options = cifs_create_options(cifs_sb, create_options);
1714 oparms.fid = &fid;
1715 oparms.reconnect = false;
1716
1717 if (qi.flags & PASSTHRU_FSCTL) {
1718 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1719 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1720 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
1721 break;
1722 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1723 oparms.desired_access = GENERIC_ALL;
1724 break;
1725 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1726 oparms.desired_access = GENERIC_READ;
1727 break;
1728 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1729 oparms.desired_access = GENERIC_WRITE;
1730 break;
1731 }
1732 } else if (qi.flags & PASSTHRU_SET_INFO) {
1733 oparms.desired_access = GENERIC_WRITE;
1734 } else {
1735 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
1736 }
1737
1738 rc = SMB2_open_init(tcon, server,
1739 &rqst[0], &oplock, &oparms, path);
1740 if (rc)
1741 goto free_output_buffer;
1742 smb2_set_next_command(tcon, &rqst[0]);
1743
1744 /* Query */
1745 if (qi.flags & PASSTHRU_FSCTL) {
1746 /* Can eventually relax perm check since server enforces too */
1747 if (!capable(CAP_SYS_ADMIN)) {
1748 rc = -EPERM;
1749 goto free_open_req;
1750 }
1751 rqst[1].rq_iov = &vars->io_iov[0];
1752 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1753
1754 rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
1755 qi.info_type, true, buffer, qi.output_buffer_length,
1756 CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1757 MAX_SMB2_CLOSE_RESPONSE_SIZE);
1758 free_req1_func = SMB2_ioctl_free;
1759 } else if (qi.flags == PASSTHRU_SET_INFO) {
1760 /* Can eventually relax perm check since server enforces too */
1761 if (!capable(CAP_SYS_ADMIN)) {
1762 rc = -EPERM;
1763 goto free_open_req;
1764 }
1765 if (qi.output_buffer_length < 8) {
1766 rc = -EINVAL;
1767 goto free_open_req;
1768 }
1769 rqst[1].rq_iov = &vars->si_iov[0];
1770 rqst[1].rq_nvec = 1;
1771
1772 /* MS-FSCC 2.4.13 FileEndOfFileInformation */
1773 size[0] = 8;
1774 data[0] = buffer;
1775
1776 rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
1777 current->tgid, FILE_END_OF_FILE_INFORMATION,
1778 SMB2_O_INFO_FILE, 0, data, size);
1779 free_req1_func = SMB2_set_info_free;
1780 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1781 rqst[1].rq_iov = &vars->qi_iov[0];
1782 rqst[1].rq_nvec = 1;
1783
1784 rc = SMB2_query_info_init(tcon, server,
1785 &rqst[1], COMPOUND_FID,
1786 COMPOUND_FID, qi.file_info_class,
1787 qi.info_type, qi.additional_information,
1788 qi.input_buffer_length,
1789 qi.output_buffer_length, buffer);
1790 free_req1_func = SMB2_query_info_free;
1791 } else { /* unknown flags */
1792 cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
1793 qi.flags);
1794 rc = -EINVAL;
1795 }
1796
1797 if (rc)
1798 goto free_open_req;
1799 smb2_set_next_command(tcon, &rqst[1]);
1800 smb2_set_related(&rqst[1]);
1801
1802 /* Close */
1803 rqst[2].rq_iov = &vars->close_iov[0];
1804 rqst[2].rq_nvec = 1;
1805
1806 rc = SMB2_close_init(tcon, server,
1807 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
1808 if (rc)
1809 goto free_req_1;
1810 smb2_set_related(&rqst[2]);
1811
1812 rc = compound_send_recv(xid, ses, server,
1813 flags, 3, rqst,
1814 resp_buftype, rsp_iov);
1815 if (rc)
1816 goto out;
1817
1818 /* No need to bump num_remote_opens since handle immediately closed */
1819 if (qi.flags & PASSTHRU_FSCTL) {
1820 pqi = (struct smb_query_info __user *)arg;
1821 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1822 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1823 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
1824 if (qi.input_buffer_length > 0 &&
1825 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1826 > rsp_iov[1].iov_len) {
1827 rc = -EFAULT;
1828 goto out;
1829 }
1830
1831 if (copy_to_user(&pqi->input_buffer_length,
1832 &qi.input_buffer_length,
1833 sizeof(qi.input_buffer_length))) {
1834 rc = -EFAULT;
1835 goto out;
1836 }
1837
1838 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1839 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
1840 qi.input_buffer_length))
1841 rc = -EFAULT;
1842 } else {
1843 pqi = (struct smb_query_info __user *)arg;
1844 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1845 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1846 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
1847 if (copy_to_user(&pqi->input_buffer_length,
1848 &qi.input_buffer_length,
1849 sizeof(qi.input_buffer_length))) {
1850 rc = -EFAULT;
1851 goto out;
1852 }
1853
1854 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1855 qi.input_buffer_length))
1856 rc = -EFAULT;
1857 }
1858
1859 out:
1860 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1861 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1862 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
1863 SMB2_close_free(&rqst[2]);
1864 free_req_1:
1865 free_req1_func(&rqst[1]);
1866 free_open_req:
1867 SMB2_open_free(&rqst[0]);
1868 free_output_buffer:
1869 kfree(buffer);
1870 free_vars:
1871 kfree(vars);
1872 return rc;
1873 }
1874
1875 static ssize_t
1876 smb2_copychunk_range(const unsigned int xid,
1877 struct cifsFileInfo *srcfile,
1878 struct cifsFileInfo *trgtfile, u64 src_off,
1879 u64 len, u64 dest_off)
1880 {
1881 int rc;
1882 unsigned int ret_data_len;
1883 struct copychunk_ioctl *pcchunk;
1884 struct copychunk_ioctl_rsp *retbuf = NULL;
1885 struct cifs_tcon *tcon;
1886 int chunks_copied = 0;
1887 bool chunk_sizes_updated = false;
1888 ssize_t bytes_written, total_bytes_written = 0;
1889 struct inode *inode;
1890
1891 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1892
1893 /*
1894 * We need to flush all unwritten data before we can send the
1895 * copychunk ioctl to the server.
1896 */
1897 inode = d_inode(trgtfile->dentry);
1898 filemap_write_and_wait(inode->i_mapping);
1899
1900 if (pcchunk == NULL)
1901 return -ENOMEM;
1902
1903 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
1904 /* Request a key from the server to identify the source of the copy */
1905 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1906 srcfile->fid.persistent_fid,
1907 srcfile->fid.volatile_fid, pcchunk);
1908
1909 /* Note: request_res_key sets res_key null only if rc !=0 */
1910 if (rc)
1911 goto cchunk_out;
1912
1913 /* For now array only one chunk long, will make more flexible later */
1914 pcchunk->ChunkCount = cpu_to_le32(1);
1915 pcchunk->Reserved = 0;
1916 pcchunk->Reserved2 = 0;
1917
1918 tcon = tlink_tcon(trgtfile->tlink);
1919
1920 while (len > 0) {
1921 pcchunk->SourceOffset = cpu_to_le64(src_off);
1922 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1923 pcchunk->Length =
1924 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1925
1926 /* Request server copy to target from src identified by key */
1927 kfree(retbuf);
1928 retbuf = NULL;
1929 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1930 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
1931 true /* is_fsctl */, (char *)pcchunk,
1932 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1933 (char **)&retbuf, &ret_data_len);
1934 if (rc == 0) {
1935 if (ret_data_len !=
1936 sizeof(struct copychunk_ioctl_rsp)) {
1937 cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
1938 rc = -EIO;
1939 goto cchunk_out;
1940 }
1941 if (retbuf->TotalBytesWritten == 0) {
1942 cifs_dbg(FYI, "no bytes copied\n");
1943 rc = -EIO;
1944 goto cchunk_out;
1945 }
1946 /*
1947 * Check if server claimed to write more than we asked
1948 */
1949 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1950 le32_to_cpu(pcchunk->Length)) {
1951 cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
1952 rc = -EIO;
1953 goto cchunk_out;
1954 }
1955 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
1956 cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
1957 rc = -EIO;
1958 goto cchunk_out;
1959 }
1960 chunks_copied++;
1961
1962 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1963 src_off += bytes_written;
1964 dest_off += bytes_written;
1965 len -= bytes_written;
1966 total_bytes_written += bytes_written;
1967
1968 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
1969 le32_to_cpu(retbuf->ChunksWritten),
1970 le32_to_cpu(retbuf->ChunkBytesWritten),
1971 bytes_written);
1972 } else if (rc == -EINVAL) {
1973 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1974 goto cchunk_out;
1975
1976 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1977 le32_to_cpu(retbuf->ChunksWritten),
1978 le32_to_cpu(retbuf->ChunkBytesWritten),
1979 le32_to_cpu(retbuf->TotalBytesWritten));
1980
1981 /*
1982 * Check if this is the first request using these sizes,
1983 * (ie check if copy succeed once with original sizes
1984 * and check if the server gave us different sizes after
1985 * we already updated max sizes on previous request).
1986 * if not then why is the server returning an error now
1987 */
1988 if ((chunks_copied != 0) || chunk_sizes_updated)
1989 goto cchunk_out;
1990
1991 /* Check that server is not asking us to grow size */
1992 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1993 tcon->max_bytes_chunk)
1994 tcon->max_bytes_chunk =
1995 le32_to_cpu(retbuf->ChunkBytesWritten);
1996 else
1997 goto cchunk_out; /* server gave us bogus size */
1998
1999 /* No need to change MaxChunks since already set to 1 */
2000 chunk_sizes_updated = true;
2001 } else
2002 goto cchunk_out;
2003 }
2004
2005 cchunk_out:
2006 kfree(pcchunk);
2007 kfree(retbuf);
2008 if (rc)
2009 return rc;
2010 else
2011 return total_bytes_written;
2012 }
2013
2014 static int
2015 smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
2016 struct cifs_fid *fid)
2017 {
2018 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2019 }
2020
2021 static unsigned int
2022 smb2_read_data_offset(char *buf)
2023 {
2024 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
2025
2026 return rsp->DataOffset;
2027 }
2028
2029 static unsigned int
2030 smb2_read_data_length(char *buf, bool in_remaining)
2031 {
2032 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
2033
2034 if (in_remaining)
2035 return le32_to_cpu(rsp->DataRemaining);
2036
2037 return le32_to_cpu(rsp->DataLength);
2038 }
2039
2040
2041 static int
2042 smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
2043 struct cifs_io_parms *parms, unsigned int *bytes_read,
2044 char **buf, int *buf_type)
2045 {
2046 parms->persistent_fid = pfid->persistent_fid;
2047 parms->volatile_fid = pfid->volatile_fid;
2048 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
2049 }
2050
2051 static int
2052 smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
2053 struct cifs_io_parms *parms, unsigned int *written,
2054 struct kvec *iov, unsigned long nr_segs)
2055 {
2056
2057 parms->persistent_fid = pfid->persistent_fid;
2058 parms->volatile_fid = pfid->volatile_fid;
2059 return SMB2_write(xid, parms, written, iov, nr_segs);
2060 }
2061
2062 /* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
2063 static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
2064 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
2065 {
2066 struct cifsInodeInfo *cifsi;
2067 int rc;
2068
2069 cifsi = CIFS_I(inode);
2070
2071 /* if file already sparse don't bother setting sparse again */
2072 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
2073 return true; /* already sparse */
2074
2075 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
2076 return true; /* already not sparse */
2077
2078 /*
2079 * Can't check for sparse support on share the usual way via the
2080 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
2081 * since Samba server doesn't set the flag on the share, yet
2082 * supports the set sparse FSCTL and returns sparse correctly
2083 * in the file attributes. If we fail setting sparse though we
2084 * mark that server does not support sparse files for this share
2085 * to avoid repeatedly sending the unsupported fsctl to server
2086 * if the file is repeatedly extended.
2087 */
2088 if (tcon->broken_sparse_sup)
2089 return false;
2090
2091 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2092 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
2093 true /* is_fctl */,
2094 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
2095 if (rc) {
2096 tcon->broken_sparse_sup = true;
2097 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
2098 return false;
2099 }
2100
2101 if (setsparse)
2102 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
2103 else
2104 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
2105
2106 return true;
2107 }
2108
2109 static int
2110 smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
2111 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
2112 {
2113 __le64 eof = cpu_to_le64(size);
2114 struct inode *inode;
2115
2116 /*
2117 * If extending file more than one page make sparse. Many Linux fs
2118 * make files sparse by default when extending via ftruncate
2119 */
2120 inode = d_inode(cfile->dentry);
2121
2122 if (!set_alloc && (size > inode->i_size + 8192)) {
2123 __u8 set_sparse = 1;
2124
2125 /* whether set sparse succeeds or not, extend the file */
2126 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
2127 }
2128
2129 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
2130 cfile->fid.volatile_fid, cfile->pid, &eof);
2131 }
2132
2133 static int
2134 smb2_duplicate_extents(const unsigned int xid,
2135 struct cifsFileInfo *srcfile,
2136 struct cifsFileInfo *trgtfile, u64 src_off,
2137 u64 len, u64 dest_off)
2138 {
2139 int rc;
2140 unsigned int ret_data_len;
2141 struct inode *inode;
2142 struct duplicate_extents_to_file dup_ext_buf;
2143 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
2144
2145 /* server fileays advertise duplicate extent support with this flag */
2146 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
2147 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
2148 return -EOPNOTSUPP;
2149
2150 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
2151 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
2152 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
2153 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
2154 dup_ext_buf.ByteCount = cpu_to_le64(len);
2155 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
2156 src_off, dest_off, len);
2157
2158 inode = d_inode(trgtfile->dentry);
2159 if (inode->i_size < dest_off + len) {
2160 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
2161 if (rc)
2162 goto duplicate_extents_out;
2163
2164 /*
2165 * Although also could set plausible allocation size (i_blocks)
2166 * here in addition to setting the file size, in reflink
2167 * it is likely that the target file is sparse. Its allocation
2168 * size will be queried on next revalidate, but it is important
2169 * to make sure that file's cached size is updated immediately
2170 */
2171 cifs_setsize(inode, dest_off + len);
2172 }
2173 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
2174 trgtfile->fid.volatile_fid,
2175 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
2176 true /* is_fsctl */,
2177 (char *)&dup_ext_buf,
2178 sizeof(struct duplicate_extents_to_file),
2179 CIFSMaxBufSize, NULL,
2180 &ret_data_len);
2181
2182 if (ret_data_len > 0)
2183 cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
2184
2185 duplicate_extents_out:
2186 return rc;
2187 }
2188
2189 static int
2190 smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
2191 struct cifsFileInfo *cfile)
2192 {
2193 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
2194 cfile->fid.volatile_fid);
2195 }
2196
2197 static int
2198 smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
2199 struct cifsFileInfo *cfile)
2200 {
2201 struct fsctl_set_integrity_information_req integr_info;
2202 unsigned int ret_data_len;
2203
2204 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
2205 integr_info.Flags = 0;
2206 integr_info.Reserved = 0;
2207
2208 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2209 cfile->fid.volatile_fid,
2210 FSCTL_SET_INTEGRITY_INFORMATION,
2211 true /* is_fsctl */,
2212 (char *)&integr_info,
2213 sizeof(struct fsctl_set_integrity_information_req),
2214 CIFSMaxBufSize, NULL,
2215 &ret_data_len);
2216
2217 }
2218
2219 /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
2220 #define GMT_TOKEN_SIZE 50
2221
2222 #define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
2223
2224 /*
2225 * Input buffer contains (empty) struct smb_snapshot array with size filled in
2226 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
2227 */
2228 static int
2229 smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2230 struct cifsFileInfo *cfile, void __user *ioc_buf)
2231 {
2232 char *retbuf = NULL;
2233 unsigned int ret_data_len = 0;
2234 int rc;
2235 u32 max_response_size;
2236 struct smb_snapshot_array snapshot_in;
2237
2238 /*
2239 * On the first query to enumerate the list of snapshots available
2240 * for this volume the buffer begins with 0 (number of snapshots
2241 * which can be returned is zero since at that point we do not know
2242 * how big the buffer needs to be). On the second query,
2243 * it (ret_data_len) is set to number of snapshots so we can
2244 * know to set the maximum response size larger (see below).
2245 */
2246 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
2247 return -EFAULT;
2248
2249 /*
2250 * Note that for snapshot queries that servers like Azure expect that
2251 * the first query be minimal size (and just used to get the number/size
2252 * of previous versions) so response size must be specified as EXACTLY
2253 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
2254 * of eight bytes.
2255 */
2256 if (ret_data_len == 0)
2257 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
2258 else
2259 max_response_size = CIFSMaxBufSize;
2260
2261 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2262 cfile->fid.volatile_fid,
2263 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
2264 true /* is_fsctl */,
2265 NULL, 0 /* no input data */, max_response_size,
2266 (char **)&retbuf,
2267 &ret_data_len);
2268 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
2269 rc, ret_data_len);
2270 if (rc)
2271 return rc;
2272
2273 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
2274 /* Fixup buffer */
2275 if (copy_from_user(&snapshot_in, ioc_buf,
2276 sizeof(struct smb_snapshot_array))) {
2277 rc = -EFAULT;
2278 kfree(retbuf);
2279 return rc;
2280 }
2281
2282 /*
2283 * Check for min size, ie not large enough to fit even one GMT
2284 * token (snapshot). On the first ioctl some users may pass in
2285 * smaller size (or zero) to simply get the size of the array
2286 * so the user space caller can allocate sufficient memory
2287 * and retry the ioctl again with larger array size sufficient
2288 * to hold all of the snapshot GMT tokens on the second try.
2289 */
2290 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
2291 ret_data_len = sizeof(struct smb_snapshot_array);
2292
2293 /*
2294 * We return struct SRV_SNAPSHOT_ARRAY, followed by
2295 * the snapshot array (of 50 byte GMT tokens) each
2296 * representing an available previous version of the data
2297 */
2298 if (ret_data_len > (snapshot_in.snapshot_array_size +
2299 sizeof(struct smb_snapshot_array)))
2300 ret_data_len = snapshot_in.snapshot_array_size +
2301 sizeof(struct smb_snapshot_array);
2302
2303 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
2304 rc = -EFAULT;
2305 }
2306
2307 kfree(retbuf);
2308 return rc;
2309 }
2310
2311
2312
2313 static int
2314 smb3_notify(const unsigned int xid, struct file *pfile,
2315 void __user *ioc_buf)
2316 {
2317 struct smb3_notify notify;
2318 struct dentry *dentry = pfile->f_path.dentry;
2319 struct inode *inode = file_inode(pfile);
2320 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2321 struct cifs_open_parms oparms;
2322 struct cifs_fid fid;
2323 struct cifs_tcon *tcon;
2324 const unsigned char *path;
2325 void *page = alloc_dentry_path();
2326 __le16 *utf16_path = NULL;
2327 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2328 int rc = 0;
2329
2330 path = build_path_from_dentry(dentry, page);
2331 if (IS_ERR(path)) {
2332 rc = PTR_ERR(path);
2333 goto notify_exit;
2334 }
2335
2336 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2337 if (utf16_path == NULL) {
2338 rc = -ENOMEM;
2339 goto notify_exit;
2340 }
2341
2342 if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
2343 rc = -EFAULT;
2344 goto notify_exit;
2345 }
2346
2347 tcon = cifs_sb_master_tcon(cifs_sb);
2348 oparms.tcon = tcon;
2349 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2350 oparms.disposition = FILE_OPEN;
2351 oparms.create_options = cifs_create_options(cifs_sb, 0);
2352 oparms.fid = &fid;
2353 oparms.reconnect = false;
2354
2355 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
2356 NULL);
2357 if (rc)
2358 goto notify_exit;
2359
2360 rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
2361 notify.watch_tree, notify.completion_filter);
2362
2363 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2364
2365 cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
2366
2367 notify_exit:
2368 free_dentry_path(page);
2369 kfree(utf16_path);
2370 return rc;
2371 }
2372
2373 static int
2374 smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2375 const char *path, struct cifs_sb_info *cifs_sb,
2376 struct cifs_fid *fid, __u16 search_flags,
2377 struct cifs_search_info *srch_inf)
2378 {
2379 __le16 *utf16_path;
2380 struct smb_rqst rqst[2];
2381 struct kvec rsp_iov[2];
2382 int resp_buftype[2];
2383 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2384 struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
2385 int rc, flags = 0;
2386 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2387 struct cifs_open_parms oparms;
2388 struct smb2_query_directory_rsp *qd_rsp = NULL;
2389 struct smb2_create_rsp *op_rsp = NULL;
2390 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
2391 int retry_count = 0;
2392
2393 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2394 if (!utf16_path)
2395 return -ENOMEM;
2396
2397 if (smb3_encryption_required(tcon))
2398 flags |= CIFS_TRANSFORM_REQ;
2399
2400 memset(rqst, 0, sizeof(rqst));
2401 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
2402 memset(rsp_iov, 0, sizeof(rsp_iov));
2403
2404 /* Open */
2405 memset(&open_iov, 0, sizeof(open_iov));
2406 rqst[0].rq_iov = open_iov;
2407 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2408
2409 oparms.tcon = tcon;
2410 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2411 oparms.disposition = FILE_OPEN;
2412 oparms.create_options = cifs_create_options(cifs_sb, 0);
2413 oparms.fid = fid;
2414 oparms.reconnect = false;
2415
2416 rc = SMB2_open_init(tcon, server,
2417 &rqst[0], &oplock, &oparms, utf16_path);
2418 if (rc)
2419 goto qdf_free;
2420 smb2_set_next_command(tcon, &rqst[0]);
2421
2422 /* Query directory */
2423 srch_inf->entries_in_buffer = 0;
2424 srch_inf->index_of_last_entry = 2;
2425
2426 memset(&qd_iov, 0, sizeof(qd_iov));
2427 rqst[1].rq_iov = qd_iov;
2428 rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
2429
2430 rc = SMB2_query_directory_init(xid, tcon, server,
2431 &rqst[1],
2432 COMPOUND_FID, COMPOUND_FID,
2433 0, srch_inf->info_level);
2434 if (rc)
2435 goto qdf_free;
2436
2437 smb2_set_related(&rqst[1]);
2438
2439 again:
2440 rc = compound_send_recv(xid, tcon->ses, server,
2441 flags, 2, rqst,
2442 resp_buftype, rsp_iov);
2443
2444 if (rc == -EAGAIN && retry_count++ < 10)
2445 goto again;
2446
2447 /* If the open failed there is nothing to do */
2448 op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
2449 if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
2450 cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
2451 goto qdf_free;
2452 }
2453 fid->persistent_fid = op_rsp->PersistentFileId;
2454 fid->volatile_fid = op_rsp->VolatileFileId;
2455
2456 /* Anything else than ENODATA means a genuine error */
2457 if (rc && rc != -ENODATA) {
2458 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2459 cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
2460 trace_smb3_query_dir_err(xid, fid->persistent_fid,
2461 tcon->tid, tcon->ses->Suid, 0, 0, rc);
2462 goto qdf_free;
2463 }
2464
2465 atomic_inc(&tcon->num_remote_opens);
2466
2467 qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
2468 if (qd_rsp->hdr.Status == STATUS_NO_MORE_FILES) {
2469 trace_smb3_query_dir_done(xid, fid->persistent_fid,
2470 tcon->tid, tcon->ses->Suid, 0, 0);
2471 srch_inf->endOfSearch = true;
2472 rc = 0;
2473 goto qdf_free;
2474 }
2475
2476 rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
2477 srch_inf);
2478 if (rc) {
2479 trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
2480 tcon->ses->Suid, 0, 0, rc);
2481 goto qdf_free;
2482 }
2483 resp_buftype[1] = CIFS_NO_BUFFER;
2484
2485 trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
2486 tcon->ses->Suid, 0, srch_inf->entries_in_buffer);
2487
2488 qdf_free:
2489 kfree(utf16_path);
2490 SMB2_open_free(&rqst[0]);
2491 SMB2_query_directory_free(&rqst[1]);
2492 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2493 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2494 return rc;
2495 }
2496
2497 static int
2498 smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2499 struct cifs_fid *fid, __u16 search_flags,
2500 struct cifs_search_info *srch_inf)
2501 {
2502 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2503 fid->volatile_fid, 0, srch_inf);
2504 }
2505
2506 static int
2507 smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2508 struct cifs_fid *fid)
2509 {
2510 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2511 }
2512
2513 /*
2514 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2515 * the number of credits and return true. Otherwise - return false.
2516 */
2517 static bool
2518 smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
2519 {
2520 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2521 int scredits, in_flight;
2522
2523 if (shdr->Status != STATUS_PENDING)
2524 return false;
2525
2526 if (shdr->CreditRequest) {
2527 spin_lock(&server->req_lock);
2528 server->credits += le16_to_cpu(shdr->CreditRequest);
2529 scredits = server->credits;
2530 in_flight = server->in_flight;
2531 spin_unlock(&server->req_lock);
2532 wake_up(&server->request_q);
2533
2534 trace_smb3_pend_credits(server->CurrentMid,
2535 server->conn_id, server->hostname, scredits,
2536 le16_to_cpu(shdr->CreditRequest), in_flight);
2537 cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
2538 __func__, le16_to_cpu(shdr->CreditRequest), scredits);
2539 }
2540
2541 return true;
2542 }
2543
2544 static bool
2545 smb2_is_session_expired(char *buf)
2546 {
2547 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2548
2549 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2550 shdr->Status != STATUS_USER_SESSION_DELETED)
2551 return false;
2552
2553 trace_smb3_ses_expired(le32_to_cpu(shdr->Id.SyncId.TreeId),
2554 le64_to_cpu(shdr->SessionId),
2555 le16_to_cpu(shdr->Command),
2556 le64_to_cpu(shdr->MessageId));
2557 cifs_dbg(FYI, "Session expired or deleted\n");
2558
2559 return true;
2560 }
2561
2562 static bool
2563 smb2_is_status_io_timeout(char *buf)
2564 {
2565 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2566
2567 if (shdr->Status == STATUS_IO_TIMEOUT)
2568 return true;
2569 else
2570 return false;
2571 }
2572
2573 static void
2574 smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
2575 {
2576 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2577 struct list_head *tmp, *tmp1;
2578 struct cifs_ses *ses;
2579 struct cifs_tcon *tcon;
2580
2581 if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
2582 return;
2583
2584 spin_lock(&cifs_tcp_ses_lock);
2585 list_for_each(tmp, &server->smb_ses_list) {
2586 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
2587 list_for_each(tmp1, &ses->tcon_list) {
2588 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
2589 if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
2590 tcon->need_reconnect = true;
2591 spin_unlock(&cifs_tcp_ses_lock);
2592 pr_warn_once("Server share %s deleted.\n",
2593 tcon->treeName);
2594 return;
2595 }
2596 }
2597 }
2598 spin_unlock(&cifs_tcp_ses_lock);
2599 }
2600
2601 static int
2602 smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2603 struct cifsInodeInfo *cinode)
2604 {
2605 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2606 return SMB2_lease_break(0, tcon, cinode->lease_key,
2607 smb2_get_lease_state(cinode));
2608
2609 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2610 fid->volatile_fid,
2611 CIFS_CACHE_READ(cinode) ? 1 : 0);
2612 }
2613
2614 void
2615 smb2_set_related(struct smb_rqst *rqst)
2616 {
2617 struct smb2_hdr *shdr;
2618
2619 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2620 if (shdr == NULL) {
2621 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2622 return;
2623 }
2624 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2625 }
2626
2627 char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2628
2629 void
2630 smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
2631 {
2632 struct smb2_hdr *shdr;
2633 struct cifs_ses *ses = tcon->ses;
2634 struct TCP_Server_Info *server = ses->server;
2635 unsigned long len = smb_rqst_len(server, rqst);
2636 int i, num_padding;
2637
2638 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2639 if (shdr == NULL) {
2640 cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
2641 return;
2642 }
2643
2644 /* SMB headers in a compound are 8 byte aligned. */
2645
2646 /* No padding needed */
2647 if (!(len & 7))
2648 goto finished;
2649
2650 num_padding = 8 - (len & 7);
2651 if (!smb3_encryption_required(tcon)) {
2652 /*
2653 * If we do not have encryption then we can just add an extra
2654 * iov for the padding.
2655 */
2656 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
2657 rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
2658 rqst->rq_nvec++;
2659 len += num_padding;
2660 } else {
2661 /*
2662 * We can not add a small padding iov for the encryption case
2663 * because the encryption framework can not handle the padding
2664 * iovs.
2665 * We have to flatten this into a single buffer and add
2666 * the padding to it.
2667 */
2668 for (i = 1; i < rqst->rq_nvec; i++) {
2669 memcpy(rqst->rq_iov[0].iov_base +
2670 rqst->rq_iov[0].iov_len,
2671 rqst->rq_iov[i].iov_base,
2672 rqst->rq_iov[i].iov_len);
2673 rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
2674 }
2675 memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
2676 0, num_padding);
2677 rqst->rq_iov[0].iov_len += num_padding;
2678 len += num_padding;
2679 rqst->rq_nvec = 1;
2680 }
2681
2682 finished:
2683 shdr->NextCommand = cpu_to_le32(len);
2684 }
2685
2686 /*
2687 * Passes the query info response back to the caller on success.
2688 * Caller need to free this with free_rsp_buf().
2689 */
2690 int
2691 smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2692 const char *path, u32 desired_access,
2693 u32 class, u32 type, u32 output_len,
2694 struct kvec *rsp, int *buftype,
2695 struct cifs_sb_info *cifs_sb)
2696 {
2697 struct cifs_ses *ses = tcon->ses;
2698 struct TCP_Server_Info *server = cifs_pick_channel(ses);
2699 int flags = CIFS_CP_CREATE_CLOSE_OP;
2700 struct smb_rqst rqst[3];
2701 int resp_buftype[3];
2702 struct kvec rsp_iov[3];
2703 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2704 struct kvec qi_iov[1];
2705 struct kvec close_iov[1];
2706 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2707 struct cifs_open_parms oparms;
2708 struct cifs_fid fid;
2709 int rc;
2710 __le16 *utf16_path;
2711 struct cached_fid *cfid = NULL;
2712
2713 if (!path)
2714 path = "";
2715 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2716 if (!utf16_path)
2717 return -ENOMEM;
2718
2719 if (smb3_encryption_required(tcon))
2720 flags |= CIFS_TRANSFORM_REQ;
2721
2722 memset(rqst, 0, sizeof(rqst));
2723 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2724 memset(rsp_iov, 0, sizeof(rsp_iov));
2725
2726 if (!strcmp(path, ""))
2727 open_cached_dir(xid, tcon, path, cifs_sb, &cfid); /* cfid null if open dir failed */
2728
2729 memset(&open_iov, 0, sizeof(open_iov));
2730 rqst[0].rq_iov = open_iov;
2731 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2732
2733 oparms.tcon = tcon;
2734 oparms.desired_access = desired_access;
2735 oparms.disposition = FILE_OPEN;
2736 oparms.create_options = cifs_create_options(cifs_sb, 0);
2737 oparms.fid = &fid;
2738 oparms.reconnect = false;
2739
2740 rc = SMB2_open_init(tcon, server,
2741 &rqst[0], &oplock, &oparms, utf16_path);
2742 if (rc)
2743 goto qic_exit;
2744 smb2_set_next_command(tcon, &rqst[0]);
2745
2746 memset(&qi_iov, 0, sizeof(qi_iov));
2747 rqst[1].rq_iov = qi_iov;
2748 rqst[1].rq_nvec = 1;
2749
2750 if (cfid) {
2751 rc = SMB2_query_info_init(tcon, server,
2752 &rqst[1],
2753 cfid->fid->persistent_fid,
2754 cfid->fid->volatile_fid,
2755 class, type, 0,
2756 output_len, 0,
2757 NULL);
2758 } else {
2759 rc = SMB2_query_info_init(tcon, server,
2760 &rqst[1],
2761 COMPOUND_FID,
2762 COMPOUND_FID,
2763 class, type, 0,
2764 output_len, 0,
2765 NULL);
2766 }
2767 if (rc)
2768 goto qic_exit;
2769 if (!cfid) {
2770 smb2_set_next_command(tcon, &rqst[1]);
2771 smb2_set_related(&rqst[1]);
2772 }
2773
2774 memset(&close_iov, 0, sizeof(close_iov));
2775 rqst[2].rq_iov = close_iov;
2776 rqst[2].rq_nvec = 1;
2777
2778 rc = SMB2_close_init(tcon, server,
2779 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
2780 if (rc)
2781 goto qic_exit;
2782 smb2_set_related(&rqst[2]);
2783
2784 if (cfid) {
2785 rc = compound_send_recv(xid, ses, server,
2786 flags, 1, &rqst[1],
2787 &resp_buftype[1], &rsp_iov[1]);
2788 } else {
2789 rc = compound_send_recv(xid, ses, server,
2790 flags, 3, rqst,
2791 resp_buftype, rsp_iov);
2792 }
2793 if (rc) {
2794 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2795 if (rc == -EREMCHG) {
2796 tcon->need_reconnect = true;
2797 pr_warn_once("server share %s deleted\n",
2798 tcon->treeName);
2799 }
2800 goto qic_exit;
2801 }
2802 *rsp = rsp_iov[1];
2803 *buftype = resp_buftype[1];
2804
2805 qic_exit:
2806 kfree(utf16_path);
2807 SMB2_open_free(&rqst[0]);
2808 SMB2_query_info_free(&rqst[1]);
2809 SMB2_close_free(&rqst[2]);
2810 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2811 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
2812 if (cfid)
2813 close_cached_dir(cfid);
2814 return rc;
2815 }
2816
2817 static int
2818 smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2819 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
2820 {
2821 struct smb2_query_info_rsp *rsp;
2822 struct smb2_fs_full_size_info *info = NULL;
2823 struct kvec rsp_iov = {NULL, 0};
2824 int buftype = CIFS_NO_BUFFER;
2825 int rc;
2826
2827
2828 rc = smb2_query_info_compound(xid, tcon, "",
2829 FILE_READ_ATTRIBUTES,
2830 FS_FULL_SIZE_INFORMATION,
2831 SMB2_O_INFO_FILESYSTEM,
2832 sizeof(struct smb2_fs_full_size_info),
2833 &rsp_iov, &buftype, cifs_sb);
2834 if (rc)
2835 goto qfs_exit;
2836
2837 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
2838 buf->f_type = SMB2_SUPER_MAGIC;
2839 info = (struct smb2_fs_full_size_info *)(
2840 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2841 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2842 le32_to_cpu(rsp->OutputBufferLength),
2843 &rsp_iov,
2844 sizeof(struct smb2_fs_full_size_info));
2845 if (!rc)
2846 smb2_copy_fs_info_to_kstatfs(info, buf);
2847
2848 qfs_exit:
2849 free_rsp_buf(buftype, rsp_iov.iov_base);
2850 return rc;
2851 }
2852
2853 static int
2854 smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2855 struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
2856 {
2857 int rc;
2858 __le16 srch_path = 0; /* Null - open root of share */
2859 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2860 struct cifs_open_parms oparms;
2861 struct cifs_fid fid;
2862
2863 if (!tcon->posix_extensions)
2864 return smb2_queryfs(xid, tcon, cifs_sb, buf);
2865
2866 oparms.tcon = tcon;
2867 oparms.desired_access = FILE_READ_ATTRIBUTES;
2868 oparms.disposition = FILE_OPEN;
2869 oparms.create_options = cifs_create_options(cifs_sb, 0);
2870 oparms.fid = &fid;
2871 oparms.reconnect = false;
2872
2873 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
2874 NULL, NULL);
2875 if (rc)
2876 return rc;
2877
2878 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2879 fid.volatile_fid, buf);
2880 buf->f_type = SMB2_SUPER_MAGIC;
2881 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2882 return rc;
2883 }
2884
2885 static bool
2886 smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2887 {
2888 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2889 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2890 }
2891
2892 static int
2893 smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2894 __u64 length, __u32 type, int lock, int unlock, bool wait)
2895 {
2896 if (unlock && !lock)
2897 type = SMB2_LOCKFLAG_UNLOCK;
2898 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2899 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2900 current->tgid, length, offset, type, wait);
2901 }
2902
2903 static void
2904 smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2905 {
2906 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2907 }
2908
2909 static void
2910 smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2911 {
2912 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2913 }
2914
2915 static void
2916 smb2_new_lease_key(struct cifs_fid *fid)
2917 {
2918 generate_random_uuid(fid->lease_key);
2919 }
2920
2921 static int
2922 smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2923 const char *search_name,
2924 struct dfs_info3_param **target_nodes,
2925 unsigned int *num_of_nodes,
2926 const struct nls_table *nls_codepage, int remap)
2927 {
2928 int rc;
2929 __le16 *utf16_path = NULL;
2930 int utf16_path_len = 0;
2931 struct cifs_tcon *tcon;
2932 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2933 struct get_dfs_referral_rsp *dfs_rsp = NULL;
2934 u32 dfs_req_size = 0, dfs_rsp_size = 0;
2935 int retry_count = 0;
2936
2937 cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
2938
2939 /*
2940 * Try to use the IPC tcon, otherwise just use any
2941 */
2942 tcon = ses->tcon_ipc;
2943 if (tcon == NULL) {
2944 spin_lock(&cifs_tcp_ses_lock);
2945 tcon = list_first_entry_or_null(&ses->tcon_list,
2946 struct cifs_tcon,
2947 tcon_list);
2948 if (tcon)
2949 tcon->tc_count++;
2950 spin_unlock(&cifs_tcp_ses_lock);
2951 }
2952
2953 if (tcon == NULL) {
2954 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2955 ses);
2956 rc = -ENOTCONN;
2957 goto out;
2958 }
2959
2960 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2961 &utf16_path_len,
2962 nls_codepage, remap);
2963 if (!utf16_path) {
2964 rc = -ENOMEM;
2965 goto out;
2966 }
2967
2968 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2969 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2970 if (!dfs_req) {
2971 rc = -ENOMEM;
2972 goto out;
2973 }
2974
2975 /* Highest DFS referral version understood */
2976 dfs_req->MaxReferralLevel = DFS_VERSION;
2977
2978 /* Path to resolve in an UTF-16 null-terminated string */
2979 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2980
2981 do {
2982 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2983 FSCTL_DFS_GET_REFERRALS,
2984 true /* is_fsctl */,
2985 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
2986 (char **)&dfs_rsp, &dfs_rsp_size);
2987 if (!is_retryable_error(rc))
2988 break;
2989 usleep_range(512, 2048);
2990 } while (++retry_count < 5);
2991
2992 if (rc) {
2993 if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
2994 cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
2995 goto out;
2996 }
2997
2998 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2999 num_of_nodes, target_nodes,
3000 nls_codepage, remap, search_name,
3001 true /* is_unicode */);
3002 if (rc) {
3003 cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
3004 goto out;
3005 }
3006
3007 out:
3008 if (tcon && !tcon->ipc) {
3009 /* ipc tcons are not refcounted */
3010 spin_lock(&cifs_tcp_ses_lock);
3011 tcon->tc_count--;
3012 /* tc_count can never go negative */
3013 WARN_ON(tcon->tc_count < 0);
3014 spin_unlock(&cifs_tcp_ses_lock);
3015 }
3016 kfree(utf16_path);
3017 kfree(dfs_req);
3018 kfree(dfs_rsp);
3019 return rc;
3020 }
3021
3022 static int
3023 parse_reparse_posix(struct reparse_posix_data *symlink_buf,
3024 u32 plen, char **target_path,
3025 struct cifs_sb_info *cifs_sb)
3026 {
3027 unsigned int len;
3028
3029 /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
3030 len = le16_to_cpu(symlink_buf->ReparseDataLength);
3031
3032 if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
3033 cifs_dbg(VFS, "%lld not a supported symlink type\n",
3034 le64_to_cpu(symlink_buf->InodeType));
3035 return -EOPNOTSUPP;
3036 }
3037
3038 *target_path = cifs_strndup_from_utf16(
3039 symlink_buf->PathBuffer,
3040 len, true, cifs_sb->local_nls);
3041 if (!(*target_path))
3042 return -ENOMEM;
3043
3044 convert_delimiter(*target_path, '/');
3045 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
3046
3047 return 0;
3048 }
3049
3050 static int
3051 parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
3052 u32 plen, char **target_path,
3053 struct cifs_sb_info *cifs_sb)
3054 {
3055 unsigned int sub_len;
3056 unsigned int sub_offset;
3057
3058 /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
3059
3060 sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
3061 sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
3062 if (sub_offset + 20 > plen ||
3063 sub_offset + sub_len + 20 > plen) {
3064 cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
3065 return -EIO;
3066 }
3067
3068 *target_path = cifs_strndup_from_utf16(
3069 symlink_buf->PathBuffer + sub_offset,
3070 sub_len, true, cifs_sb->local_nls);
3071 if (!(*target_path))
3072 return -ENOMEM;
3073
3074 convert_delimiter(*target_path, '/');
3075 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
3076
3077 return 0;
3078 }
3079
3080 static int
3081 parse_reparse_point(struct reparse_data_buffer *buf,
3082 u32 plen, char **target_path,
3083 struct cifs_sb_info *cifs_sb)
3084 {
3085 if (plen < sizeof(struct reparse_data_buffer)) {
3086 cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
3087 plen);
3088 return -EIO;
3089 }
3090
3091 if (plen < le16_to_cpu(buf->ReparseDataLength) +
3092 sizeof(struct reparse_data_buffer)) {
3093 cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
3094 plen);
3095 return -EIO;
3096 }
3097
3098 /* See MS-FSCC 2.1.2 */
3099 switch (le32_to_cpu(buf->ReparseTag)) {
3100 case IO_REPARSE_TAG_NFS:
3101 return parse_reparse_posix(
3102 (struct reparse_posix_data *)buf,
3103 plen, target_path, cifs_sb);
3104 case IO_REPARSE_TAG_SYMLINK:
3105 return parse_reparse_symlink(
3106 (struct reparse_symlink_data_buffer *)buf,
3107 plen, target_path, cifs_sb);
3108 default:
3109 cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
3110 le32_to_cpu(buf->ReparseTag));
3111 return -EOPNOTSUPP;
3112 }
3113 }
3114
3115 #define SMB2_SYMLINK_STRUCT_SIZE \
3116 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
3117
3118 static int
3119 smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
3120 struct cifs_sb_info *cifs_sb, const char *full_path,
3121 char **target_path, bool is_reparse_point)
3122 {
3123 int rc;
3124 __le16 *utf16_path = NULL;
3125 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3126 struct cifs_open_parms oparms;
3127 struct cifs_fid fid;
3128 struct kvec err_iov = {NULL, 0};
3129 struct smb2_err_rsp *err_buf = NULL;
3130 struct smb2_symlink_err_rsp *symlink;
3131 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
3132 unsigned int sub_len;
3133 unsigned int sub_offset;
3134 unsigned int print_len;
3135 unsigned int print_offset;
3136 int flags = CIFS_CP_CREATE_CLOSE_OP;
3137 struct smb_rqst rqst[3];
3138 int resp_buftype[3];
3139 struct kvec rsp_iov[3];
3140 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
3141 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
3142 struct kvec close_iov[1];
3143 struct smb2_create_rsp *create_rsp;
3144 struct smb2_ioctl_rsp *ioctl_rsp;
3145 struct reparse_data_buffer *reparse_buf;
3146 int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
3147 u32 plen;
3148
3149 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
3150
3151 *target_path = NULL;
3152
3153 if (smb3_encryption_required(tcon))
3154 flags |= CIFS_TRANSFORM_REQ;
3155
3156 memset(rqst, 0, sizeof(rqst));
3157 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
3158 memset(rsp_iov, 0, sizeof(rsp_iov));
3159
3160 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
3161 if (!utf16_path)
3162 return -ENOMEM;
3163
3164 /* Open */
3165 memset(&open_iov, 0, sizeof(open_iov));
3166 rqst[0].rq_iov = open_iov;
3167 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
3168
3169 memset(&oparms, 0, sizeof(oparms));
3170 oparms.tcon = tcon;
3171 oparms.desired_access = FILE_READ_ATTRIBUTES;
3172 oparms.disposition = FILE_OPEN;
3173 oparms.create_options = cifs_create_options(cifs_sb, create_options);
3174 oparms.fid = &fid;
3175 oparms.reconnect = false;
3176
3177 rc = SMB2_open_init(tcon, server,
3178 &rqst[0], &oplock, &oparms, utf16_path);
3179 if (rc)
3180 goto querty_exit;
3181 smb2_set_next_command(tcon, &rqst[0]);
3182
3183
3184 /* IOCTL */
3185 memset(&io_iov, 0, sizeof(io_iov));
3186 rqst[1].rq_iov = io_iov;
3187 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
3188
3189 rc = SMB2_ioctl_init(tcon, server,
3190 &rqst[1], fid.persistent_fid,
3191 fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
3192 true /* is_fctl */, NULL, 0,
3193 CIFSMaxBufSize -
3194 MAX_SMB2_CREATE_RESPONSE_SIZE -
3195 MAX_SMB2_CLOSE_RESPONSE_SIZE);
3196 if (rc)
3197 goto querty_exit;
3198
3199 smb2_set_next_command(tcon, &rqst[1]);
3200 smb2_set_related(&rqst[1]);
3201
3202
3203 /* Close */
3204 memset(&close_iov, 0, sizeof(close_iov));
3205 rqst[2].rq_iov = close_iov;
3206 rqst[2].rq_nvec = 1;
3207
3208 rc = SMB2_close_init(tcon, server,
3209 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
3210 if (rc)
3211 goto querty_exit;
3212
3213 smb2_set_related(&rqst[2]);
3214
3215 rc = compound_send_recv(xid, tcon->ses, server,
3216 flags, 3, rqst,
3217 resp_buftype, rsp_iov);
3218
3219 create_rsp = rsp_iov[0].iov_base;
3220 if (create_rsp && create_rsp->hdr.Status)
3221 err_iov = rsp_iov[0];
3222 ioctl_rsp = rsp_iov[1].iov_base;
3223
3224 /*
3225 * Open was successful and we got an ioctl response.
3226 */
3227 if ((rc == 0) && (is_reparse_point)) {
3228 /* See MS-FSCC 2.3.23 */
3229
3230 reparse_buf = (struct reparse_data_buffer *)
3231 ((char *)ioctl_rsp +
3232 le32_to_cpu(ioctl_rsp->OutputOffset));
3233 plen = le32_to_cpu(ioctl_rsp->OutputCount);
3234
3235 if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
3236 rsp_iov[1].iov_len) {
3237 cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
3238 plen);
3239 rc = -EIO;
3240 goto querty_exit;
3241 }
3242
3243 rc = parse_reparse_point(reparse_buf, plen, target_path,
3244 cifs_sb);
3245 goto querty_exit;
3246 }
3247
3248 if (!rc || !err_iov.iov_base) {
3249 rc = -ENOENT;
3250 goto querty_exit;
3251 }
3252
3253 err_buf = err_iov.iov_base;
3254 if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
3255 err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
3256 rc = -EINVAL;
3257 goto querty_exit;
3258 }
3259
3260 symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
3261 if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
3262 le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
3263 rc = -EINVAL;
3264 goto querty_exit;
3265 }
3266
3267 /* open must fail on symlink - reset rc */
3268 rc = 0;
3269 sub_len = le16_to_cpu(symlink->SubstituteNameLength);
3270 sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
3271 print_len = le16_to_cpu(symlink->PrintNameLength);
3272 print_offset = le16_to_cpu(symlink->PrintNameOffset);
3273
3274 if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
3275 rc = -EINVAL;
3276 goto querty_exit;
3277 }
3278
3279 if (err_iov.iov_len <
3280 SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
3281 rc = -EINVAL;
3282 goto querty_exit;
3283 }
3284
3285 *target_path = cifs_strndup_from_utf16(
3286 (char *)symlink->PathBuffer + sub_offset,
3287 sub_len, true, cifs_sb->local_nls);
3288 if (!(*target_path)) {
3289 rc = -ENOMEM;
3290 goto querty_exit;
3291 }
3292 convert_delimiter(*target_path, '/');
3293 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
3294
3295 querty_exit:
3296 cifs_dbg(FYI, "query symlink rc %d\n", rc);
3297 kfree(utf16_path);
3298 SMB2_open_free(&rqst[0]);
3299 SMB2_ioctl_free(&rqst[1]);
3300 SMB2_close_free(&rqst[2]);
3301 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
3302 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
3303 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
3304 return rc;
3305 }
3306
3307 int
3308 smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
3309 struct cifs_sb_info *cifs_sb, const char *full_path,
3310 __u32 *tag)
3311 {
3312 int rc;
3313 __le16 *utf16_path = NULL;
3314 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3315 struct cifs_open_parms oparms;
3316 struct cifs_fid fid;
3317 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
3318 int flags = CIFS_CP_CREATE_CLOSE_OP;
3319 struct smb_rqst rqst[3];
3320 int resp_buftype[3];
3321 struct kvec rsp_iov[3];
3322 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
3323 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
3324 struct kvec close_iov[1];
3325 struct smb2_ioctl_rsp *ioctl_rsp;
3326 struct reparse_data_buffer *reparse_buf;
3327 u32 plen;
3328
3329 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
3330
3331 if (smb3_encryption_required(tcon))
3332 flags |= CIFS_TRANSFORM_REQ;
3333
3334 memset(rqst, 0, sizeof(rqst));
3335 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
3336 memset(rsp_iov, 0, sizeof(rsp_iov));
3337
3338 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
3339 if (!utf16_path)
3340 return -ENOMEM;
3341
3342 /*
3343 * setup smb2open - TODO add optimization to call cifs_get_readable_path
3344 * to see if there is a handle already open that we can use
3345 */
3346 memset(&open_iov, 0, sizeof(open_iov));
3347 rqst[0].rq_iov = open_iov;
3348 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
3349
3350 memset(&oparms, 0, sizeof(oparms));
3351 oparms.tcon = tcon;
3352 oparms.desired_access = FILE_READ_ATTRIBUTES;
3353 oparms.disposition = FILE_OPEN;
3354 oparms.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT);
3355 oparms.fid = &fid;
3356 oparms.reconnect = false;
3357
3358 rc = SMB2_open_init(tcon, server,
3359 &rqst[0], &oplock, &oparms, utf16_path);
3360 if (rc)
3361 goto query_rp_exit;
3362 smb2_set_next_command(tcon, &rqst[0]);
3363
3364
3365 /* IOCTL */
3366 memset(&io_iov, 0, sizeof(io_iov));
3367 rqst[1].rq_iov = io_iov;
3368 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
3369
3370 rc = SMB2_ioctl_init(tcon, server,
3371 &rqst[1], COMPOUND_FID,
3372 COMPOUND_FID, FSCTL_GET_REPARSE_POINT,
3373 true /* is_fctl */, NULL, 0,
3374 CIFSMaxBufSize -
3375 MAX_SMB2_CREATE_RESPONSE_SIZE -
3376 MAX_SMB2_CLOSE_RESPONSE_SIZE);
3377 if (rc)
3378 goto query_rp_exit;
3379
3380 smb2_set_next_command(tcon, &rqst[1]);
3381 smb2_set_related(&rqst[1]);
3382
3383
3384 /* Close */
3385 memset(&close_iov, 0, sizeof(close_iov));
3386 rqst[2].rq_iov = close_iov;
3387 rqst[2].rq_nvec = 1;
3388
3389 rc = SMB2_close_init(tcon, server,
3390 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
3391 if (rc)
3392 goto query_rp_exit;
3393
3394 smb2_set_related(&rqst[2]);
3395
3396 rc = compound_send_recv(xid, tcon->ses, server,
3397 flags, 3, rqst,
3398 resp_buftype, rsp_iov);
3399
3400 ioctl_rsp = rsp_iov[1].iov_base;
3401
3402 /*
3403 * Open was successful and we got an ioctl response.
3404 */
3405 if (rc == 0) {
3406 /* See MS-FSCC 2.3.23 */
3407
3408 reparse_buf = (struct reparse_data_buffer *)
3409 ((char *)ioctl_rsp +
3410 le32_to_cpu(ioctl_rsp->OutputOffset));
3411 plen = le32_to_cpu(ioctl_rsp->OutputCount);
3412
3413 if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
3414 rsp_iov[1].iov_len) {
3415 cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
3416 plen);
3417 rc = -EIO;
3418 goto query_rp_exit;
3419 }
3420 *tag = le32_to_cpu(reparse_buf->ReparseTag);
3421 }
3422
3423 query_rp_exit:
3424 kfree(utf16_path);
3425 SMB2_open_free(&rqst[0]);
3426 SMB2_ioctl_free(&rqst[1]);
3427 SMB2_close_free(&rqst[2]);
3428 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
3429 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
3430 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
3431 return rc;
3432 }
3433
3434 static struct cifs_ntsd *
3435 get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
3436 const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
3437 {
3438 struct cifs_ntsd *pntsd = NULL;
3439 unsigned int xid;
3440 int rc = -EOPNOTSUPP;
3441 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3442
3443 if (IS_ERR(tlink))
3444 return ERR_CAST(tlink);
3445
3446 xid = get_xid();
3447 cifs_dbg(FYI, "trying to get acl\n");
3448
3449 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
3450 cifsfid->volatile_fid, (void **)&pntsd, pacllen,
3451 info);
3452 free_xid(xid);
3453
3454 cifs_put_tlink(tlink);
3455
3456 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3457 if (rc)
3458 return ERR_PTR(rc);
3459 return pntsd;
3460
3461 }
3462
3463 static struct cifs_ntsd *
3464 get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
3465 const char *path, u32 *pacllen, u32 info)
3466 {
3467 struct cifs_ntsd *pntsd = NULL;
3468 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3469 unsigned int xid;
3470 int rc;
3471 struct cifs_tcon *tcon;
3472 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3473 struct cifs_fid fid;
3474 struct cifs_open_parms oparms;
3475 __le16 *utf16_path;
3476
3477 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
3478 if (IS_ERR(tlink))
3479 return ERR_CAST(tlink);
3480
3481 tcon = tlink_tcon(tlink);
3482 xid = get_xid();
3483
3484 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
3485 if (!utf16_path) {
3486 rc = -ENOMEM;
3487 free_xid(xid);
3488 return ERR_PTR(rc);
3489 }
3490
3491 oparms.tcon = tcon;
3492 oparms.desired_access = READ_CONTROL;
3493 oparms.disposition = FILE_OPEN;
3494 /*
3495 * When querying an ACL, even if the file is a symlink we want to open
3496 * the source not the target, and so the protocol requires that the
3497 * client specify this flag when opening a reparse point
3498 */
3499 oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
3500 oparms.fid = &fid;
3501 oparms.reconnect = false;
3502
3503 if (info & SACL_SECINFO)
3504 oparms.desired_access |= SYSTEM_SECURITY;
3505
3506 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
3507 NULL);
3508 kfree(utf16_path);
3509 if (!rc) {
3510 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3511 fid.volatile_fid, (void **)&pntsd, pacllen,
3512 info);
3513 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3514 }
3515
3516 cifs_put_tlink(tlink);
3517 free_xid(xid);
3518
3519 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3520 if (rc)
3521 return ERR_PTR(rc);
3522 return pntsd;
3523 }
3524
3525 static int
3526 set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
3527 struct inode *inode, const char *path, int aclflag)
3528 {
3529 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3530 unsigned int xid;
3531 int rc, access_flags = 0;
3532 struct cifs_tcon *tcon;
3533 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3534 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3535 struct cifs_fid fid;
3536 struct cifs_open_parms oparms;
3537 __le16 *utf16_path;
3538
3539 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
3540 if (IS_ERR(tlink))
3541 return PTR_ERR(tlink);
3542
3543 tcon = tlink_tcon(tlink);
3544 xid = get_xid();
3545
3546 if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
3547 access_flags |= WRITE_OWNER;
3548 if (aclflag & CIFS_ACL_SACL)
3549 access_flags |= SYSTEM_SECURITY;
3550 if (aclflag & CIFS_ACL_DACL)
3551 access_flags |= WRITE_DAC;
3552
3553 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
3554 if (!utf16_path) {
3555 rc = -ENOMEM;
3556 free_xid(xid);
3557 return rc;
3558 }
3559
3560 oparms.tcon = tcon;
3561 oparms.desired_access = access_flags;
3562 oparms.create_options = cifs_create_options(cifs_sb, 0);
3563 oparms.disposition = FILE_OPEN;
3564 oparms.path = path;
3565 oparms.fid = &fid;
3566 oparms.reconnect = false;
3567
3568 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
3569 NULL, NULL);
3570 kfree(utf16_path);
3571 if (!rc) {
3572 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3573 fid.volatile_fid, pnntsd, acllen, aclflag);
3574 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3575 }
3576
3577 cifs_put_tlink(tlink);
3578 free_xid(xid);
3579 return rc;
3580 }
3581
3582 /* Retrieve an ACL from the server */
3583 static struct cifs_ntsd *
3584 get_smb2_acl(struct cifs_sb_info *cifs_sb,
3585 struct inode *inode, const char *path,
3586 u32 *pacllen, u32 info)
3587 {
3588 struct cifs_ntsd *pntsd = NULL;
3589 struct cifsFileInfo *open_file = NULL;
3590
3591 if (inode && !(info & SACL_SECINFO))
3592 open_file = find_readable_file(CIFS_I(inode), true);
3593 if (!open_file || (info & SACL_SECINFO))
3594 return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
3595
3596 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
3597 cifsFileInfo_put(open_file);
3598 return pntsd;
3599 }
3600
3601 static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
3602 loff_t offset, loff_t len, bool keep_size)
3603 {
3604 struct cifs_ses *ses = tcon->ses;
3605 struct inode *inode;
3606 struct cifsInodeInfo *cifsi;
3607 struct cifsFileInfo *cfile = file->private_data;
3608 struct file_zero_data_information fsctl_buf;
3609 long rc;
3610 unsigned int xid;
3611 __le64 eof;
3612
3613 xid = get_xid();
3614
3615 inode = d_inode(cfile->dentry);
3616 cifsi = CIFS_I(inode);
3617
3618 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3619 ses->Suid, offset, len);
3620
3621 /*
3622 * We zero the range through ioctl, so we need remove the page caches
3623 * first, otherwise the data may be inconsistent with the server.
3624 */
3625 truncate_pagecache_range(inode, offset, offset + len - 1);
3626
3627 /* if file not oplocked can't be sure whether asking to extend size */
3628 if (!CIFS_CACHE_READ(cifsi))
3629 if (keep_size == false) {
3630 rc = -EOPNOTSUPP;
3631 trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
3632 tcon->tid, ses->Suid, offset, len, rc);
3633 free_xid(xid);
3634 return rc;
3635 }
3636
3637 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
3638
3639 fsctl_buf.FileOffset = cpu_to_le64(offset);
3640 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3641
3642 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3643 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
3644 (char *)&fsctl_buf,
3645 sizeof(struct file_zero_data_information),
3646 0, NULL, NULL);
3647 if (rc)
3648 goto zero_range_exit;
3649
3650 /*
3651 * do we also need to change the size of the file?
3652 */
3653 if (keep_size == false && i_size_read(inode) < offset + len) {
3654 eof = cpu_to_le64(offset + len);
3655 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3656 cfile->fid.volatile_fid, cfile->pid, &eof);
3657 }
3658
3659 zero_range_exit:
3660 free_xid(xid);
3661 if (rc)
3662 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
3663 ses->Suid, offset, len, rc);
3664 else
3665 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
3666 ses->Suid, offset, len);
3667 return rc;
3668 }
3669
3670 static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
3671 loff_t offset, loff_t len)
3672 {
3673 struct inode *inode;
3674 struct cifsFileInfo *cfile = file->private_data;
3675 struct file_zero_data_information fsctl_buf;
3676 long rc;
3677 unsigned int xid;
3678 __u8 set_sparse = 1;
3679
3680 xid = get_xid();
3681
3682 inode = d_inode(cfile->dentry);
3683
3684 /* Need to make file sparse, if not already, before freeing range. */
3685 /* Consider adding equivalent for compressed since it could also work */
3686 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
3687 rc = -EOPNOTSUPP;
3688 free_xid(xid);
3689 return rc;
3690 }
3691
3692 filemap_invalidate_lock(inode->i_mapping);
3693 /*
3694 * We implement the punch hole through ioctl, so we need remove the page
3695 * caches first, otherwise the data may be inconsistent with the server.
3696 */
3697 truncate_pagecache_range(inode, offset, offset + len - 1);
3698
3699 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
3700
3701 fsctl_buf.FileOffset = cpu_to_le64(offset);
3702 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3703
3704 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3705 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
3706 true /* is_fctl */, (char *)&fsctl_buf,
3707 sizeof(struct file_zero_data_information),
3708 CIFSMaxBufSize, NULL, NULL);
3709 free_xid(xid);
3710 filemap_invalidate_unlock(inode->i_mapping);
3711 return rc;
3712 }
3713
3714 static int smb3_simple_fallocate_write_range(unsigned int xid,
3715 struct cifs_tcon *tcon,
3716 struct cifsFileInfo *cfile,
3717 loff_t off, loff_t len,
3718 char *buf)
3719 {
3720 struct cifs_io_parms io_parms = {0};
3721 int nbytes;
3722 int rc = 0;
3723 struct kvec iov[2];
3724
3725 io_parms.netfid = cfile->fid.netfid;
3726 io_parms.pid = current->tgid;
3727 io_parms.tcon = tcon;
3728 io_parms.persistent_fid = cfile->fid.persistent_fid;
3729 io_parms.volatile_fid = cfile->fid.volatile_fid;
3730
3731 while (len) {
3732 io_parms.offset = off;
3733 io_parms.length = len;
3734 if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
3735 io_parms.length = SMB2_MAX_BUFFER_SIZE;
3736 /* iov[0] is reserved for smb header */
3737 iov[1].iov_base = buf;
3738 iov[1].iov_len = io_parms.length;
3739 rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
3740 if (rc)
3741 break;
3742 if (nbytes > len)
3743 return -EINVAL;
3744 buf += nbytes;
3745 off += nbytes;
3746 len -= nbytes;
3747 }
3748 return rc;
3749 }
3750
3751 static int smb3_simple_fallocate_range(unsigned int xid,
3752 struct cifs_tcon *tcon,
3753 struct cifsFileInfo *cfile,
3754 loff_t off, loff_t len)
3755 {
3756 struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
3757 u32 out_data_len;
3758 char *buf = NULL;
3759 loff_t l;
3760 int rc;
3761
3762 in_data.file_offset = cpu_to_le64(off);
3763 in_data.length = cpu_to_le64(len);
3764 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3765 cfile->fid.volatile_fid,
3766 FSCTL_QUERY_ALLOCATED_RANGES, true,
3767 (char *)&in_data, sizeof(in_data),
3768 1024 * sizeof(struct file_allocated_range_buffer),
3769 (char **)&out_data, &out_data_len);
3770 if (rc)
3771 goto out;
3772
3773 buf = kzalloc(1024 * 1024, GFP_KERNEL);
3774 if (buf == NULL) {
3775 rc = -ENOMEM;
3776 goto out;
3777 }
3778
3779 tmp_data = out_data;
3780 while (len) {
3781 /*
3782 * The rest of the region is unmapped so write it all.
3783 */
3784 if (out_data_len == 0) {
3785 rc = smb3_simple_fallocate_write_range(xid, tcon,
3786 cfile, off, len, buf);
3787 goto out;
3788 }
3789
3790 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3791 rc = -EINVAL;
3792 goto out;
3793 }
3794
3795 if (off < le64_to_cpu(tmp_data->file_offset)) {
3796 /*
3797 * We are at a hole. Write until the end of the region
3798 * or until the next allocated data,
3799 * whichever comes next.
3800 */
3801 l = le64_to_cpu(tmp_data->file_offset) - off;
3802 if (len < l)
3803 l = len;
3804 rc = smb3_simple_fallocate_write_range(xid, tcon,
3805 cfile, off, l, buf);
3806 if (rc)
3807 goto out;
3808 off = off + l;
3809 len = len - l;
3810 if (len == 0)
3811 goto out;
3812 }
3813 /*
3814 * We are at a section of allocated data, just skip forward
3815 * until the end of the data or the end of the region
3816 * we are supposed to fallocate, whichever comes first.
3817 */
3818 l = le64_to_cpu(tmp_data->length);
3819 if (len < l)
3820 l = len;
3821 off += l;
3822 len -= l;
3823
3824 tmp_data = &tmp_data[1];
3825 out_data_len -= sizeof(struct file_allocated_range_buffer);
3826 }
3827
3828 out:
3829 kfree(out_data);
3830 kfree(buf);
3831 return rc;
3832 }
3833
3834
3835 static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
3836 loff_t off, loff_t len, bool keep_size)
3837 {
3838 struct inode *inode;
3839 struct cifsInodeInfo *cifsi;
3840 struct cifsFileInfo *cfile = file->private_data;
3841 long rc = -EOPNOTSUPP;
3842 unsigned int xid;
3843 __le64 eof;
3844
3845 xid = get_xid();
3846
3847 inode = d_inode(cfile->dentry);
3848 cifsi = CIFS_I(inode);
3849
3850 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3851 tcon->ses->Suid, off, len);
3852 /* if file not oplocked can't be sure whether asking to extend size */
3853 if (!CIFS_CACHE_READ(cifsi))
3854 if (keep_size == false) {
3855 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3856 tcon->tid, tcon->ses->Suid, off, len, rc);
3857 free_xid(xid);
3858 return rc;
3859 }
3860
3861 /*
3862 * Extending the file
3863 */
3864 if ((keep_size == false) && i_size_read(inode) < off + len) {
3865 rc = inode_newsize_ok(inode, off + len);
3866 if (rc)
3867 goto out;
3868
3869 if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)
3870 smb2_set_sparse(xid, tcon, cfile, inode, false);
3871
3872 eof = cpu_to_le64(off + len);
3873 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3874 cfile->fid.volatile_fid, cfile->pid, &eof);
3875 if (rc == 0) {
3876 cifsi->server_eof = off + len;
3877 cifs_setsize(inode, off + len);
3878 cifs_truncate_page(inode->i_mapping, inode->i_size);
3879 truncate_setsize(inode, off + len);
3880 }
3881 goto out;
3882 }
3883
3884 /*
3885 * Files are non-sparse by default so falloc may be a no-op
3886 * Must check if file sparse. If not sparse, and since we are not
3887 * extending then no need to do anything since file already allocated
3888 */
3889 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
3890 rc = 0;
3891 goto out;
3892 }
3893
3894 if (keep_size == true) {
3895 /*
3896 * We can not preallocate pages beyond the end of the file
3897 * in SMB2
3898 */
3899 if (off >= i_size_read(inode)) {
3900 rc = 0;
3901 goto out;
3902 }
3903 /*
3904 * For fallocates that are partially beyond the end of file,
3905 * clamp len so we only fallocate up to the end of file.
3906 */
3907 if (off + len > i_size_read(inode)) {
3908 len = i_size_read(inode) - off;
3909 }
3910 }
3911
3912 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
3913 /*
3914 * At this point, we are trying to fallocate an internal
3915 * regions of a sparse file. Since smb2 does not have a
3916 * fallocate command we have two otions on how to emulate this.
3917 * We can either turn the entire file to become non-sparse
3918 * which we only do if the fallocate is for virtually
3919 * the whole file, or we can overwrite the region with zeroes
3920 * using SMB2_write, which could be prohibitevly expensive
3921 * if len is large.
3922 */
3923 /*
3924 * We are only trying to fallocate a small region so
3925 * just write it with zero.
3926 */
3927 if (len <= 1024 * 1024) {
3928 rc = smb3_simple_fallocate_range(xid, tcon, cfile,
3929 off, len);
3930 goto out;
3931 }
3932
3933 /*
3934 * Check if falloc starts within first few pages of file
3935 * and ends within a few pages of the end of file to
3936 * ensure that most of file is being forced to be
3937 * fallocated now. If so then setting whole file sparse
3938 * ie potentially making a few extra pages at the beginning
3939 * or end of the file non-sparse via set_sparse is harmless.
3940 */
3941 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
3942 rc = -EOPNOTSUPP;
3943 goto out;
3944 }
3945 }
3946
3947 smb2_set_sparse(xid, tcon, cfile, inode, false);
3948 rc = 0;
3949
3950 out:
3951 if (rc)
3952 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
3953 tcon->ses->Suid, off, len, rc);
3954 else
3955 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
3956 tcon->ses->Suid, off, len);
3957
3958 free_xid(xid);
3959 return rc;
3960 }
3961
3962 static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
3963 loff_t off, loff_t len)
3964 {
3965 int rc;
3966 unsigned int xid;
3967 struct inode *inode;
3968 struct cifsFileInfo *cfile = file->private_data;
3969 struct cifsInodeInfo *cifsi;
3970 __le64 eof;
3971
3972 xid = get_xid();
3973
3974 inode = d_inode(cfile->dentry);
3975 cifsi = CIFS_I(inode);
3976
3977 if (off >= i_size_read(inode) ||
3978 off + len >= i_size_read(inode)) {
3979 rc = -EINVAL;
3980 goto out;
3981 }
3982
3983 rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
3984 i_size_read(inode) - off - len, off);
3985 if (rc < 0)
3986 goto out;
3987
3988 eof = cpu_to_le64(i_size_read(inode) - len);
3989 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3990 cfile->fid.volatile_fid, cfile->pid, &eof);
3991 if (rc < 0)
3992 goto out;
3993
3994 rc = 0;
3995
3996 cifsi->server_eof = i_size_read(inode) - len;
3997 truncate_setsize(inode, cifsi->server_eof);
3998 fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
3999 out:
4000 free_xid(xid);
4001 return rc;
4002 }
4003
4004 static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
4005 loff_t off, loff_t len)
4006 {
4007 int rc;
4008 unsigned int xid;
4009 struct cifsFileInfo *cfile = file->private_data;
4010 __le64 eof;
4011 __u64 count;
4012
4013 xid = get_xid();
4014
4015 if (off >= i_size_read(file->f_inode)) {
4016 rc = -EINVAL;
4017 goto out;
4018 }
4019
4020 count = i_size_read(file->f_inode) - off;
4021 eof = cpu_to_le64(i_size_read(file->f_inode) + len);
4022
4023 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
4024 cfile->fid.volatile_fid, cfile->pid, &eof);
4025 if (rc < 0)
4026 goto out;
4027
4028 rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
4029 if (rc < 0)
4030 goto out;
4031
4032 rc = smb3_zero_range(file, tcon, off, len, 1);
4033 if (rc < 0)
4034 goto out;
4035
4036 rc = 0;
4037 out:
4038 free_xid(xid);
4039 return rc;
4040 }
4041
4042 static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
4043 {
4044 struct cifsFileInfo *wrcfile, *cfile = file->private_data;
4045 struct cifsInodeInfo *cifsi;
4046 struct inode *inode;
4047 int rc = 0;
4048 struct file_allocated_range_buffer in_data, *out_data = NULL;
4049 u32 out_data_len;
4050 unsigned int xid;
4051
4052 if (whence != SEEK_HOLE && whence != SEEK_DATA)
4053 return generic_file_llseek(file, offset, whence);
4054
4055 inode = d_inode(cfile->dentry);
4056 cifsi = CIFS_I(inode);
4057
4058 if (offset < 0 || offset >= i_size_read(inode))
4059 return -ENXIO;
4060
4061 xid = get_xid();
4062 /*
4063 * We need to be sure that all dirty pages are written as they
4064 * might fill holes on the server.
4065 * Note that we also MUST flush any written pages since at least
4066 * some servers (Windows2016) will not reflect recent writes in
4067 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
4068 */
4069 wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
4070 if (wrcfile) {
4071 filemap_write_and_wait(inode->i_mapping);
4072 smb2_flush_file(xid, tcon, &wrcfile->fid);
4073 cifsFileInfo_put(wrcfile);
4074 }
4075
4076 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
4077 if (whence == SEEK_HOLE)
4078 offset = i_size_read(inode);
4079 goto lseek_exit;
4080 }
4081
4082 in_data.file_offset = cpu_to_le64(offset);
4083 in_data.length = cpu_to_le64(i_size_read(inode));
4084
4085 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
4086 cfile->fid.volatile_fid,
4087 FSCTL_QUERY_ALLOCATED_RANGES, true,
4088 (char *)&in_data, sizeof(in_data),
4089 sizeof(struct file_allocated_range_buffer),
4090 (char **)&out_data, &out_data_len);
4091 if (rc == -E2BIG)
4092 rc = 0;
4093 if (rc)
4094 goto lseek_exit;
4095
4096 if (whence == SEEK_HOLE && out_data_len == 0)
4097 goto lseek_exit;
4098
4099 if (whence == SEEK_DATA && out_data_len == 0) {
4100 rc = -ENXIO;
4101 goto lseek_exit;
4102 }
4103
4104 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
4105 rc = -EINVAL;
4106 goto lseek_exit;
4107 }
4108 if (whence == SEEK_DATA) {
4109 offset = le64_to_cpu(out_data->file_offset);
4110 goto lseek_exit;
4111 }
4112 if (offset < le64_to_cpu(out_data->file_offset))
4113 goto lseek_exit;
4114
4115 offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
4116
4117 lseek_exit:
4118 free_xid(xid);
4119 kfree(out_data);
4120 if (!rc)
4121 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
4122 else
4123 return rc;
4124 }
4125
4126 static int smb3_fiemap(struct cifs_tcon *tcon,
4127 struct cifsFileInfo *cfile,
4128 struct fiemap_extent_info *fei, u64 start, u64 len)
4129 {
4130 unsigned int xid;
4131 struct file_allocated_range_buffer in_data, *out_data;
4132 u32 out_data_len;
4133 int i, num, rc, flags, last_blob;
4134 u64 next;
4135
4136 rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
4137 if (rc)
4138 return rc;
4139
4140 xid = get_xid();
4141 again:
4142 in_data.file_offset = cpu_to_le64(start);
4143 in_data.length = cpu_to_le64(len);
4144
4145 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
4146 cfile->fid.volatile_fid,
4147 FSCTL_QUERY_ALLOCATED_RANGES, true,
4148 (char *)&in_data, sizeof(in_data),
4149 1024 * sizeof(struct file_allocated_range_buffer),
4150 (char **)&out_data, &out_data_len);
4151 if (rc == -E2BIG) {
4152 last_blob = 0;
4153 rc = 0;
4154 } else
4155 last_blob = 1;
4156 if (rc)
4157 goto out;
4158
4159 if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
4160 rc = -EINVAL;
4161 goto out;
4162 }
4163 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
4164 rc = -EINVAL;
4165 goto out;
4166 }
4167
4168 num = out_data_len / sizeof(struct file_allocated_range_buffer);
4169 for (i = 0; i < num; i++) {
4170 flags = 0;
4171 if (i == num - 1 && last_blob)
4172 flags |= FIEMAP_EXTENT_LAST;
4173
4174 rc = fiemap_fill_next_extent(fei,
4175 le64_to_cpu(out_data[i].file_offset),
4176 le64_to_cpu(out_data[i].file_offset),
4177 le64_to_cpu(out_data[i].length),
4178 flags);
4179 if (rc < 0)
4180 goto out;
4181 if (rc == 1) {
4182 rc = 0;
4183 goto out;
4184 }
4185 }
4186
4187 if (!last_blob) {
4188 next = le64_to_cpu(out_data[num - 1].file_offset) +
4189 le64_to_cpu(out_data[num - 1].length);
4190 len = len - (next - start);
4191 start = next;
4192 goto again;
4193 }
4194
4195 out:
4196 free_xid(xid);
4197 kfree(out_data);
4198 return rc;
4199 }
4200
4201 static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
4202 loff_t off, loff_t len)
4203 {
4204 /* KEEP_SIZE already checked for by do_fallocate */
4205 if (mode & FALLOC_FL_PUNCH_HOLE)
4206 return smb3_punch_hole(file, tcon, off, len);
4207 else if (mode & FALLOC_FL_ZERO_RANGE) {
4208 if (mode & FALLOC_FL_KEEP_SIZE)
4209 return smb3_zero_range(file, tcon, off, len, true);
4210 return smb3_zero_range(file, tcon, off, len, false);
4211 } else if (mode == FALLOC_FL_KEEP_SIZE)
4212 return smb3_simple_falloc(file, tcon, off, len, true);
4213 else if (mode == FALLOC_FL_COLLAPSE_RANGE)
4214 return smb3_collapse_range(file, tcon, off, len);
4215 else if (mode == FALLOC_FL_INSERT_RANGE)
4216 return smb3_insert_range(file, tcon, off, len);
4217 else if (mode == 0)
4218 return smb3_simple_falloc(file, tcon, off, len, false);
4219
4220 return -EOPNOTSUPP;
4221 }
4222
4223 static void
4224 smb2_downgrade_oplock(struct TCP_Server_Info *server,
4225 struct cifsInodeInfo *cinode, __u32 oplock,
4226 unsigned int epoch, bool *purge_cache)
4227 {
4228 server->ops->set_oplock_level(cinode, oplock, 0, NULL);
4229 }
4230
4231 static void
4232 smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4233 unsigned int epoch, bool *purge_cache);
4234
4235 static void
4236 smb3_downgrade_oplock(struct TCP_Server_Info *server,
4237 struct cifsInodeInfo *cinode, __u32 oplock,
4238 unsigned int epoch, bool *purge_cache)
4239 {
4240 unsigned int old_state = cinode->oplock;
4241 unsigned int old_epoch = cinode->epoch;
4242 unsigned int new_state;
4243
4244 if (epoch > old_epoch) {
4245 smb21_set_oplock_level(cinode, oplock, 0, NULL);
4246 cinode->epoch = epoch;
4247 }
4248
4249 new_state = cinode->oplock;
4250 *purge_cache = false;
4251
4252 if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
4253 (new_state & CIFS_CACHE_READ_FLG) == 0)
4254 *purge_cache = true;
4255 else if (old_state == new_state && (epoch - old_epoch > 1))
4256 *purge_cache = true;
4257 }
4258
4259 static void
4260 smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4261 unsigned int epoch, bool *purge_cache)
4262 {
4263 oplock &= 0xFF;
4264 cinode->lease_granted = false;
4265 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
4266 return;
4267 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
4268 cinode->oplock = CIFS_CACHE_RHW_FLG;
4269 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
4270 &cinode->netfs.inode);
4271 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
4272 cinode->oplock = CIFS_CACHE_RW_FLG;
4273 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
4274 &cinode->netfs.inode);
4275 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
4276 cinode->oplock = CIFS_CACHE_READ_FLG;
4277 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
4278 &cinode->netfs.inode);
4279 } else
4280 cinode->oplock = 0;
4281 }
4282
4283 static void
4284 smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4285 unsigned int epoch, bool *purge_cache)
4286 {
4287 char message[5] = {0};
4288 unsigned int new_oplock = 0;
4289
4290 oplock &= 0xFF;
4291 cinode->lease_granted = true;
4292 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
4293 return;
4294
4295 /* Check if the server granted an oplock rather than a lease */
4296 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
4297 return smb2_set_oplock_level(cinode, oplock, epoch,
4298 purge_cache);
4299
4300 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
4301 new_oplock |= CIFS_CACHE_READ_FLG;
4302 strcat(message, "R");
4303 }
4304 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
4305 new_oplock |= CIFS_CACHE_HANDLE_FLG;
4306 strcat(message, "H");
4307 }
4308 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
4309 new_oplock |= CIFS_CACHE_WRITE_FLG;
4310 strcat(message, "W");
4311 }
4312 if (!new_oplock)
4313 strncpy(message, "None", sizeof(message));
4314
4315 cinode->oplock = new_oplock;
4316 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
4317 &cinode->netfs.inode);
4318 }
4319
4320 static void
4321 smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4322 unsigned int epoch, bool *purge_cache)
4323 {
4324 unsigned int old_oplock = cinode->oplock;
4325
4326 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
4327
4328 if (purge_cache) {
4329 *purge_cache = false;
4330 if (old_oplock == CIFS_CACHE_READ_FLG) {
4331 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
4332 (epoch - cinode->epoch > 0))
4333 *purge_cache = true;
4334 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
4335 (epoch - cinode->epoch > 1))
4336 *purge_cache = true;
4337 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
4338 (epoch - cinode->epoch > 1))
4339 *purge_cache = true;
4340 else if (cinode->oplock == 0 &&
4341 (epoch - cinode->epoch > 0))
4342 *purge_cache = true;
4343 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
4344 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
4345 (epoch - cinode->epoch > 0))
4346 *purge_cache = true;
4347 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
4348 (epoch - cinode->epoch > 1))
4349 *purge_cache = true;
4350 }
4351 cinode->epoch = epoch;
4352 }
4353 }
4354
4355 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
4356 static bool
4357 smb2_is_read_op(__u32 oplock)
4358 {
4359 return oplock == SMB2_OPLOCK_LEVEL_II;
4360 }
4361 #endif /* CIFS_ALLOW_INSECURE_LEGACY */
4362
4363 static bool
4364 smb21_is_read_op(__u32 oplock)
4365 {
4366 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
4367 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
4368 }
4369
4370 static __le32
4371 map_oplock_to_lease(u8 oplock)
4372 {
4373 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
4374 return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
4375 else if (oplock == SMB2_OPLOCK_LEVEL_II)
4376 return SMB2_LEASE_READ_CACHING_LE;
4377 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
4378 return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
4379 SMB2_LEASE_WRITE_CACHING_LE;
4380 return 0;
4381 }
4382
4383 static char *
4384 smb2_create_lease_buf(u8 *lease_key, u8 oplock)
4385 {
4386 struct create_lease *buf;
4387
4388 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
4389 if (!buf)
4390 return NULL;
4391
4392 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
4393 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4394
4395 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4396 (struct create_lease, lcontext));
4397 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
4398 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4399 (struct create_lease, Name));
4400 buf->ccontext.NameLength = cpu_to_le16(4);
4401 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
4402 buf->Name[0] = 'R';
4403 buf->Name[1] = 'q';
4404 buf->Name[2] = 'L';
4405 buf->Name[3] = 's';
4406 return (char *)buf;
4407 }
4408
4409 static char *
4410 smb3_create_lease_buf(u8 *lease_key, u8 oplock)
4411 {
4412 struct create_lease_v2 *buf;
4413
4414 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
4415 if (!buf)
4416 return NULL;
4417
4418 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
4419 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4420
4421 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4422 (struct create_lease_v2, lcontext));
4423 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
4424 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4425 (struct create_lease_v2, Name));
4426 buf->ccontext.NameLength = cpu_to_le16(4);
4427 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
4428 buf->Name[0] = 'R';
4429 buf->Name[1] = 'q';
4430 buf->Name[2] = 'L';
4431 buf->Name[3] = 's';
4432 return (char *)buf;
4433 }
4434
4435 static __u8
4436 smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
4437 {
4438 struct create_lease *lc = (struct create_lease *)buf;
4439
4440 *epoch = 0; /* not used */
4441 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
4442 return SMB2_OPLOCK_LEVEL_NOCHANGE;
4443 return le32_to_cpu(lc->lcontext.LeaseState);
4444 }
4445
4446 static __u8
4447 smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
4448 {
4449 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
4450
4451 *epoch = le16_to_cpu(lc->lcontext.Epoch);
4452 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
4453 return SMB2_OPLOCK_LEVEL_NOCHANGE;
4454 if (lease_key)
4455 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
4456 return le32_to_cpu(lc->lcontext.LeaseState);
4457 }
4458
4459 static unsigned int
4460 smb2_wp_retry_size(struct inode *inode)
4461 {
4462 return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize,
4463 SMB2_MAX_BUFFER_SIZE);
4464 }
4465
4466 static bool
4467 smb2_dir_needs_close(struct cifsFileInfo *cfile)
4468 {
4469 return !cfile->invalidHandle;
4470 }
4471
4472 static void
4473 fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
4474 struct smb_rqst *old_rq, __le16 cipher_type)
4475 {
4476 struct smb2_hdr *shdr =
4477 (struct smb2_hdr *)old_rq->rq_iov[0].iov_base;
4478
4479 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
4480 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
4481 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
4482 tr_hdr->Flags = cpu_to_le16(0x01);
4483 if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4484 (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4485 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
4486 else
4487 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
4488 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
4489 }
4490
4491 /* We can not use the normal sg_set_buf() as we will sometimes pass a
4492 * stack object as buf.
4493 */
4494 static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
4495 unsigned int buflen)
4496 {
4497 void *addr;
4498 /*
4499 * VMAP_STACK (at least) puts stack into the vmalloc address space
4500 */
4501 if (is_vmalloc_addr(buf))
4502 addr = vmalloc_to_page(buf);
4503 else
4504 addr = virt_to_page(buf);
4505 sg_set_page(sg, addr, buflen, offset_in_page(buf));
4506 }
4507
4508 /* Assumes the first rqst has a transform header as the first iov.
4509 * I.e.
4510 * rqst[0].rq_iov[0] is transform header
4511 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
4512 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
4513 */
4514 static struct scatterlist *
4515 init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
4516 {
4517 unsigned int sg_len;
4518 struct scatterlist *sg;
4519 unsigned int i;
4520 unsigned int j;
4521 unsigned int idx = 0;
4522 int skip;
4523
4524 sg_len = 1;
4525 for (i = 0; i < num_rqst; i++)
4526 sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
4527
4528 sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
4529 if (!sg)
4530 return NULL;
4531
4532 sg_init_table(sg, sg_len);
4533 for (i = 0; i < num_rqst; i++) {
4534 for (j = 0; j < rqst[i].rq_nvec; j++) {
4535 /*
4536 * The first rqst has a transform header where the
4537 * first 20 bytes are not part of the encrypted blob
4538 */
4539 skip = (i == 0) && (j == 0) ? 20 : 0;
4540 smb2_sg_set_buf(&sg[idx++],
4541 rqst[i].rq_iov[j].iov_base + skip,
4542 rqst[i].rq_iov[j].iov_len - skip);
4543 }
4544
4545 for (j = 0; j < rqst[i].rq_npages; j++) {
4546 unsigned int len, offset;
4547
4548 rqst_page_get_length(&rqst[i], j, &len, &offset);
4549 sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
4550 }
4551 }
4552 smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
4553 return sg;
4554 }
4555
4556 static int
4557 smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
4558 {
4559 struct cifs_ses *ses;
4560 u8 *ses_enc_key;
4561
4562 spin_lock(&cifs_tcp_ses_lock);
4563 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
4564 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
4565 if (ses->Suid == ses_id) {
4566 ses_enc_key = enc ? ses->smb3encryptionkey :
4567 ses->smb3decryptionkey;
4568 memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
4569 spin_unlock(&cifs_tcp_ses_lock);
4570 return 0;
4571 }
4572 }
4573 }
4574 spin_unlock(&cifs_tcp_ses_lock);
4575
4576 return -EAGAIN;
4577 }
4578 /*
4579 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
4580 * iov[0] - transform header (associate data),
4581 * iov[1-N] - SMB2 header and pages - data to encrypt.
4582 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
4583 * untouched.
4584 */
4585 static int
4586 crypt_message(struct TCP_Server_Info *server, int num_rqst,
4587 struct smb_rqst *rqst, int enc)
4588 {
4589 struct smb2_transform_hdr *tr_hdr =
4590 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
4591 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
4592 int rc = 0;
4593 struct scatterlist *sg;
4594 u8 sign[SMB2_SIGNATURE_SIZE] = {};
4595 u8 key[SMB3_ENC_DEC_KEY_SIZE];
4596 struct aead_request *req;
4597 char *iv;
4598 unsigned int iv_len;
4599 DECLARE_CRYPTO_WAIT(wait);
4600 struct crypto_aead *tfm;
4601 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4602
4603 rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
4604 if (rc) {
4605 cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
4606 enc ? "en" : "de");
4607 return rc;
4608 }
4609
4610 rc = smb3_crypto_aead_allocate(server);
4611 if (rc) {
4612 cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
4613 return rc;
4614 }
4615
4616 tfm = enc ? server->secmech.ccmaesencrypt :
4617 server->secmech.ccmaesdecrypt;
4618
4619 if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
4620 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4621 rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
4622 else
4623 rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
4624
4625 if (rc) {
4626 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
4627 return rc;
4628 }
4629
4630 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
4631 if (rc) {
4632 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
4633 return rc;
4634 }
4635
4636 req = aead_request_alloc(tfm, GFP_KERNEL);
4637 if (!req) {
4638 cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
4639 return -ENOMEM;
4640 }
4641
4642 if (!enc) {
4643 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
4644 crypt_len += SMB2_SIGNATURE_SIZE;
4645 }
4646
4647 sg = init_sg(num_rqst, rqst, sign);
4648 if (!sg) {
4649 cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
4650 rc = -ENOMEM;
4651 goto free_req;
4652 }
4653
4654 iv_len = crypto_aead_ivsize(tfm);
4655 iv = kzalloc(iv_len, GFP_KERNEL);
4656 if (!iv) {
4657 cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
4658 rc = -ENOMEM;
4659 goto free_sg;
4660 }
4661
4662 if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4663 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4664 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
4665 else {
4666 iv[0] = 3;
4667 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
4668 }
4669
4670 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
4671 aead_request_set_ad(req, assoc_data_len);
4672
4673 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
4674 crypto_req_done, &wait);
4675
4676 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
4677 : crypto_aead_decrypt(req), &wait);
4678
4679 if (!rc && enc)
4680 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
4681
4682 kfree(iv);
4683 free_sg:
4684 kfree(sg);
4685 free_req:
4686 kfree(req);
4687 return rc;
4688 }
4689
4690 void
4691 smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
4692 {
4693 int i, j;
4694
4695 for (i = 0; i < num_rqst; i++) {
4696 if (rqst[i].rq_pages) {
4697 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
4698 put_page(rqst[i].rq_pages[j]);
4699 kfree(rqst[i].rq_pages);
4700 }
4701 }
4702 }
4703
4704 /*
4705 * This function will initialize new_rq and encrypt the content.
4706 * The first entry, new_rq[0], only contains a single iov which contains
4707 * a smb2_transform_hdr and is pre-allocated by the caller.
4708 * This function then populates new_rq[1+] with the content from olq_rq[0+].
4709 *
4710 * The end result is an array of smb_rqst structures where the first structure
4711 * only contains a single iov for the transform header which we then can pass
4712 * to crypt_message().
4713 *
4714 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
4715 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
4716 */
4717 static int
4718 smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
4719 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
4720 {
4721 struct page **pages;
4722 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
4723 unsigned int npages;
4724 unsigned int orig_len = 0;
4725 int i, j;
4726 int rc = -ENOMEM;
4727
4728 for (i = 1; i < num_rqst; i++) {
4729 npages = old_rq[i - 1].rq_npages;
4730 pages = kmalloc_array(npages, sizeof(struct page *),
4731 GFP_KERNEL);
4732 if (!pages)
4733 goto err_free;
4734
4735 new_rq[i].rq_pages = pages;
4736 new_rq[i].rq_npages = npages;
4737 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
4738 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
4739 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
4740 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
4741 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
4742
4743 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
4744
4745 for (j = 0; j < npages; j++) {
4746 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4747 if (!pages[j])
4748 goto err_free;
4749 }
4750
4751 /* copy pages form the old */
4752 for (j = 0; j < npages; j++) {
4753 char *dst, *src;
4754 unsigned int offset, len;
4755
4756 rqst_page_get_length(&new_rq[i], j, &len, &offset);
4757
4758 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
4759 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
4760
4761 memcpy(dst, src, len);
4762 kunmap(new_rq[i].rq_pages[j]);
4763 kunmap(old_rq[i - 1].rq_pages[j]);
4764 }
4765 }
4766
4767 /* fill the 1st iov with a transform header */
4768 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
4769
4770 rc = crypt_message(server, num_rqst, new_rq, 1);
4771 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
4772 if (rc)
4773 goto err_free;
4774
4775 return rc;
4776
4777 err_free:
4778 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
4779 return rc;
4780 }
4781
4782 static int
4783 smb3_is_transform_hdr(void *buf)
4784 {
4785 struct smb2_transform_hdr *trhdr = buf;
4786
4787 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
4788 }
4789
4790 static int
4791 decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
4792 unsigned int buf_data_size, struct page **pages,
4793 unsigned int npages, unsigned int page_data_size,
4794 bool is_offloaded)
4795 {
4796 struct kvec iov[2];
4797 struct smb_rqst rqst = {NULL};
4798 int rc;
4799
4800 iov[0].iov_base = buf;
4801 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
4802 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
4803 iov[1].iov_len = buf_data_size;
4804
4805 rqst.rq_iov = iov;
4806 rqst.rq_nvec = 2;
4807 rqst.rq_pages = pages;
4808 rqst.rq_npages = npages;
4809 rqst.rq_pagesz = PAGE_SIZE;
4810 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
4811
4812 rc = crypt_message(server, 1, &rqst, 0);
4813 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
4814
4815 if (rc)
4816 return rc;
4817
4818 memmove(buf, iov[1].iov_base, buf_data_size);
4819
4820 if (!is_offloaded)
4821 server->total_read = buf_data_size + page_data_size;
4822
4823 return rc;
4824 }
4825
4826 static int
4827 read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
4828 unsigned int npages, unsigned int len)
4829 {
4830 int i;
4831 int length;
4832
4833 for (i = 0; i < npages; i++) {
4834 struct page *page = pages[i];
4835 size_t n;
4836
4837 n = len;
4838 if (len >= PAGE_SIZE) {
4839 /* enough data to fill the page */
4840 n = PAGE_SIZE;
4841 len -= n;
4842 } else {
4843 zero_user(page, len, PAGE_SIZE - len);
4844 len = 0;
4845 }
4846 length = cifs_read_page_from_socket(server, page, 0, n);
4847 if (length < 0)
4848 return length;
4849 server->total_read += length;
4850 }
4851
4852 return 0;
4853 }
4854
4855 static int
4856 init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
4857 unsigned int cur_off, struct bio_vec **page_vec)
4858 {
4859 struct bio_vec *bvec;
4860 int i;
4861
4862 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
4863 if (!bvec)
4864 return -ENOMEM;
4865
4866 for (i = 0; i < npages; i++) {
4867 bvec[i].bv_page = pages[i];
4868 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
4869 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
4870 data_size -= bvec[i].bv_len;
4871 }
4872
4873 if (data_size != 0) {
4874 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
4875 kfree(bvec);
4876 return -EIO;
4877 }
4878
4879 *page_vec = bvec;
4880 return 0;
4881 }
4882
4883 static int
4884 handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
4885 char *buf, unsigned int buf_len, struct page **pages,
4886 unsigned int npages, unsigned int page_data_size,
4887 bool is_offloaded)
4888 {
4889 unsigned int data_offset;
4890 unsigned int data_len;
4891 unsigned int cur_off;
4892 unsigned int cur_page_idx;
4893 unsigned int pad_len;
4894 struct cifs_readdata *rdata = mid->callback_data;
4895 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
4896 struct bio_vec *bvec = NULL;
4897 struct iov_iter iter;
4898 struct kvec iov;
4899 int length;
4900 bool use_rdma_mr = false;
4901
4902 if (shdr->Command != SMB2_READ) {
4903 cifs_server_dbg(VFS, "only big read responses are supported\n");
4904 return -ENOTSUPP;
4905 }
4906
4907 if (server->ops->is_session_expired &&
4908 server->ops->is_session_expired(buf)) {
4909 if (!is_offloaded)
4910 cifs_reconnect(server, true);
4911 return -1;
4912 }
4913
4914 if (server->ops->is_status_pending &&
4915 server->ops->is_status_pending(buf, server))
4916 return -1;
4917
4918 /* set up first two iov to get credits */
4919 rdata->iov[0].iov_base = buf;
4920 rdata->iov[0].iov_len = 0;
4921 rdata->iov[1].iov_base = buf;
4922 rdata->iov[1].iov_len =
4923 min_t(unsigned int, buf_len, server->vals->read_rsp_size);
4924 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
4925 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
4926 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
4927 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
4928
4929 rdata->result = server->ops->map_error(buf, true);
4930 if (rdata->result != 0) {
4931 cifs_dbg(FYI, "%s: server returned error %d\n",
4932 __func__, rdata->result);
4933 /* normal error on read response */
4934 if (is_offloaded)
4935 mid->mid_state = MID_RESPONSE_RECEIVED;
4936 else
4937 dequeue_mid(mid, false);
4938 return 0;
4939 }
4940
4941 data_offset = server->ops->read_data_offset(buf);
4942 #ifdef CONFIG_CIFS_SMB_DIRECT
4943 use_rdma_mr = rdata->mr;
4944 #endif
4945 data_len = server->ops->read_data_length(buf, use_rdma_mr);
4946
4947 if (data_offset < server->vals->read_rsp_size) {
4948 /*
4949 * win2k8 sometimes sends an offset of 0 when the read
4950 * is beyond the EOF. Treat it as if the data starts just after
4951 * the header.
4952 */
4953 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
4954 __func__, data_offset);
4955 data_offset = server->vals->read_rsp_size;
4956 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
4957 /* data_offset is beyond the end of smallbuf */
4958 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
4959 __func__, data_offset);
4960 rdata->result = -EIO;
4961 if (is_offloaded)
4962 mid->mid_state = MID_RESPONSE_MALFORMED;
4963 else
4964 dequeue_mid(mid, rdata->result);
4965 return 0;
4966 }
4967
4968 pad_len = data_offset - server->vals->read_rsp_size;
4969
4970 if (buf_len <= data_offset) {
4971 /* read response payload is in pages */
4972 cur_page_idx = pad_len / PAGE_SIZE;
4973 cur_off = pad_len % PAGE_SIZE;
4974
4975 if (cur_page_idx != 0) {
4976 /* data offset is beyond the 1st page of response */
4977 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
4978 __func__, data_offset);
4979 rdata->result = -EIO;
4980 if (is_offloaded)
4981 mid->mid_state = MID_RESPONSE_MALFORMED;
4982 else
4983 dequeue_mid(mid, rdata->result);
4984 return 0;
4985 }
4986
4987 if (data_len > page_data_size - pad_len) {
4988 /* data_len is corrupt -- discard frame */
4989 rdata->result = -EIO;
4990 if (is_offloaded)
4991 mid->mid_state = MID_RESPONSE_MALFORMED;
4992 else
4993 dequeue_mid(mid, rdata->result);
4994 return 0;
4995 }
4996
4997 rdata->result = init_read_bvec(pages, npages, page_data_size,
4998 cur_off, &bvec);
4999 if (rdata->result != 0) {
5000 if (is_offloaded)
5001 mid->mid_state = MID_RESPONSE_MALFORMED;
5002 else
5003 dequeue_mid(mid, rdata->result);
5004 return 0;
5005 }
5006
5007 iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
5008 } else if (buf_len >= data_offset + data_len) {
5009 /* read response payload is in buf */
5010 WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
5011 iov.iov_base = buf + data_offset;
5012 iov.iov_len = data_len;
5013 iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
5014 } else {
5015 /* read response payload cannot be in both buf and pages */
5016 WARN_ONCE(1, "buf can not contain only a part of read data");
5017 rdata->result = -EIO;
5018 if (is_offloaded)
5019 mid->mid_state = MID_RESPONSE_MALFORMED;
5020 else
5021 dequeue_mid(mid, rdata->result);
5022 return 0;
5023 }
5024
5025 length = rdata->copy_into_pages(server, rdata, &iter);
5026
5027 kfree(bvec);
5028
5029 if (length < 0)
5030 return length;
5031
5032 if (is_offloaded)
5033 mid->mid_state = MID_RESPONSE_RECEIVED;
5034 else
5035 dequeue_mid(mid, false);
5036 return length;
5037 }
5038
5039 struct smb2_decrypt_work {
5040 struct work_struct decrypt;
5041 struct TCP_Server_Info *server;
5042 struct page **ppages;
5043 char *buf;
5044 unsigned int npages;
5045 unsigned int len;
5046 };
5047
5048
5049 static void smb2_decrypt_offload(struct work_struct *work)
5050 {
5051 struct smb2_decrypt_work *dw = container_of(work,
5052 struct smb2_decrypt_work, decrypt);
5053 int i, rc;
5054 struct mid_q_entry *mid;
5055
5056 rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
5057 dw->ppages, dw->npages, dw->len, true);
5058 if (rc) {
5059 cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
5060 goto free_pages;
5061 }
5062
5063 dw->server->lstrp = jiffies;
5064 mid = smb2_find_dequeue_mid(dw->server, dw->buf);
5065 if (mid == NULL)
5066 cifs_dbg(FYI, "mid not found\n");
5067 else {
5068 mid->decrypted = true;
5069 rc = handle_read_data(dw->server, mid, dw->buf,
5070 dw->server->vals->read_rsp_size,
5071 dw->ppages, dw->npages, dw->len,
5072 true);
5073 if (rc >= 0) {
5074 #ifdef CONFIG_CIFS_STATS2
5075 mid->when_received = jiffies;
5076 #endif
5077 if (dw->server->ops->is_network_name_deleted)
5078 dw->server->ops->is_network_name_deleted(dw->buf,
5079 dw->server);
5080
5081 mid->callback(mid);
5082 } else {
5083 spin_lock(&cifs_tcp_ses_lock);
5084 spin_lock(&GlobalMid_Lock);
5085 if (dw->server->tcpStatus == CifsNeedReconnect) {
5086 mid->mid_state = MID_RETRY_NEEDED;
5087 spin_unlock(&GlobalMid_Lock);
5088 spin_unlock(&cifs_tcp_ses_lock);
5089 mid->callback(mid);
5090 } else {
5091 mid->mid_state = MID_REQUEST_SUBMITTED;
5092 mid->mid_flags &= ~(MID_DELETED);
5093 list_add_tail(&mid->qhead,
5094 &dw->server->pending_mid_q);
5095 spin_unlock(&GlobalMid_Lock);
5096 spin_unlock(&cifs_tcp_ses_lock);
5097 }
5098 }
5099 cifs_mid_q_entry_release(mid);
5100 }
5101
5102 free_pages:
5103 for (i = dw->npages-1; i >= 0; i--)
5104 put_page(dw->ppages[i]);
5105
5106 kfree(dw->ppages);
5107 cifs_small_buf_release(dw->buf);
5108 kfree(dw);
5109 }
5110
5111
5112 static int
5113 receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
5114 int *num_mids)
5115 {
5116 char *buf = server->smallbuf;
5117 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
5118 unsigned int npages;
5119 struct page **pages;
5120 unsigned int len;
5121 unsigned int buflen = server->pdu_size;
5122 int rc;
5123 int i = 0;
5124 struct smb2_decrypt_work *dw;
5125
5126 *num_mids = 1;
5127 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
5128 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
5129
5130 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
5131 if (rc < 0)
5132 return rc;
5133 server->total_read += rc;
5134
5135 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
5136 server->vals->read_rsp_size;
5137 npages = DIV_ROUND_UP(len, PAGE_SIZE);
5138
5139 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
5140 if (!pages) {
5141 rc = -ENOMEM;
5142 goto discard_data;
5143 }
5144
5145 for (; i < npages; i++) {
5146 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
5147 if (!pages[i]) {
5148 rc = -ENOMEM;
5149 goto discard_data;
5150 }
5151 }
5152
5153 /* read read data into pages */
5154 rc = read_data_into_pages(server, pages, npages, len);
5155 if (rc)
5156 goto free_pages;
5157
5158 rc = cifs_discard_remaining_data(server);
5159 if (rc)
5160 goto free_pages;
5161
5162 /*
5163 * For large reads, offload to different thread for better performance,
5164 * use more cores decrypting which can be expensive
5165 */
5166
5167 if ((server->min_offload) && (server->in_flight > 1) &&
5168 (server->pdu_size >= server->min_offload)) {
5169 dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
5170 if (dw == NULL)
5171 goto non_offloaded_decrypt;
5172
5173 dw->buf = server->smallbuf;
5174 server->smallbuf = (char *)cifs_small_buf_get();
5175
5176 INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
5177
5178 dw->npages = npages;
5179 dw->server = server;
5180 dw->ppages = pages;
5181 dw->len = len;
5182 queue_work(decrypt_wq, &dw->decrypt);
5183 *num_mids = 0; /* worker thread takes care of finding mid */
5184 return -1;
5185 }
5186
5187 non_offloaded_decrypt:
5188 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
5189 pages, npages, len, false);
5190 if (rc)
5191 goto free_pages;
5192
5193 *mid = smb2_find_mid(server, buf);
5194 if (*mid == NULL)
5195 cifs_dbg(FYI, "mid not found\n");
5196 else {
5197 cifs_dbg(FYI, "mid found\n");
5198 (*mid)->decrypted = true;
5199 rc = handle_read_data(server, *mid, buf,
5200 server->vals->read_rsp_size,
5201 pages, npages, len, false);
5202 if (rc >= 0) {
5203 if (server->ops->is_network_name_deleted) {
5204 server->ops->is_network_name_deleted(buf,
5205 server);
5206 }
5207 }
5208 }
5209
5210 free_pages:
5211 for (i = i - 1; i >= 0; i--)
5212 put_page(pages[i]);
5213 kfree(pages);
5214 return rc;
5215 discard_data:
5216 cifs_discard_remaining_data(server);
5217 goto free_pages;
5218 }
5219
5220 static int
5221 receive_encrypted_standard(struct TCP_Server_Info *server,
5222 struct mid_q_entry **mids, char **bufs,
5223 int *num_mids)
5224 {
5225 int ret, length;
5226 char *buf = server->smallbuf;
5227 struct smb2_hdr *shdr;
5228 unsigned int pdu_length = server->pdu_size;
5229 unsigned int buf_size;
5230 struct mid_q_entry *mid_entry;
5231 int next_is_large;
5232 char *next_buffer = NULL;
5233
5234 *num_mids = 0;
5235
5236 /* switch to large buffer if too big for a small one */
5237 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
5238 server->large_buf = true;
5239 memcpy(server->bigbuf, buf, server->total_read);
5240 buf = server->bigbuf;
5241 }
5242
5243 /* now read the rest */
5244 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
5245 pdu_length - HEADER_SIZE(server) + 1);
5246 if (length < 0)
5247 return length;
5248 server->total_read += length;
5249
5250 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
5251 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
5252 if (length)
5253 return length;
5254
5255 next_is_large = server->large_buf;
5256 one_more:
5257 shdr = (struct smb2_hdr *)buf;
5258 if (shdr->NextCommand) {
5259 if (next_is_large)
5260 next_buffer = (char *)cifs_buf_get();
5261 else
5262 next_buffer = (char *)cifs_small_buf_get();
5263 memcpy(next_buffer,
5264 buf + le32_to_cpu(shdr->NextCommand),
5265 pdu_length - le32_to_cpu(shdr->NextCommand));
5266 }
5267
5268 mid_entry = smb2_find_mid(server, buf);
5269 if (mid_entry == NULL)
5270 cifs_dbg(FYI, "mid not found\n");
5271 else {
5272 cifs_dbg(FYI, "mid found\n");
5273 mid_entry->decrypted = true;
5274 mid_entry->resp_buf_size = server->pdu_size;
5275 }
5276
5277 if (*num_mids >= MAX_COMPOUND) {
5278 cifs_server_dbg(VFS, "too many PDUs in compound\n");
5279 return -1;
5280 }
5281 bufs[*num_mids] = buf;
5282 mids[(*num_mids)++] = mid_entry;
5283
5284 if (mid_entry && mid_entry->handle)
5285 ret = mid_entry->handle(server, mid_entry);
5286 else
5287 ret = cifs_handle_standard(server, mid_entry);
5288
5289 if (ret == 0 && shdr->NextCommand) {
5290 pdu_length -= le32_to_cpu(shdr->NextCommand);
5291 server->large_buf = next_is_large;
5292 if (next_is_large)
5293 server->bigbuf = buf = next_buffer;
5294 else
5295 server->smallbuf = buf = next_buffer;
5296 goto one_more;
5297 } else if (ret != 0) {
5298 /*
5299 * ret != 0 here means that we didn't get to handle_mid() thus
5300 * server->smallbuf and server->bigbuf are still valid. We need
5301 * to free next_buffer because it is not going to be used
5302 * anywhere.
5303 */
5304 if (next_is_large)
5305 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
5306 else
5307 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
5308 }
5309
5310 return ret;
5311 }
5312
5313 static int
5314 smb3_receive_transform(struct TCP_Server_Info *server,
5315 struct mid_q_entry **mids, char **bufs, int *num_mids)
5316 {
5317 char *buf = server->smallbuf;
5318 unsigned int pdu_length = server->pdu_size;
5319 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
5320 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
5321
5322 if (pdu_length < sizeof(struct smb2_transform_hdr) +
5323 sizeof(struct smb2_hdr)) {
5324 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
5325 pdu_length);
5326 cifs_reconnect(server, true);
5327 return -ECONNABORTED;
5328 }
5329
5330 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
5331 cifs_server_dbg(VFS, "Transform message is broken\n");
5332 cifs_reconnect(server, true);
5333 return -ECONNABORTED;
5334 }
5335
5336 /* TODO: add support for compounds containing READ. */
5337 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
5338 return receive_encrypted_read(server, &mids[0], num_mids);
5339 }
5340
5341 return receive_encrypted_standard(server, mids, bufs, num_mids);
5342 }
5343
5344 int
5345 smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
5346 {
5347 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
5348
5349 return handle_read_data(server, mid, buf, server->pdu_size,
5350 NULL, 0, 0, false);
5351 }
5352
5353 static int
5354 smb2_next_header(char *buf)
5355 {
5356 struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
5357 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
5358
5359 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
5360 return sizeof(struct smb2_transform_hdr) +
5361 le32_to_cpu(t_hdr->OriginalMessageSize);
5362
5363 return le32_to_cpu(hdr->NextCommand);
5364 }
5365
5366 static int
5367 smb2_make_node(unsigned int xid, struct inode *inode,
5368 struct dentry *dentry, struct cifs_tcon *tcon,
5369 const char *full_path, umode_t mode, dev_t dev)
5370 {
5371 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
5372 int rc = -EPERM;
5373 FILE_ALL_INFO *buf = NULL;
5374 struct cifs_io_parms io_parms = {0};
5375 __u32 oplock = 0;
5376 struct cifs_fid fid;
5377 struct cifs_open_parms oparms;
5378 unsigned int bytes_written;
5379 struct win_dev *pdev;
5380 struct kvec iov[2];
5381
5382 /*
5383 * Check if mounted with mount parm 'sfu' mount parm.
5384 * SFU emulation should work with all servers, but only
5385 * supports block and char device (no socket & fifo),
5386 * and was used by default in earlier versions of Windows
5387 */
5388 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
5389 goto out;
5390
5391 /*
5392 * TODO: Add ability to create instead via reparse point. Windows (e.g.
5393 * their current NFS server) uses this approach to expose special files
5394 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
5395 */
5396
5397 if (!S_ISCHR(mode) && !S_ISBLK(mode))
5398 goto out;
5399
5400 cifs_dbg(FYI, "sfu compat create special file\n");
5401
5402 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
5403 if (buf == NULL) {
5404 rc = -ENOMEM;
5405 goto out;
5406 }
5407
5408 oparms.tcon = tcon;
5409 oparms.cifs_sb = cifs_sb;
5410 oparms.desired_access = GENERIC_WRITE;
5411 oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
5412 CREATE_OPTION_SPECIAL);
5413 oparms.disposition = FILE_CREATE;
5414 oparms.path = full_path;
5415 oparms.fid = &fid;
5416 oparms.reconnect = false;
5417
5418 if (tcon->ses->server->oplocks)
5419 oplock = REQ_OPLOCK;
5420 else
5421 oplock = 0;
5422 rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
5423 if (rc)
5424 goto out;
5425
5426 /*
5427 * BB Do not bother to decode buf since no local inode yet to put
5428 * timestamps in, but we can reuse it safely.
5429 */
5430
5431 pdev = (struct win_dev *)buf;
5432 io_parms.pid = current->tgid;
5433 io_parms.tcon = tcon;
5434 io_parms.offset = 0;
5435 io_parms.length = sizeof(struct win_dev);
5436 iov[1].iov_base = buf;
5437 iov[1].iov_len = sizeof(struct win_dev);
5438 if (S_ISCHR(mode)) {
5439 memcpy(pdev->type, "IntxCHR", 8);
5440 pdev->major = cpu_to_le64(MAJOR(dev));
5441 pdev->minor = cpu_to_le64(MINOR(dev));
5442 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
5443 &bytes_written, iov, 1);
5444 } else if (S_ISBLK(mode)) {
5445 memcpy(pdev->type, "IntxBLK", 8);
5446 pdev->major = cpu_to_le64(MAJOR(dev));
5447 pdev->minor = cpu_to_le64(MINOR(dev));
5448 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
5449 &bytes_written, iov, 1);
5450 }
5451 tcon->ses->server->ops->close(xid, tcon, &fid);
5452 d_drop(dentry);
5453
5454 /* FIXME: add code here to set EAs */
5455 out:
5456 kfree(buf);
5457 return rc;
5458 }
5459
5460 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
5461 struct smb_version_operations smb20_operations = {
5462 .compare_fids = smb2_compare_fids,
5463 .setup_request = smb2_setup_request,
5464 .setup_async_request = smb2_setup_async_request,
5465 .check_receive = smb2_check_receive,
5466 .add_credits = smb2_add_credits,
5467 .set_credits = smb2_set_credits,
5468 .get_credits_field = smb2_get_credits_field,
5469 .get_credits = smb2_get_credits,
5470 .wait_mtu_credits = cifs_wait_mtu_credits,
5471 .get_next_mid = smb2_get_next_mid,
5472 .revert_current_mid = smb2_revert_current_mid,
5473 .read_data_offset = smb2_read_data_offset,
5474 .read_data_length = smb2_read_data_length,
5475 .map_error = map_smb2_to_linux_error,
5476 .find_mid = smb2_find_mid,
5477 .check_message = smb2_check_message,
5478 .dump_detail = smb2_dump_detail,
5479 .clear_stats = smb2_clear_stats,
5480 .print_stats = smb2_print_stats,
5481 .is_oplock_break = smb2_is_valid_oplock_break,
5482 .handle_cancelled_mid = smb2_handle_cancelled_mid,
5483 .downgrade_oplock = smb2_downgrade_oplock,
5484 .need_neg = smb2_need_neg,
5485 .negotiate = smb2_negotiate,
5486 .negotiate_wsize = smb2_negotiate_wsize,
5487 .negotiate_rsize = smb2_negotiate_rsize,
5488 .sess_setup = SMB2_sess_setup,
5489 .logoff = SMB2_logoff,
5490 .tree_connect = SMB2_tcon,
5491 .tree_disconnect = SMB2_tdis,
5492 .qfs_tcon = smb2_qfs_tcon,
5493 .is_path_accessible = smb2_is_path_accessible,
5494 .can_echo = smb2_can_echo,
5495 .echo = SMB2_echo,
5496 .query_path_info = smb2_query_path_info,
5497 .get_srv_inum = smb2_get_srv_inum,
5498 .query_file_info = smb2_query_file_info,
5499 .set_path_size = smb2_set_path_size,
5500 .set_file_size = smb2_set_file_size,
5501 .set_file_info = smb2_set_file_info,
5502 .set_compression = smb2_set_compression,
5503 .mkdir = smb2_mkdir,
5504 .mkdir_setinfo = smb2_mkdir_setinfo,
5505 .rmdir = smb2_rmdir,
5506 .unlink = smb2_unlink,
5507 .rename = smb2_rename_path,
5508 .create_hardlink = smb2_create_hardlink,
5509 .query_symlink = smb2_query_symlink,
5510 .query_mf_symlink = smb3_query_mf_symlink,
5511 .create_mf_symlink = smb3_create_mf_symlink,
5512 .open = smb2_open_file,
5513 .set_fid = smb2_set_fid,
5514 .close = smb2_close_file,
5515 .flush = smb2_flush_file,
5516 .async_readv = smb2_async_readv,
5517 .async_writev = smb2_async_writev,
5518 .sync_read = smb2_sync_read,
5519 .sync_write = smb2_sync_write,
5520 .query_dir_first = smb2_query_dir_first,
5521 .query_dir_next = smb2_query_dir_next,
5522 .close_dir = smb2_close_dir,
5523 .calc_smb_size = smb2_calc_size,
5524 .is_status_pending = smb2_is_status_pending,
5525 .is_session_expired = smb2_is_session_expired,
5526 .oplock_response = smb2_oplock_response,
5527 .queryfs = smb2_queryfs,
5528 .mand_lock = smb2_mand_lock,
5529 .mand_unlock_range = smb2_unlock_range,
5530 .push_mand_locks = smb2_push_mandatory_locks,
5531 .get_lease_key = smb2_get_lease_key,
5532 .set_lease_key = smb2_set_lease_key,
5533 .new_lease_key = smb2_new_lease_key,
5534 .calc_signature = smb2_calc_signature,
5535 .is_read_op = smb2_is_read_op,
5536 .set_oplock_level = smb2_set_oplock_level,
5537 .create_lease_buf = smb2_create_lease_buf,
5538 .parse_lease_buf = smb2_parse_lease_buf,
5539 .copychunk_range = smb2_copychunk_range,
5540 .wp_retry_size = smb2_wp_retry_size,
5541 .dir_needs_close = smb2_dir_needs_close,
5542 .get_dfs_refer = smb2_get_dfs_refer,
5543 .select_sectype = smb2_select_sectype,
5544 #ifdef CONFIG_CIFS_XATTR
5545 .query_all_EAs = smb2_query_eas,
5546 .set_EA = smb2_set_ea,
5547 #endif /* CIFS_XATTR */
5548 .get_acl = get_smb2_acl,
5549 .get_acl_by_fid = get_smb2_acl_by_fid,
5550 .set_acl = set_smb2_acl,
5551 .next_header = smb2_next_header,
5552 .ioctl_query_info = smb2_ioctl_query_info,
5553 .make_node = smb2_make_node,
5554 .fiemap = smb3_fiemap,
5555 .llseek = smb3_llseek,
5556 .is_status_io_timeout = smb2_is_status_io_timeout,
5557 .is_network_name_deleted = smb2_is_network_name_deleted,
5558 };
5559 #endif /* CIFS_ALLOW_INSECURE_LEGACY */
5560
5561 struct smb_version_operations smb21_operations = {
5562 .compare_fids = smb2_compare_fids,
5563 .setup_request = smb2_setup_request,
5564 .setup_async_request = smb2_setup_async_request,
5565 .check_receive = smb2_check_receive,
5566 .add_credits = smb2_add_credits,
5567 .set_credits = smb2_set_credits,
5568 .get_credits_field = smb2_get_credits_field,
5569 .get_credits = smb2_get_credits,
5570 .wait_mtu_credits = smb2_wait_mtu_credits,
5571 .adjust_credits = smb2_adjust_credits,
5572 .get_next_mid = smb2_get_next_mid,
5573 .revert_current_mid = smb2_revert_current_mid,
5574 .read_data_offset = smb2_read_data_offset,
5575 .read_data_length = smb2_read_data_length,
5576 .map_error = map_smb2_to_linux_error,
5577 .find_mid = smb2_find_mid,
5578 .check_message = smb2_check_message,
5579 .dump_detail = smb2_dump_detail,
5580 .clear_stats = smb2_clear_stats,
5581 .print_stats = smb2_print_stats,
5582 .is_oplock_break = smb2_is_valid_oplock_break,
5583 .handle_cancelled_mid = smb2_handle_cancelled_mid,
5584 .downgrade_oplock = smb2_downgrade_oplock,
5585 .need_neg = smb2_need_neg,
5586 .negotiate = smb2_negotiate,
5587 .negotiate_wsize = smb2_negotiate_wsize,
5588 .negotiate_rsize = smb2_negotiate_rsize,
5589 .sess_setup = SMB2_sess_setup,
5590 .logoff = SMB2_logoff,
5591 .tree_connect = SMB2_tcon,
5592 .tree_disconnect = SMB2_tdis,
5593 .qfs_tcon = smb2_qfs_tcon,
5594 .is_path_accessible = smb2_is_path_accessible,
5595 .can_echo = smb2_can_echo,
5596 .echo = SMB2_echo,
5597 .query_path_info = smb2_query_path_info,
5598 .get_srv_inum = smb2_get_srv_inum,
5599 .query_file_info = smb2_query_file_info,
5600 .set_path_size = smb2_set_path_size,
5601 .set_file_size = smb2_set_file_size,
5602 .set_file_info = smb2_set_file_info,
5603 .set_compression = smb2_set_compression,
5604 .mkdir = smb2_mkdir,
5605 .mkdir_setinfo = smb2_mkdir_setinfo,
5606 .rmdir = smb2_rmdir,
5607 .unlink = smb2_unlink,
5608 .rename = smb2_rename_path,
5609 .create_hardlink = smb2_create_hardlink,
5610 .query_symlink = smb2_query_symlink,
5611 .query_mf_symlink = smb3_query_mf_symlink,
5612 .create_mf_symlink = smb3_create_mf_symlink,
5613 .open = smb2_open_file,
5614 .set_fid = smb2_set_fid,
5615 .close = smb2_close_file,
5616 .flush = smb2_flush_file,
5617 .async_readv = smb2_async_readv,
5618 .async_writev = smb2_async_writev,
5619 .sync_read = smb2_sync_read,
5620 .sync_write = smb2_sync_write,
5621 .query_dir_first = smb2_query_dir_first,
5622 .query_dir_next = smb2_query_dir_next,
5623 .close_dir = smb2_close_dir,
5624 .calc_smb_size = smb2_calc_size,
5625 .is_status_pending = smb2_is_status_pending,
5626 .is_session_expired = smb2_is_session_expired,
5627 .oplock_response = smb2_oplock_response,
5628 .queryfs = smb2_queryfs,
5629 .mand_lock = smb2_mand_lock,
5630 .mand_unlock_range = smb2_unlock_range,
5631 .push_mand_locks = smb2_push_mandatory_locks,
5632 .get_lease_key = smb2_get_lease_key,
5633 .set_lease_key = smb2_set_lease_key,
5634 .new_lease_key = smb2_new_lease_key,
5635 .calc_signature = smb2_calc_signature,
5636 .is_read_op = smb21_is_read_op,
5637 .set_oplock_level = smb21_set_oplock_level,
5638 .create_lease_buf = smb2_create_lease_buf,
5639 .parse_lease_buf = smb2_parse_lease_buf,
5640 .copychunk_range = smb2_copychunk_range,
5641 .wp_retry_size = smb2_wp_retry_size,
5642 .dir_needs_close = smb2_dir_needs_close,
5643 .enum_snapshots = smb3_enum_snapshots,
5644 .notify = smb3_notify,
5645 .get_dfs_refer = smb2_get_dfs_refer,
5646 .select_sectype = smb2_select_sectype,
5647 #ifdef CONFIG_CIFS_XATTR
5648 .query_all_EAs = smb2_query_eas,
5649 .set_EA = smb2_set_ea,
5650 #endif /* CIFS_XATTR */
5651 .get_acl = get_smb2_acl,
5652 .get_acl_by_fid = get_smb2_acl_by_fid,
5653 .set_acl = set_smb2_acl,
5654 .next_header = smb2_next_header,
5655 .ioctl_query_info = smb2_ioctl_query_info,
5656 .make_node = smb2_make_node,
5657 .fiemap = smb3_fiemap,
5658 .llseek = smb3_llseek,
5659 .is_status_io_timeout = smb2_is_status_io_timeout,
5660 .is_network_name_deleted = smb2_is_network_name_deleted,
5661 };
5662
5663 struct smb_version_operations smb30_operations = {
5664 .compare_fids = smb2_compare_fids,
5665 .setup_request = smb2_setup_request,
5666 .setup_async_request = smb2_setup_async_request,
5667 .check_receive = smb2_check_receive,
5668 .add_credits = smb2_add_credits,
5669 .set_credits = smb2_set_credits,
5670 .get_credits_field = smb2_get_credits_field,
5671 .get_credits = smb2_get_credits,
5672 .wait_mtu_credits = smb2_wait_mtu_credits,
5673 .adjust_credits = smb2_adjust_credits,
5674 .get_next_mid = smb2_get_next_mid,
5675 .revert_current_mid = smb2_revert_current_mid,
5676 .read_data_offset = smb2_read_data_offset,
5677 .read_data_length = smb2_read_data_length,
5678 .map_error = map_smb2_to_linux_error,
5679 .find_mid = smb2_find_mid,
5680 .check_message = smb2_check_message,
5681 .dump_detail = smb2_dump_detail,
5682 .clear_stats = smb2_clear_stats,
5683 .print_stats = smb2_print_stats,
5684 .dump_share_caps = smb2_dump_share_caps,
5685 .is_oplock_break = smb2_is_valid_oplock_break,
5686 .handle_cancelled_mid = smb2_handle_cancelled_mid,
5687 .downgrade_oplock = smb3_downgrade_oplock,
5688 .need_neg = smb2_need_neg,
5689 .negotiate = smb2_negotiate,
5690 .negotiate_wsize = smb3_negotiate_wsize,
5691 .negotiate_rsize = smb3_negotiate_rsize,
5692 .sess_setup = SMB2_sess_setup,
5693 .logoff = SMB2_logoff,
5694 .tree_connect = SMB2_tcon,
5695 .tree_disconnect = SMB2_tdis,
5696 .qfs_tcon = smb3_qfs_tcon,
5697 .is_path_accessible = smb2_is_path_accessible,
5698 .can_echo = smb2_can_echo,
5699 .echo = SMB2_echo,
5700 .query_path_info = smb2_query_path_info,
5701 /* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */
5702 .query_reparse_tag = smb2_query_reparse_tag,
5703 .get_srv_inum = smb2_get_srv_inum,
5704 .query_file_info = smb2_query_file_info,
5705 .set_path_size = smb2_set_path_size,
5706 .set_file_size = smb2_set_file_size,
5707 .set_file_info = smb2_set_file_info,
5708 .set_compression = smb2_set_compression,
5709 .mkdir = smb2_mkdir,
5710 .mkdir_setinfo = smb2_mkdir_setinfo,
5711 .rmdir = smb2_rmdir,
5712 .unlink = smb2_unlink,
5713 .rename = smb2_rename_path,
5714 .create_hardlink = smb2_create_hardlink,
5715 .query_symlink = smb2_query_symlink,
5716 .query_mf_symlink = smb3_query_mf_symlink,
5717 .create_mf_symlink = smb3_create_mf_symlink,
5718 .open = smb2_open_file,
5719 .set_fid = smb2_set_fid,
5720 .close = smb2_close_file,
5721 .close_getattr = smb2_close_getattr,
5722 .flush = smb2_flush_file,
5723 .async_readv = smb2_async_readv,
5724 .async_writev = smb2_async_writev,
5725 .sync_read = smb2_sync_read,
5726 .sync_write = smb2_sync_write,
5727 .query_dir_first = smb2_query_dir_first,
5728 .query_dir_next = smb2_query_dir_next,
5729 .close_dir = smb2_close_dir,
5730 .calc_smb_size = smb2_calc_size,
5731 .is_status_pending = smb2_is_status_pending,
5732 .is_session_expired = smb2_is_session_expired,
5733 .oplock_response = smb2_oplock_response,
5734 .queryfs = smb2_queryfs,
5735 .mand_lock = smb2_mand_lock,
5736 .mand_unlock_range = smb2_unlock_range,
5737 .push_mand_locks = smb2_push_mandatory_locks,
5738 .get_lease_key = smb2_get_lease_key,
5739 .set_lease_key = smb2_set_lease_key,
5740 .new_lease_key = smb2_new_lease_key,
5741 .generate_signingkey = generate_smb30signingkey,
5742 .calc_signature = smb3_calc_signature,
5743 .set_integrity = smb3_set_integrity,
5744 .is_read_op = smb21_is_read_op,
5745 .set_oplock_level = smb3_set_oplock_level,
5746 .create_lease_buf = smb3_create_lease_buf,
5747 .parse_lease_buf = smb3_parse_lease_buf,
5748 .copychunk_range = smb2_copychunk_range,
5749 .duplicate_extents = smb2_duplicate_extents,
5750 .validate_negotiate = smb3_validate_negotiate,
5751 .wp_retry_size = smb2_wp_retry_size,
5752 .dir_needs_close = smb2_dir_needs_close,
5753 .fallocate = smb3_fallocate,
5754 .enum_snapshots = smb3_enum_snapshots,
5755 .notify = smb3_notify,
5756 .init_transform_rq = smb3_init_transform_rq,
5757 .is_transform_hdr = smb3_is_transform_hdr,
5758 .receive_transform = smb3_receive_transform,
5759 .get_dfs_refer = smb2_get_dfs_refer,
5760 .select_sectype = smb2_select_sectype,
5761 #ifdef CONFIG_CIFS_XATTR
5762 .query_all_EAs = smb2_query_eas,
5763 .set_EA = smb2_set_ea,
5764 #endif /* CIFS_XATTR */
5765 .get_acl = get_smb2_acl,
5766 .get_acl_by_fid = get_smb2_acl_by_fid,
5767 .set_acl = set_smb2_acl,
5768 .next_header = smb2_next_header,
5769 .ioctl_query_info = smb2_ioctl_query_info,
5770 .make_node = smb2_make_node,
5771 .fiemap = smb3_fiemap,
5772 .llseek = smb3_llseek,
5773 .is_status_io_timeout = smb2_is_status_io_timeout,
5774 .is_network_name_deleted = smb2_is_network_name_deleted,
5775 };
5776
5777 struct smb_version_operations smb311_operations = {
5778 .compare_fids = smb2_compare_fids,
5779 .setup_request = smb2_setup_request,
5780 .setup_async_request = smb2_setup_async_request,
5781 .check_receive = smb2_check_receive,
5782 .add_credits = smb2_add_credits,
5783 .set_credits = smb2_set_credits,
5784 .get_credits_field = smb2_get_credits_field,
5785 .get_credits = smb2_get_credits,
5786 .wait_mtu_credits = smb2_wait_mtu_credits,
5787 .adjust_credits = smb2_adjust_credits,
5788 .get_next_mid = smb2_get_next_mid,
5789 .revert_current_mid = smb2_revert_current_mid,
5790 .read_data_offset = smb2_read_data_offset,
5791 .read_data_length = smb2_read_data_length,
5792 .map_error = map_smb2_to_linux_error,
5793 .find_mid = smb2_find_mid,
5794 .check_message = smb2_check_message,
5795 .dump_detail = smb2_dump_detail,
5796 .clear_stats = smb2_clear_stats,
5797 .print_stats = smb2_print_stats,
5798 .dump_share_caps = smb2_dump_share_caps,
5799 .is_oplock_break = smb2_is_valid_oplock_break,
5800 .handle_cancelled_mid = smb2_handle_cancelled_mid,
5801 .downgrade_oplock = smb3_downgrade_oplock,
5802 .need_neg = smb2_need_neg,
5803 .negotiate = smb2_negotiate,
5804 .negotiate_wsize = smb3_negotiate_wsize,
5805 .negotiate_rsize = smb3_negotiate_rsize,
5806 .sess_setup = SMB2_sess_setup,
5807 .logoff = SMB2_logoff,
5808 .tree_connect = SMB2_tcon,
5809 .tree_disconnect = SMB2_tdis,
5810 .qfs_tcon = smb3_qfs_tcon,
5811 .is_path_accessible = smb2_is_path_accessible,
5812 .can_echo = smb2_can_echo,
5813 .echo = SMB2_echo,
5814 .query_path_info = smb2_query_path_info,
5815 .query_reparse_tag = smb2_query_reparse_tag,
5816 .get_srv_inum = smb2_get_srv_inum,
5817 .query_file_info = smb2_query_file_info,
5818 .set_path_size = smb2_set_path_size,
5819 .set_file_size = smb2_set_file_size,
5820 .set_file_info = smb2_set_file_info,
5821 .set_compression = smb2_set_compression,
5822 .mkdir = smb2_mkdir,
5823 .mkdir_setinfo = smb2_mkdir_setinfo,
5824 .posix_mkdir = smb311_posix_mkdir,
5825 .rmdir = smb2_rmdir,
5826 .unlink = smb2_unlink,
5827 .rename = smb2_rename_path,
5828 .create_hardlink = smb2_create_hardlink,
5829 .query_symlink = smb2_query_symlink,
5830 .query_mf_symlink = smb3_query_mf_symlink,
5831 .create_mf_symlink = smb3_create_mf_symlink,
5832 .open = smb2_open_file,
5833 .set_fid = smb2_set_fid,
5834 .close = smb2_close_file,
5835 .close_getattr = smb2_close_getattr,
5836 .flush = smb2_flush_file,
5837 .async_readv = smb2_async_readv,
5838 .async_writev = smb2_async_writev,
5839 .sync_read = smb2_sync_read,
5840 .sync_write = smb2_sync_write,
5841 .query_dir_first = smb2_query_dir_first,
5842 .query_dir_next = smb2_query_dir_next,
5843 .close_dir = smb2_close_dir,
5844 .calc_smb_size = smb2_calc_size,
5845 .is_status_pending = smb2_is_status_pending,
5846 .is_session_expired = smb2_is_session_expired,
5847 .oplock_response = smb2_oplock_response,
5848 .queryfs = smb311_queryfs,
5849 .mand_lock = smb2_mand_lock,
5850 .mand_unlock_range = smb2_unlock_range,
5851 .push_mand_locks = smb2_push_mandatory_locks,
5852 .get_lease_key = smb2_get_lease_key,
5853 .set_lease_key = smb2_set_lease_key,
5854 .new_lease_key = smb2_new_lease_key,
5855 .generate_signingkey = generate_smb311signingkey,
5856 .calc_signature = smb3_calc_signature,
5857 .set_integrity = smb3_set_integrity,
5858 .is_read_op = smb21_is_read_op,
5859 .set_oplock_level = smb3_set_oplock_level,
5860 .create_lease_buf = smb3_create_lease_buf,
5861 .parse_lease_buf = smb3_parse_lease_buf,
5862 .copychunk_range = smb2_copychunk_range,
5863 .duplicate_extents = smb2_duplicate_extents,
5864 /* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
5865 .wp_retry_size = smb2_wp_retry_size,
5866 .dir_needs_close = smb2_dir_needs_close,
5867 .fallocate = smb3_fallocate,
5868 .enum_snapshots = smb3_enum_snapshots,
5869 .notify = smb3_notify,
5870 .init_transform_rq = smb3_init_transform_rq,
5871 .is_transform_hdr = smb3_is_transform_hdr,
5872 .receive_transform = smb3_receive_transform,
5873 .get_dfs_refer = smb2_get_dfs_refer,
5874 .select_sectype = smb2_select_sectype,
5875 #ifdef CONFIG_CIFS_XATTR
5876 .query_all_EAs = smb2_query_eas,
5877 .set_EA = smb2_set_ea,
5878 #endif /* CIFS_XATTR */
5879 .get_acl = get_smb2_acl,
5880 .get_acl_by_fid = get_smb2_acl_by_fid,
5881 .set_acl = set_smb2_acl,
5882 .next_header = smb2_next_header,
5883 .ioctl_query_info = smb2_ioctl_query_info,
5884 .make_node = smb2_make_node,
5885 .fiemap = smb3_fiemap,
5886 .llseek = smb3_llseek,
5887 .is_status_io_timeout = smb2_is_status_io_timeout,
5888 .is_network_name_deleted = smb2_is_network_name_deleted,
5889 };
5890
5891 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
5892 struct smb_version_values smb20_values = {
5893 .version_string = SMB20_VERSION_STRING,
5894 .protocol_id = SMB20_PROT_ID,
5895 .req_capabilities = 0, /* MBZ */
5896 .large_lock_type = 0,
5897 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5898 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5899 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5900 .header_size = sizeof(struct smb2_hdr),
5901 .header_preamble_size = 0,
5902 .max_header_size = MAX_SMB2_HDR_SIZE,
5903 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5904 .lock_cmd = SMB2_LOCK,
5905 .cap_unix = 0,
5906 .cap_nt_find = SMB2_NT_FIND,
5907 .cap_large_files = SMB2_LARGE_FILES,
5908 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5909 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5910 .create_lease_size = sizeof(struct create_lease),
5911 };
5912 #endif /* ALLOW_INSECURE_LEGACY */
5913
5914 struct smb_version_values smb21_values = {
5915 .version_string = SMB21_VERSION_STRING,
5916 .protocol_id = SMB21_PROT_ID,
5917 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
5918 .large_lock_type = 0,
5919 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5920 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5921 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5922 .header_size = sizeof(struct smb2_hdr),
5923 .header_preamble_size = 0,
5924 .max_header_size = MAX_SMB2_HDR_SIZE,
5925 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5926 .lock_cmd = SMB2_LOCK,
5927 .cap_unix = 0,
5928 .cap_nt_find = SMB2_NT_FIND,
5929 .cap_large_files = SMB2_LARGE_FILES,
5930 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5931 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5932 .create_lease_size = sizeof(struct create_lease),
5933 };
5934
5935 struct smb_version_values smb3any_values = {
5936 .version_string = SMB3ANY_VERSION_STRING,
5937 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
5938 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5939 .large_lock_type = 0,
5940 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5941 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5942 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5943 .header_size = sizeof(struct smb2_hdr),
5944 .header_preamble_size = 0,
5945 .max_header_size = MAX_SMB2_HDR_SIZE,
5946 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5947 .lock_cmd = SMB2_LOCK,
5948 .cap_unix = 0,
5949 .cap_nt_find = SMB2_NT_FIND,
5950 .cap_large_files = SMB2_LARGE_FILES,
5951 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5952 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5953 .create_lease_size = sizeof(struct create_lease_v2),
5954 };
5955
5956 struct smb_version_values smbdefault_values = {
5957 .version_string = SMBDEFAULT_VERSION_STRING,
5958 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
5959 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5960 .large_lock_type = 0,
5961 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5962 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5963 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5964 .header_size = sizeof(struct smb2_hdr),
5965 .header_preamble_size = 0,
5966 .max_header_size = MAX_SMB2_HDR_SIZE,
5967 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5968 .lock_cmd = SMB2_LOCK,
5969 .cap_unix = 0,
5970 .cap_nt_find = SMB2_NT_FIND,
5971 .cap_large_files = SMB2_LARGE_FILES,
5972 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5973 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5974 .create_lease_size = sizeof(struct create_lease_v2),
5975 };
5976
5977 struct smb_version_values smb30_values = {
5978 .version_string = SMB30_VERSION_STRING,
5979 .protocol_id = SMB30_PROT_ID,
5980 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5981 .large_lock_type = 0,
5982 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5983 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5984 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5985 .header_size = sizeof(struct smb2_hdr),
5986 .header_preamble_size = 0,
5987 .max_header_size = MAX_SMB2_HDR_SIZE,
5988 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5989 .lock_cmd = SMB2_LOCK,
5990 .cap_unix = 0,
5991 .cap_nt_find = SMB2_NT_FIND,
5992 .cap_large_files = SMB2_LARGE_FILES,
5993 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5994 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5995 .create_lease_size = sizeof(struct create_lease_v2),
5996 };
5997
5998 struct smb_version_values smb302_values = {
5999 .version_string = SMB302_VERSION_STRING,
6000 .protocol_id = SMB302_PROT_ID,
6001 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
6002 .large_lock_type = 0,
6003 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
6004 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
6005 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
6006 .header_size = sizeof(struct smb2_hdr),
6007 .header_preamble_size = 0,
6008 .max_header_size = MAX_SMB2_HDR_SIZE,
6009 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
6010 .lock_cmd = SMB2_LOCK,
6011 .cap_unix = 0,
6012 .cap_nt_find = SMB2_NT_FIND,
6013 .cap_large_files = SMB2_LARGE_FILES,
6014 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
6015 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
6016 .create_lease_size = sizeof(struct create_lease_v2),
6017 };
6018
6019 struct smb_version_values smb311_values = {
6020 .version_string = SMB311_VERSION_STRING,
6021 .protocol_id = SMB311_PROT_ID,
6022 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
6023 .large_lock_type = 0,
6024 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
6025 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
6026 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
6027 .header_size = sizeof(struct smb2_hdr),
6028 .header_preamble_size = 0,
6029 .max_header_size = MAX_SMB2_HDR_SIZE,
6030 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
6031 .lock_cmd = SMB2_LOCK,
6032 .cap_unix = 0,
6033 .cap_nt_find = SMB2_NT_FIND,
6034 .cap_large_files = SMB2_LARGE_FILES,
6035 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
6036 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
6037 .create_lease_size = sizeof(struct create_lease_v2),
6038 };