/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

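/* Default mid callback: wake the task that queued the mid and is waiting. */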
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

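/*
 * Allocate a mid from the mempool and initialize it for the given request
 * header and server. Returns NULL if the server pointer is NULL.
 */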
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
                                               refcount);

        mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

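/*
 * Release the response buffer attached to a mid and drop the mid's
 * reference. With CONFIG_CIFS_STATS2, also record responses that took
 * longer than the slow-response threshold (lock commands are excluded
 * since they may legitimately block on the server).
 */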
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        unsigned long now;
#endif
        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        /*
         * Commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calc is unlikely or impossible to wrap
         * as long as slow_rsp_threshold is not set way above recommended max
         * value (32767 ie 9 hours) and is generally harmless even if wrong
         * since only affects debug counters - so leaving the calc as simple
         * comparison rather than doing multiple conversions and overflow
         * checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

                trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
                                    midEntry->mid, midEntry->pid,
                                    midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug(" CIFS slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        cifs_mid_q_entry_release(midEntry);
}

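/* Unlink a mid from the pending queue, mark it deleted, and then free it. */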
void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        list_del_init(&mid->qhead);
        mid->mid_flags |= MID_DELETED;
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more
                 * but wait increasing amounts of time allowing time for
                 * socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet. In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cifs_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

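/*
 * Compute the total length in bytes of an smb_rqst, covering both the kvec
 * array (skipping the leading 4-byte RFC1002 length iov for SMB2+, which is
 * sent separately) and the trailing page array, if any.
 */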
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

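/*
 * Send one or more SMB requests on the socket: block signals, optionally
 * prepend the 4-byte RFC1002 length marker (SMB2+), then send each request's
 * kvec array and page array. If only part of the data could be sent, mark
 * the session for reconnect so the server discards the partial frame. Hands
 * off to smbd_send() when SMB Direct (RDMA) is in use.
 */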
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server) && server->smbd_conn) {
                rc = smbd_send(server, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (signal_pending(current)) {
                cifs_dbg(FYI, "signal is pending before sending any data\n");
                return -EINTR;
        }

        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                          (char *)&val, sizeof(val));

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If signal is pending but we have already sent the whole packet to
         * the server we need to return success status to allow a corresponding
         * mid entry to be kept in the pending requests queue thus allowing
         * to handle responses from the server by the client.
         *
         * If only part of the packet has been sent there is no need to hide
         * interrupt because the session will be reconnected anyway, so there
         * won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -EINTR;
        }

        /* uncork it */
        val = 0;
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                          (char *)&val, sizeof(val));

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

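/*
 * Wrapper around __smb_send_rqst that, when CIFS_TRANSFORM_REQ is set,
 * first encrypts the compound via the server's init_transform_rq hook,
 * prepending an SMB2 transform header as request 0.
 */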
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(&tr_hdr, 0, sizeof(tr_hdr));

        iov.iov_base = &tr_hdr;
        iov.iov_len = sizeof(tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        if (!server->ops->init_transform_rq) {
                cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                return rc;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
        return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

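/*
 * Block until num_credits credits are available (or until the timeout in
 * milliseconds expires; a negative timeout means wait indefinitely), then
 * claim them by decrementing the credit count and bumping in_flight.
 * Async ops (e.g. oplock breaks) bypass the wait entirely, and the last
 * MAX_COMPOUND credits are kept in reserve for compound requests.
 */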
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        int rc;
        int *credits;
        int optype;
        long int t;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                trace_smb3_credit_timeout(server->CurrentMid,
                                        server->hostname, num_credits);
                                cifs_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        trace_smb3_credit_timeout(
                                                server->CurrentMid,
                                                server->hostname, num_credits);
                                        cifs_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags, instance);
}

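/*
 * Reserve credits for a compound of num requests. Fails fast with
 * -ENOTSUPP when there are not enough credits and too few requests in
 * flight to expect the server to grant the shortfall; otherwise waits up
 * to 60 seconds for the credits to become available.
 */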
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        if (*credits < num) {
                /*
                 * Return immediately if not too many requests in flight since
                 * we will likely be stuck on waiting for credits.
                 */
                if (server->in_flight < num - *credits) {
                        spin_unlock(&server->req_lock);
                        return -ENOTSUPP;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags, instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

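/*
 * Validate session state (reject dead or reconnecting sessions, and only
 * allow session-setup/negotiate commands while the session is new), then
 * allocate a mid and queue it on the server's pending_mid_q.
 */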
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                    (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

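/*
 * Build a mid for an async request: verify that iov[0] is the 4-byte
 * RFC1002 length immediately followed by the packet, enable signing if the
 * server requires it, allocate the mid, and sign the request.
 */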
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RESP;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

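/*
 * Map the final state of a mid to a return code. A successfully received
 * response returns 0 and leaves the mid (and its resp_buf) to the caller;
 * any other state frees the mid here.
 */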
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                list_del_init(&mid->qhead);
                cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

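/*
 * Send a compound chain of up to MAX_COMPOUND requests and wait for the
 * responses. Credits are reserved up front, each mid gets a callback that
 * collects the credits granted by the server, and only the last mid wakes
 * this thread. Cancelled or failed waits hand the mid over to
 * cifs_cancelled_callback so the receive path can free it.
 */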
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(ses->server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&ses->server->srv_mutex);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != ses->server->reconnect_instance) {
                mutex_unlock(&ses->server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(ses->server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(ses->server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&ses->server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(ses->server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(ses->server);
        rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
        cifs_in_send_dec(ses->server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(ses->server, num_rqst);
                ses->server->sequence_number -= 2;
        }

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                /* Sending failed for some reason - return credits back */
                for (i = 0; i < num_rqst; i++)
                        add_credits(ses->server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);

        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
                goto out;

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(ses->server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(ses->server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], ses->server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        ses->server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = ses->server->ops->check_receive(midQ[i], ses->server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RESP) == 0)
                        midQ[i]->resp_buf = NULL;
        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
                                  resp_iov);
}

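/*
 * Prepend a 4-byte RFC1001 length iov to the caller's iov array (copying
 * into a heap array when it would not fit on the stack) and dispatch via
 * cifs_send_recv().
 */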
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

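/*
 * Synchronous send/receive for a single legacy (SMB1) request: reserve a
 * credit, sign and send in_buf, wait for the response, and copy it into
 * out_buf for the caller.
 */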
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, flags, &credits.instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */
        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(ses->server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0)
                goto out;

        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
                goto out;

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_cancel(ses->server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(ses->server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                add_credits(ses->server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(ses->server, &credits, 0);

        return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                 struct smb_hdr *in_buf,
                 struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /*
         * We just modify the current in_buf to change
         * the type of lock from LOCKING_ANDX_SHARED_LOCK
         * or LOCKING_ANDX_EXCLUSIVE_LOCK to
         * LOCKING_ANDX_CANCEL_LOCK.
         */
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                           &bytes_returned, 0);
}

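/*
 * Like SendReceive() but for blocking lock requests: waits interruptibly
 * for the response and, on a signal, cancels the in-flight lock (NT_CANCEL
 * for POSIX/Transaction2 locks, LOCKINGX_CANCEL_LOCK for Windows locks)
 * before restarting the system call.
 */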
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf, struct smb_hdr *out_buf,
                        int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;

        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */
        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(ses->server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((ses->server->tcpStatus != CifsGood) &&
                 (ses->server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal? */
        if ((rc == -ERESTARTSYS) &&
            (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
            ((ses->server->tcpStatus == CifsGood) ||
             (ses->server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /*
                         * POSIX lock. We send a NT_CANCEL SMB to cause the
                         * blocking lock to return.
                         */
                        rc = send_cancel(ses->server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /*
                         * Windows lock. We send a LOCKINGX_CANCEL_LOCK
                         * to cause the blocking lock to return.
                         */
                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /*
                         * If we get -ENOLCK back the lock may have
                         * already been removed. Don't exit in this case.
                         */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(ses->server, midQ);
                if (rc) {
                        send_cancel(ses->server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}