/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

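/*
 * Default mid callback: wake up the task that queued the request and is
 * sleeping in wait_for_response().
 */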
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

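/*
 * Allocate and initialize a mid_q_entry for the given request header. The
 * entry starts out synchronous: its callback wakes the allocating task.
 * Callers are responsible for queueing it and for releasing the reference.
 */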
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

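/*
 * Mid entries are reference counted; the final put returns the entry to
 * cifs_mid_poolp. cifs_mid_q_entry_release() drops a reference under
 * GlobalMid_Lock.
 */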
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

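/*
 * Mark a mid as freed, release its response buffer, record slow-response
 * statistics when CONFIG_CIFS_STATS2 is enabled, and drop the reference
 * taken at allocation time.
 */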
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
				    midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

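/* Unlink a mid from pending_mid_q and release it. */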
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

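/*
 * Return the total number of bytes a request will occupy on the wire,
 * covering both the kvec array and any attached page array. For SMB2+
 * (no header preamble) the leading 4-byte RFC1001 length vector is skipped.
 */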
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

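/*
 * Send one or more requests over the server socket (or over SMB Direct when
 * RDMA is in use). The socket is corked for the duration of the send, and a
 * partial send forces a reconnect since the server could otherwise take the
 * next SMB as the remainder of this one.
 */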
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

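/*
 * Wrapper around __smb_send_rqst() that encrypts the compound chain first
 * when CIFS_TRANSFORM_REQ is set, prepending a transform header via the
 * per-dialect init_transform_rq callback.
 */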
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

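/* Send a single legacy SMB buffer (4-byte RFC1001 length + packet). */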
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

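/*
 * Block until a send credit is available (or the server is exiting).
 * Async operations such as oplock breaks are never held up, and blocking
 * lock requests do not consume a credit.
 */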
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

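/*
 * Allocate a mid for an SMB1 request and queue it on pending_mid_q,
 * rejecting requests that are not valid for the current session state.
 */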
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

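/* Sleep (freezable, killable) until the mid leaves MID_REQUEST_SUBMITTED. */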
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				  midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

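/*
 * Build a mid for an async request: mark the header for signing when the
 * server requires it, allocate the mid and sign the request.
 */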
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits.value = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 *
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

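/*
 * Translate the final state of a mid into an errno. The mid is released
 * here unless the response was received successfully, in which case the
 * caller still owns it.
 */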
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

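/*
 * Compound request callbacks: every response in the chain returns its
 * credits to the server; only the last one wakes the waiting thread, and
 * cancelled mids free themselves once the response arrives.
 */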
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;

	add_credits(server, server->ops->get_credits(mid), mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

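/*
 * Send a chain of one or more compounded requests and wait for the
 * responses, handling credits, signing, cancellation and the SMB 3.1.1
 * preauth hash along the way.
 */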
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	unsigned int credits[MAX_COMPOUND] = {0};
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure we obtain 1 credit per request in the compound chain.
	 * It can be optimized further by waiting for all the credits
	 * at once but this can wait long enough if we don't have enough
	 * credits due to some heavy operations in progress or the server
	 * not granting us much, so a fallback to the current approach is
	 * needed anyway.
	 */
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_free_request(ses->server, timeout, optype);
		if (rc) {
			/*
			 * We haven't sent an SMB packet to the server yet but
			 * we already obtained credits for i requests in the
			 * compound chain - need to return those credits back
			 * for future use. Note that we need to call add_credits
			 * multiple times to match the way we obtained credits
			 * in the first place and to account for in flight
			 * requests correctly.
			 */
			for (j = 0; j < i; j++)
				add_credits(ses->server, 1, optype);
			return rc;
		}
		credits[i] = 1;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(ses->server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(ses->server, num_rqst);
		ses->server->sequence_number -= 2;
	}

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		/* Sending failed for some reason - return credits back */
		for (i = 0; i < num_rqst; i++)
			add_credits(ses->server, credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i] = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}