]> git.ipfire.org Git - people/ms/linux.git/blame - fs/cifs/transport.c
cifs: maintain a state machine for tcp/smb/tcon sessions
[people/ms/linux.git] / fs / cifs / transport.c
CommitLineData
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
1da177e4
LT
21#include <asm/processor.h>
22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
1da177e4
LT
24#include "cifspdu.h"
25#include "cifsglob.h"
26#include "cifsproto.h"
27#include "cifs_debug.h"
8bd68c6e 28#include "smb2proto.h"
9762c2d0 29#include "smbdirect.h"
50c2f753 30
3cecf486
RS
31/* Max number of iovectors we can use off the stack when sending requests. */
32#define CIFS_MAX_IOV_SIZE 8
33
2dc7e1c0
PS
34void
35cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
36{
37 wake_up_process(mid->callback_data);
38}
39
/*
 * Allocate and initialize a mid (multiplex id) queue entry for a request
 * about to be sent on @server. The entry starts with one reference
 * (kref_init) and in MID_REQUEST_ALLOCATED state; by default it is set up
 * for a synchronous caller (callback wakes the current task, which is
 * pinned via get_task_struct). Returns NULL if @server is NULL.
 * Note: mempool_alloc with GFP_NOFS is assumed not to fail here (result
 * is not NULL-checked) — mempool guarantees forward progress.
 */
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
75
/*
 * Final kref release for a mid_q_entry: give the server ops a chance to
 * handle a response that arrived after the caller gave up waiting
 * (MID_WAIT_CANCELLED), release the response buffer, update per-command
 * latency statistics (CONFIG_CIFS_STATS2), drop the creator task
 * reference taken in AllocMidQEntry, and return the entry to the mempool.
 * Called with GlobalMid_Lock held (see cifs_mid_q_entry_release).
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/* response arrived after the waiter cancelled: let ops clean it up */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/total round-trip time per SMB2 command */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
154
/*
 * Drop a reference on @midEntry; the final put frees it via
 * _cifs_mid_q_entry_release. The put is done under GlobalMid_Lock so the
 * release path runs with the lock held.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
161
/* Historical name kept for callers; simply drops one mid reference. */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
166
3c1bf7e4
PS
/*
 * Unlink @mid from the pending queue (if not already unlinked — the
 * MID_DELETED flag makes this idempotent under GlobalMid_Lock) and drop
 * the caller's reference.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
179
6f49f46b
JL
180/*
181 * smb_send_kvec - send an array of kvecs to the server
182 * @server: Server to send the data to
3ab3f2a1 183 * @smb_msg: Message to send
6f49f46b
JL
184 * @sent: amount of data sent on socket is stored here
185 *
186 * Our basic "send data to server" function. Should be called with srv_mutex
187 * held. The caller is responsible for handling the results.
188 */
d6e04ae6 189static int
3ab3f2a1
AV
190smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
191 size_t *sent)
1da177e4
LT
192{
193 int rc = 0;
3ab3f2a1 194 int retries = 0;
edf1ae40 195 struct socket *ssocket = server->ssocket;
50c2f753 196
6f49f46b
JL
197 *sent = 0;
198
3ab3f2a1
AV
199 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
200 smb_msg->msg_namelen = sizeof(struct sockaddr);
201 smb_msg->msg_control = NULL;
202 smb_msg->msg_controllen = 0;
0496e02d 203 if (server->noblocksnd)
3ab3f2a1 204 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 205 else
3ab3f2a1 206 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 207
3ab3f2a1 208 while (msg_data_left(smb_msg)) {
6f49f46b
JL
209 /*
210 * If blocking send, we try 3 times, since each can block
211 * for 5 seconds. For nonblocking we have to try more
212 * but wait increasing amounts of time allowing time for
213 * socket to clear. The overall time we wait in either
214 * case to send on the socket is about 15 seconds.
215 * Similarly we wait for 15 seconds for a response from
216 * the server in SendReceive[2] for the server to send
217 * a response back for most types of requests (except
218 * SMB Write past end of file which can be slow, and
219 * blocking lock operations). NFS waits slightly longer
220 * than CIFS, but this can make it take longer for
221 * nonresponsive servers to be detected and 15 seconds
222 * is more than enough time for modern networks to
223 * send a packet. In most cases if we fail to send
224 * after the retries we will kill the socket and
225 * reconnect which may clear the network problem.
226 */
3ab3f2a1 227 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 228 if (rc == -EAGAIN) {
3ab3f2a1
AV
229 retries++;
230 if (retries >= 14 ||
231 (!server->noblocksnd && (retries > 2))) {
afe6f653 232 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 233 ssocket);
3ab3f2a1 234 return -EAGAIN;
1da177e4 235 }
3ab3f2a1 236 msleep(1 << retries);
1da177e4
LT
237 continue;
238 }
6f49f46b 239
79a58d1f 240 if (rc < 0)
3ab3f2a1 241 return rc;
6f49f46b 242
79a58d1f 243 if (rc == 0) {
3e84469d
SF
244 /* should never happen, letting socket clear before
245 retrying is our only obvious option here */
afe6f653 246 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
247 msleep(500);
248 continue;
d6e04ae6 249 }
6f49f46b 250
3ab3f2a1
AV
251 /* send was at least partially successful */
252 *sent += rc;
253 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 254 }
3ab3f2a1 255 return 0;
97bc00b3
JL
256}
257
/*
 * Compute the total on-the-wire length of @rqst: the iov array plus any
 * trailing page array. For SMB2+ (no RFC1002 preamble) a leading 4-byte
 * iov[0] is the length field itself and is excluded from the total.
 */
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		/* skip the 4-byte RFC1002 length slot */
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
301
/*
 * Transmit @num_rqst compounded requests over the server's transport.
 * Dispatches to smbdirect when RDMA is enabled; otherwise sends over the
 * corked TCP socket: optional RFC1002 length marker (SMB2+), each rqst's
 * iovs, then its page array. Signals are blocked for the duration so a
 * partial send cannot be caused by an interrupt; a genuinely partial send
 * marks the session CifsNeedReconnect so the server discards the torn SMB.
 * Caller must hold srv_mutex (see smb_send_kvec's contract).
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		spin_lock(&cifs_tcp_ses_lock);
		server->tcpStatus = CifsNeedReconnect;
		spin_unlock(&cifs_tcp_ses_lock);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
448
/*
 * Send @num_rqst requests, encrypting them first when CIFS_TRANSFORM_REQ
 * is set. The transform path prepends a transform header as cur_rqst[0]
 * (so at most MAX_COMPOUND - 1 caller requests fit) and hands the rest to
 * the server's init_transform_rq op before sending. tr_hdr is heap
 * allocated (too large for the stack) and always freed here.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* slot 0 is reserved for the transform header below */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	/* free the transformed copies (not cur_rqst[0], which is ours) */
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}
493
0496e02d
JL
494int
495smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
496 unsigned int smb_buf_length)
497{
738f9de5 498 struct kvec iov[2];
7fb8986e
PS
499 struct smb_rqst rqst = { .rq_iov = iov,
500 .rq_nvec = 2 };
0496e02d 501
738f9de5
PS
502 iov[0].iov_base = smb_buffer;
503 iov[0].iov_len = 4;
504 iov[1].iov_base = (char *)smb_buffer + 4;
505 iov[1].iov_len = smb_buf_length;
0496e02d 506
07cd952f 507 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
508}
509
/*
 * Block until @num_credits credits of the type selected by @flags are
 * available on @server, then consume them and record the reconnect
 * instance in *@instance. @timeout is in ms; negative means wait forever.
 * Oplock breaks (CIFS_NON_BLOCKING) bypass the wait entirely, echoes fail
 * fast with -EAGAIN when no credits remain, and the last MAX_COMPOUND
 * credits are reserved for compound requests on busy sessions. Returns 0,
 * -EAGAIN, -ENOENT (server exiting), -EBUSY (timed out) or -ERESTARTSYS.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_add_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* not enough credits: drop the lock and sleep */
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&cifs_tcp_ses_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&cifs_tcp_ses_lock);
				return -ENOENT;
			}
			spin_unlock(&cifs_tcp_ses_lock);

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_add_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
1da177e4 663
/* Acquire a single credit with no timeout (wait forever). */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
671
/*
 * Acquire @num credits for a compound request with a 60s timeout.
 * Fails fast with -EDEADLK when credits are short AND nothing is in
 * flight, since then no response can ever replenish them and waiting
 * would hang.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
717
cb7e9eab
PS
718int
719cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 720 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
721{
722 *num = size;
335b7b62
PS
723 credits->value = 0;
724 credits->instance = server->reconnect_instance;
cb7e9eab
PS
725 return 0;
726}
727
/*
 * Validate session/server state and allocate a mid for @in_buf, queueing
 * it on the server's pending_mid_q. All status checks are made under
 * cifs_tcp_ses_lock. Returns -ENOENT if the server is exiting, -EAGAIN
 * when the session is reconnecting or mid-setup/teardown (except for the
 * commands legal in those states), -ENOMEM on allocation failure, else 0.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&cifs_tcp_ses_lock);
	if (ses->server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		spin_unlock(&cifs_tcp_ses_lock);
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		/* only session setup / negotiate may be sent on a new session */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&cifs_tcp_ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&cifs_tcp_ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&cifs_tcp_ses_lock);

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
770
0ade640e
JL
771static int
772wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 773{
0ade640e 774 int error;
7ee1af76 775
5853cc2a 776 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 777 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
778 if (error < 0)
779 return -ERESTARTSYS;
7ee1af76 780
0ade640e 781 return 0;
7ee1af76
JA
782}
783
fec344e3
JL
/*
 * Build a mid for an async SMB1 request: sanity-check that iov[0] is the
 * 4-byte RFC1002 slot immediately followed by iov[1], request signing if
 * the server demands it, allocate the mid and sign the request. Returns
 * the mid or an ERR_PTR (-EIO bad iov layout, -ENOMEM, or signing error).
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
133672ef 811
a6827c18
JL
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * Obtains one credit unless the caller passed CIFS_HAS_CREDITS, takes
 * srv_mutex, refuses with -EAGAIN if a reconnect invalidated the credit
 * instance, queues the mid and transmits. On send failure the mid (and
 * current mid counter) are rolled back; on any failure the credit is
 * returned and waiters are woken.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* undo the mid/sequence state consumed by the failed send */
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
893
133672ef
SF
/*
 *
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 * Wraps SendReceive2 with CIFS_NO_RSP_BUF so the response buffer is
 * discarded; @in_buf must start with a valid RFC1002 length field.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
920
/*
 * Map a completed (or failed) mid's state to an errno for a synchronous
 * caller and release the mid. A received response returns 0 and leaves
 * the mid queued for the caller; every other state unlinks/frees it:
 * retry -> -EAGAIN, malformed -> -EIO, shutdown -> -EHOSTDOWN, and any
 * unexpected state is logged and treated as -EIO.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: caller consumes the response; keep the mid alive */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* still queued in an unexpected state: unlink defensively */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
				__func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
957
121b046a 958static inline int
fb2036d8
PS
959send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
960 struct mid_q_entry *mid)
76dcc26f 961{
121b046a 962 return server->ops->send_cancel ?
fb2036d8 963 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
964}
965
2c8f981d
JL
/*
 * Validate a received SMB response: verify its signature when signing is
 * active, then map the SMB status code to a POSIX error.
 *
 * @mid:       completed mid whose resp_buf holds the full response
 * @server:    connection the response arrived on
 * @log_error: whether to log the NT STATUS before mapping it
 *
 * Returns the mapped POSIX error (0 on success).  Note that a signature
 * verification failure is only logged, not returned (see FIXME below).
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	/* total frame length: RFC1001 header (4 bytes) + SMB payload */
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		/* split off the 4-byte RFC1001 header; signing covers
		   only the SMB payload that follows it */
		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}
996
fec344e3 997struct mid_q_entry *
f780bd3f
AA
998cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
999 struct smb_rqst *rqst)
792af7b0
PS
1000{
1001 int rc;
fec344e3 1002 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
1003 struct mid_q_entry *mid;
1004
738f9de5
PS
1005 if (rqst->rq_iov[0].iov_len != 4 ||
1006 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
1007 return ERR_PTR(-EIO);
1008
792af7b0
PS
1009 rc = allocate_mid(ses, hdr, &mid);
1010 if (rc)
fec344e3
JL
1011 return ERR_PTR(rc);
1012 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
1013 if (rc) {
3c1bf7e4 1014 cifs_delete_mid(mid);
fec344e3
JL
1015 return ERR_PTR(rc);
1016 }
1017 return mid;
792af7b0
PS
1018}
1019
4e34feb5 1020static void
ee258d79 1021cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
1022{
1023 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
1024 struct cifs_credits credits;
1025
1026 credits.value = server->ops->get_credits(mid);
1027 credits.instance = server->reconnect_instance;
8a26f0f7 1028
34f4deb7 1029 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
1030}
1031
ee258d79
PS
/*
 * Completion for the final PDU of a compound chain: return the credits
 * and then wake the thread waiting in compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1038
/*
 * Completion for a mid whose waiter has given up: return the credits
 * and free the mid, since no thread will collect it.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
1045
5f68ea4a
AA
1046/*
1047 * Return a channel (master if none) of @ses that can be used to send
1048 * regular requests.
1049 *
1050 * If we are currently binding a new channel (negprot/sess.setup),
1051 * return the new incomplete channel.
1052 */
1053struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1054{
1055 uint index = 0;
1056
1057 if (!ses)
1058 return NULL;
1059
724244cd 1060 spin_lock(&ses->chan_lock);
f486ef8e
SP
1061 /* round robin */
1062pick_another:
1063 if (ses->chan_count > 1 &&
1064 !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
1065 index = (uint)atomic_inc_return(&ses->chan_seq);
1066 index %= ses->chan_count;
1067
1068 if (CIFS_CHAN_NEEDS_RECONNECT(ses, index))
1069 goto pick_another;
5f68ea4a 1070 }
f486ef8e
SP
1071 spin_unlock(&ses->chan_lock);
1072
1073 return ses->chans[index].server;
5f68ea4a
AA
1074}
1075
b8f57ee8 1076int
e0bba0b8 1077compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
352d96f3 1078 struct TCP_Server_Info *server,
e0bba0b8
RS
1079 const int flags, const int num_rqst, struct smb_rqst *rqst,
1080 int *resp_buf_type, struct kvec *resp_iov)
7ee1af76 1081{
480b1cb9 1082 int i, j, optype, rc = 0;
e0bba0b8 1083 struct mid_q_entry *midQ[MAX_COMPOUND];
8544f4aa 1084 bool cancelled_mid[MAX_COMPOUND] = {false};
34f4deb7
PS
1085 struct cifs_credits credits[MAX_COMPOUND] = {
1086 { .value = 0, .instance = 0 }
1087 };
1088 unsigned int instance;
738f9de5 1089 char *buf;
50c2f753 1090
a891f0f8 1091 optype = flags & CIFS_OP_MASK;
133672ef 1092
e0bba0b8
RS
1093 for (i = 0; i < num_rqst; i++)
1094 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
7ee1af76 1095
352d96f3 1096 if (!ses || !ses->server || !server) {
f96637be 1097 cifs_dbg(VFS, "Null session\n");
7ee1af76
JA
1098 return -EIO;
1099 }
1100
080dc5e5
SP
1101 spin_lock(&cifs_tcp_ses_lock);
1102 if (server->tcpStatus == CifsExiting) {
1103 spin_unlock(&cifs_tcp_ses_lock);
7ee1af76 1104 return -ENOENT;
080dc5e5
SP
1105 }
1106 spin_unlock(&cifs_tcp_ses_lock);
7ee1af76 1107
792af7b0 1108 /*
257b7809 1109 * Wait for all the requests to become available.
7091bcab
PS
1110 * This approach still leaves the possibility to be stuck waiting for
1111 * credits if the server doesn't grant credits to the outstanding
257b7809
RS
1112 * requests and if the client is completely idle, not generating any
1113 * other requests.
1114 * This can be handled by the eventual session reconnect.
792af7b0 1115 */
3190b59a 1116 rc = wait_for_compound_request(server, num_rqst, flags,
257b7809
RS
1117 &instance);
1118 if (rc)
1119 return rc;
97ea4998 1120
257b7809
RS
1121 for (i = 0; i < num_rqst; i++) {
1122 credits[i].value = 1;
1123 credits[i].instance = instance;
8544f4aa 1124 }
7ee1af76 1125
792af7b0
PS
1126 /*
1127 * Make sure that we sign in the same order that we send on this socket
1128 * and avoid races inside tcp sendmsg code that could cause corruption
1129 * of smb data.
1130 */
7ee1af76 1131
3190b59a 1132 mutex_lock(&server->srv_mutex);
7ee1af76 1133
97ea4998
PS
1134 /*
1135 * All the parts of the compound chain belong obtained credits from the
257b7809 1136 * same session. We can not use credits obtained from the previous
97ea4998
PS
1137 * session to send this request. Check if there were reconnects after
1138 * we obtained credits and return -EAGAIN in such cases to let callers
1139 * handle it.
1140 */
3190b59a
AA
1141 if (instance != server->reconnect_instance) {
1142 mutex_unlock(&server->srv_mutex);
97ea4998 1143 for (j = 0; j < num_rqst; j++)
3190b59a 1144 add_credits(server, &credits[j], optype);
97ea4998
PS
1145 return -EAGAIN;
1146 }
1147
e0bba0b8 1148 for (i = 0; i < num_rqst; i++) {
f780bd3f 1149 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
e0bba0b8 1150 if (IS_ERR(midQ[i])) {
3190b59a 1151 revert_current_mid(server, i);
e0bba0b8
RS
1152 for (j = 0; j < i; j++)
1153 cifs_delete_mid(midQ[j]);
3190b59a 1154 mutex_unlock(&server->srv_mutex);
8544f4aa 1155
e0bba0b8 1156 /* Update # of requests on wire to server */
8544f4aa 1157 for (j = 0; j < num_rqst; j++)
3190b59a 1158 add_credits(server, &credits[j], optype);
e0bba0b8
RS
1159 return PTR_ERR(midQ[i]);
1160 }
1161
1162 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
8a26f0f7 1163 midQ[i]->optype = optype;
4e34feb5 1164 /*
ee258d79
PS
1165 * Invoke callback for every part of the compound chain
1166 * to calculate credits properly. Wake up this thread only when
1167 * the last element is received.
4e34feb5
RS
1168 */
1169 if (i < num_rqst - 1)
ee258d79
PS
1170 midQ[i]->callback = cifs_compound_callback;
1171 else
1172 midQ[i]->callback = cifs_compound_last_callback;
1da177e4 1173 }
3190b59a
AA
1174 cifs_in_send_inc(server);
1175 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1176 cifs_in_send_dec(server);
e0bba0b8
RS
1177
1178 for (i = 0; i < num_rqst; i++)
1179 cifs_save_when_sent(midQ[i]);
7ee1af76 1180
c781af7e 1181 if (rc < 0) {
3190b59a
AA
1182 revert_current_mid(server, num_rqst);
1183 server->sequence_number -= 2;
c781af7e 1184 }
e0bba0b8 1185
3190b59a 1186 mutex_unlock(&server->srv_mutex);
7ee1af76 1187
d69cb728
RS
1188 /*
1189 * If sending failed for some reason or it is an oplock break that we
1190 * will not receive a response to - return credits back
1191 */
1192 if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
ee258d79 1193 for (i = 0; i < num_rqst; i++)
3190b59a 1194 add_credits(server, &credits[i], optype);
cb5c2e63 1195 goto out;
ee258d79
PS
1196 }
1197
1198 /*
1199 * At this point the request is passed to the network stack - we assume
1200 * that any credits taken from the server structure on the client have
1201 * been spent and we can't return them back. Once we receive responses
1202 * we will collect credits granted by the server in the mid callbacks
1203 * and add those credits to the server structure.
1204 */
e0bba0b8 1205
cb5c2e63
RS
1206 /*
1207 * Compounding is never used during session establish.
1208 */
080dc5e5 1209 spin_lock(&cifs_tcp_ses_lock);
05946d4b 1210 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
080dc5e5
SP
1211 spin_unlock(&cifs_tcp_ses_lock);
1212
05946d4b 1213 mutex_lock(&server->srv_mutex);
f486ef8e 1214 smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
05946d4b 1215 mutex_unlock(&server->srv_mutex);
080dc5e5
SP
1216
1217 spin_lock(&cifs_tcp_ses_lock);
05946d4b 1218 }
080dc5e5 1219 spin_unlock(&cifs_tcp_ses_lock);
e0bba0b8 1220
cb5c2e63 1221 for (i = 0; i < num_rqst; i++) {
3190b59a 1222 rc = wait_for_response(server, midQ[i]);
8a26f0f7
PS
1223 if (rc != 0)
1224 break;
1225 }
1226 if (rc != 0) {
1227 for (; i < num_rqst; i++) {
e3d100ea 1228 cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
43de1db3 1229 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
3190b59a 1230 send_cancel(server, &rqst[i], midQ[i]);
e0bba0b8 1231 spin_lock(&GlobalMid_Lock);
7b71843f 1232 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
e0bba0b8 1233 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
8a26f0f7 1234 midQ[i]->callback = cifs_cancelled_callback;
8544f4aa 1235 cancelled_mid[i] = true;
34f4deb7 1236 credits[i].value = 0;
e0bba0b8 1237 }
1be912dd 1238 spin_unlock(&GlobalMid_Lock);
e0bba0b8 1239 }
cb5c2e63
RS
1240 }
1241
cb5c2e63
RS
1242 for (i = 0; i < num_rqst; i++) {
1243 if (rc < 0)
1244 goto out;
e0bba0b8 1245
3190b59a 1246 rc = cifs_sync_mid_result(midQ[i], server);
e0bba0b8 1247 if (rc != 0) {
8544f4aa
PS
1248 /* mark this mid as cancelled to not free it below */
1249 cancelled_mid[i] = true;
1250 goto out;
1be912dd 1251 }
2b2bdfba 1252
e0bba0b8
RS
1253 if (!midQ[i]->resp_buf ||
1254 midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1255 rc = -EIO;
1256 cifs_dbg(FYI, "Bad MID state?\n");
1257 goto out;
1258 }
a891f0f8 1259
e0bba0b8
RS
1260 buf = (char *)midQ[i]->resp_buf;
1261 resp_iov[i].iov_base = buf;
1262 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
3190b59a 1263 server->vals->header_preamble_size;
e0bba0b8
RS
1264
1265 if (midQ[i]->large_buf)
1266 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1267 else
1268 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1269
3190b59a 1270 rc = server->ops->check_receive(midQ[i], server,
e0bba0b8 1271 flags & CIFS_LOG_ERROR);
1da177e4 1272
e0bba0b8 1273 /* mark it so buf will not be freed by cifs_delete_mid */
392e1c5d 1274 if ((flags & CIFS_NO_RSP_BUF) == 0)
e0bba0b8 1275 midQ[i]->resp_buf = NULL;
cb5c2e63 1276
e0bba0b8 1277 }
cb5c2e63
RS
1278
1279 /*
1280 * Compounding is never used during session establish.
1281 */
080dc5e5 1282 spin_lock(&cifs_tcp_ses_lock);
0f56db83 1283 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
cb5c2e63
RS
1284 struct kvec iov = {
1285 .iov_base = resp_iov[0].iov_base,
1286 .iov_len = resp_iov[0].iov_len
1287 };
080dc5e5 1288 spin_unlock(&cifs_tcp_ses_lock);
05946d4b 1289 mutex_lock(&server->srv_mutex);
f486ef8e 1290 smb311_update_preauth_hash(ses, server, &iov, 1);
05946d4b 1291 mutex_unlock(&server->srv_mutex);
080dc5e5 1292 spin_lock(&cifs_tcp_ses_lock);
cb5c2e63 1293 }
080dc5e5 1294 spin_unlock(&cifs_tcp_ses_lock);
cb5c2e63 1295
7ee1af76 1296out:
4e34feb5
RS
1297 /*
1298 * This will dequeue all mids. After this it is important that the
1299 * demultiplex_thread will not process any of these mids any futher.
1300 * This is prevented above by using a noop callback that will not
1301 * wake this thread except for the very last PDU.
1302 */
8544f4aa
PS
1303 for (i = 0; i < num_rqst; i++) {
1304 if (!cancelled_mid[i])
1305 cifs_delete_mid(midQ[i]);
8544f4aa 1306 }
1da177e4 1307
d6e04ae6
SF
1308 return rc;
1309}
1da177e4 1310
e0bba0b8
RS
/*
 * Convenience wrapper: send a single request as a one-element compound
 * and wait for its response.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1320
738f9de5
PS
1321int
1322SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1323 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1324 const int flags, struct kvec *resp_iov)
1325{
1326 struct smb_rqst rqst;
3cecf486 1327 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1328 int rc;
1329
3cecf486 1330 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1331 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1332 GFP_KERNEL);
117e3b7f
SF
1333 if (!new_iov) {
1334 /* otherwise cifs_send_recv below sets resp_buf_type */
1335 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1336 return -ENOMEM;
117e3b7f 1337 }
3cecf486
RS
1338 } else
1339 new_iov = s_iov;
738f9de5
PS
1340
1341 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1342 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1343
1344 new_iov[0].iov_base = new_iov[1].iov_base;
1345 new_iov[0].iov_len = 4;
1346 new_iov[1].iov_base += 4;
1347 new_iov[1].iov_len -= 4;
1348
1349 memset(&rqst, 0, sizeof(struct smb_rqst));
1350 rqst.rq_iov = new_iov;
1351 rqst.rq_nvec = n_vec + 1;
1352
352d96f3
AA
1353 rc = cifs_send_recv(xid, ses, ses->server,
1354 &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1355 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1356 kfree(new_iov);
738f9de5
PS
1357 return rc;
1358}
1359
1da177e4 1360int
96daf2b0 1361SendReceive(const unsigned int xid, struct cifs_ses *ses,
1da177e4 1362 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
480b1cb9 1363 int *pbytes_returned, const int flags)
1da177e4
LT
1364{
1365 int rc = 0;
1da177e4 1366 struct mid_q_entry *midQ;
fb2036d8
PS
1367 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1368 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1369 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
34f4deb7 1370 struct cifs_credits credits = { .value = 1, .instance = 0 };
ac6ad7a8 1371 struct TCP_Server_Info *server;
1da177e4
LT
1372
1373 if (ses == NULL) {
f96637be 1374 cifs_dbg(VFS, "Null smb session\n");
1da177e4
LT
1375 return -EIO;
1376 }
ac6ad7a8 1377 server = ses->server;
afe6f653 1378 if (server == NULL) {
f96637be 1379 cifs_dbg(VFS, "Null tcp session\n");
1da177e4
LT
1380 return -EIO;
1381 }
1382
080dc5e5
SP
1383 spin_lock(&cifs_tcp_ses_lock);
1384 if (server->tcpStatus == CifsExiting) {
1385 spin_unlock(&cifs_tcp_ses_lock);
31ca3bc3 1386 return -ENOENT;
080dc5e5
SP
1387 }
1388 spin_unlock(&cifs_tcp_ses_lock);
31ca3bc3 1389
79a58d1f 1390 /* Ensure that we do not send more than 50 overlapping requests
1da177e4
LT
1391 to the same server. We may make this configurable later or
1392 use ses->maxReq */
1da177e4 1393
fb2036d8 1394 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
a0a3036b
JP
1395 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1396 len);
6d9c6d54
VL
1397 return -EIO;
1398 }
1399
afe6f653 1400 rc = wait_for_free_request(server, flags, &credits.instance);
7ee1af76
JA
1401 if (rc)
1402 return rc;
1403
79a58d1f 1404 /* make sure that we sign in the same order that we send on this socket
1da177e4
LT
1405 and avoid races inside tcp sendmsg code that could cause corruption
1406 of smb data */
1407
afe6f653 1408 mutex_lock(&server->srv_mutex);
1da177e4 1409
7ee1af76
JA
1410 rc = allocate_mid(ses, in_buf, &midQ);
1411 if (rc) {
8bd3754c 1412 mutex_unlock(&server->srv_mutex);
7ee1af76 1413 /* Update # of requests on wire to server */
afe6f653 1414 add_credits(server, &credits, 0);
7ee1af76 1415 return rc;
1da177e4
LT
1416 }
1417
afe6f653 1418 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
829049cb 1419 if (rc) {
afe6f653 1420 mutex_unlock(&server->srv_mutex);
829049cb
VL
1421 goto out;
1422 }
1da177e4 1423
7c9421e1 1424 midQ->mid_state = MID_REQUEST_SUBMITTED;
789e6661 1425
afe6f653
RS
1426 cifs_in_send_inc(server);
1427 rc = smb_send(server, in_buf, len);
1428 cifs_in_send_dec(server);
789e6661 1429 cifs_save_when_sent(midQ);
ad313cb8
JL
1430
1431 if (rc < 0)
afe6f653 1432 server->sequence_number -= 2;
ad313cb8 1433
afe6f653 1434 mutex_unlock(&server->srv_mutex);
7ee1af76 1435
79a58d1f 1436 if (rc < 0)
7ee1af76
JA
1437 goto out;
1438
afe6f653 1439 rc = wait_for_response(server, midQ);
1be912dd 1440 if (rc != 0) {
afe6f653 1441 send_cancel(server, &rqst, midQ);
1be912dd 1442 spin_lock(&GlobalMid_Lock);
7c9421e1 1443 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1be912dd
JL
1444 /* no longer considered to be "in-flight" */
1445 midQ->callback = DeleteMidQEntry;
1446 spin_unlock(&GlobalMid_Lock);
afe6f653 1447 add_credits(server, &credits, 0);
1be912dd
JL
1448 return rc;
1449 }
1450 spin_unlock(&GlobalMid_Lock);
1451 }
1da177e4 1452
afe6f653 1453 rc = cifs_sync_mid_result(midQ, server);
053d5034 1454 if (rc != 0) {
afe6f653 1455 add_credits(server, &credits, 0);
1da177e4
LT
1456 return rc;
1457 }
50c2f753 1458
2c8f981d 1459 if (!midQ->resp_buf || !out_buf ||
7c9421e1 1460 midQ->mid_state != MID_RESPONSE_RECEIVED) {
2b2bdfba 1461 rc = -EIO;
afe6f653 1462 cifs_server_dbg(VFS, "Bad MID state?\n");
2c8f981d 1463 goto out;
1da177e4 1464 }
7ee1af76 1465
d4e4854f 1466 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
2c8f981d 1467 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
afe6f653 1468 rc = cifs_check_receive(midQ, server, 0);
7ee1af76 1469out:
3c1bf7e4 1470 cifs_delete_mid(midQ);
afe6f653 1471 add_credits(server, &credits, 0);
1da177e4 1472
7ee1af76
JA
1473 return rc;
1474}
1da177e4 1475
7ee1af76
JA
1476/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1477 blocking lock to return. */
1478
1479static int
96daf2b0 1480send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1481 struct smb_hdr *in_buf,
1482 struct smb_hdr *out_buf)
1483{
1484 int bytes_returned;
96daf2b0 1485 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1486 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1487
1488 /* We just modify the current in_buf to change
1489 the type of lock from LOCKING_ANDX_SHARED_LOCK
1490 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1491 LOCKING_ANDX_CANCEL_LOCK. */
1492
1493 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1494 pSMB->Timeout = 0;
88257360 1495 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1496
1497 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1498 &bytes_returned, 0);
7ee1af76
JA
1499}
1500
/*
 * Send an SMB1 blocking-lock request and wait - interruptibly - for the
 * response.
 *
 * Unlike SendReceive(), the wait allows signals so the caller's system
 * call can be restarted.  If a signal arrives while the request is still
 * in flight, the lock is actively cancelled on the server (NT_CANCEL for
 * POSIX/trans2 locks, LOCKINGX_CANCEL_LOCK for Windows locks) and we
 * then wait for the server's answer to the cancel.  Returns 0, a
 * negative errno, or -ERESTARTSYS when the call should be restarted.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;		/* set when the lock completed after a signal */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&cifs_tcp_ses_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: undo the signing sequence-number advance */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&cifs_tcp_ses_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&cifs_tcp_ses_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* after cancelling, wait (uninterruptibly) for the reply */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy RFC1001 header + payload back to the caller's buffer */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}