]> git.ipfire.org Git - people/ms/linux.git/blame - fs/cifs/transport.c
cifs: remove remaining build warnings
[people/ms/linux.git] / fs / cifs / transport.c
CommitLineData
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
1da177e4
LT
21#include <asm/processor.h>
22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
1da177e4
LT
24#include "cifspdu.h"
25#include "cifsglob.h"
26#include "cifsproto.h"
27#include "cifs_debug.h"
8bd68c6e 28#include "smb2proto.h"
9762c2d0 29#include "smbdirect.h"
50c2f753 30
3cecf486
RS
31/* Max number of iovectors we can use off the stack when sending requests. */
32#define CIFS_MAX_IOV_SIZE 8
33
2dc7e1c0
PS
/*
 * Default mid completion callback: the request is synchronous, so simply
 * wake the task that issued it (stored in ->callback_data by
 * AllocMidQEntry()).
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
39
/*
 * Allocate and initialize a mid (multiplex id) queue entry tracking one
 * outstanding request on @server.  The entry starts with a single kref
 * reference and defaults to synchronous completion: the callback wakes
 * the allocating task.  Returns NULL only when @server is NULL.
 * The caller is responsible for queueing the mid and dropping the ref.
 */
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	/*
	 * No NULL check after the allocation: presumably relies on
	 * mempool + GFP_NOFS sleeping rather than failing — TODO confirm.
	 */
	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	/* creator holds a task ref so the wakeup target stays valid */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
75
/*
 * Final kref release for a mid: give cancelled-but-answered mids to the
 * protocol's handler, free the response buffer, update (optional)
 * per-command latency statistics, drop the creator task reference and
 * return the entry to the mempool.  Called via kref_put() with
 * GlobalMid_Lock held by cifs_mid_q_entry_release().
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/*
	 * A response that arrived after the waiter gave up must still be
	 * processed (e.g. to release server resources) by the protocol op.
	 */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/total round trip per SMB2 command code */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	/* balances get_task_struct() in AllocMidQEntry() */
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
154
/*
 * Drop one reference on @midEntry.  The kref_put() is performed under
 * GlobalMid_Lock, so the final release runs with the lock held.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
161
/* Historic name kept: simply drops the caller's reference on @midEntry. */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
166
/*
 * Unlink @mid from the pending queue (at most once — MID_DELETED guards
 * against a double list_del) and drop the caller's reference.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
179
6f49f46b
JL
180/*
181 * smb_send_kvec - send an array of kvecs to the server
182 * @server: Server to send the data to
3ab3f2a1 183 * @smb_msg: Message to send
6f49f46b
JL
184 * @sent: amount of data sent on socket is stored here
185 *
186 * Our basic "send data to server" function. Should be called with srv_mutex
187 * held. The caller is responsible for handling the results.
188 */
d6e04ae6 189static int
3ab3f2a1
AV
190smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
191 size_t *sent)
1da177e4
LT
192{
193 int rc = 0;
3ab3f2a1 194 int retries = 0;
edf1ae40 195 struct socket *ssocket = server->ssocket;
50c2f753 196
6f49f46b
JL
197 *sent = 0;
198
3ab3f2a1
AV
199 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
200 smb_msg->msg_namelen = sizeof(struct sockaddr);
201 smb_msg->msg_control = NULL;
202 smb_msg->msg_controllen = 0;
0496e02d 203 if (server->noblocksnd)
3ab3f2a1 204 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 205 else
3ab3f2a1 206 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 207
3ab3f2a1 208 while (msg_data_left(smb_msg)) {
6f49f46b
JL
209 /*
210 * If blocking send, we try 3 times, since each can block
211 * for 5 seconds. For nonblocking we have to try more
212 * but wait increasing amounts of time allowing time for
213 * socket to clear. The overall time we wait in either
214 * case to send on the socket is about 15 seconds.
215 * Similarly we wait for 15 seconds for a response from
216 * the server in SendReceive[2] for the server to send
217 * a response back for most types of requests (except
218 * SMB Write past end of file which can be slow, and
219 * blocking lock operations). NFS waits slightly longer
220 * than CIFS, but this can make it take longer for
221 * nonresponsive servers to be detected and 15 seconds
222 * is more than enough time for modern networks to
223 * send a packet. In most cases if we fail to send
224 * after the retries we will kill the socket and
225 * reconnect which may clear the network problem.
226 */
3ab3f2a1 227 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 228 if (rc == -EAGAIN) {
3ab3f2a1
AV
229 retries++;
230 if (retries >= 14 ||
231 (!server->noblocksnd && (retries > 2))) {
afe6f653 232 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 233 ssocket);
3ab3f2a1 234 return -EAGAIN;
1da177e4 235 }
3ab3f2a1 236 msleep(1 << retries);
1da177e4
LT
237 continue;
238 }
6f49f46b 239
79a58d1f 240 if (rc < 0)
3ab3f2a1 241 return rc;
6f49f46b 242
79a58d1f 243 if (rc == 0) {
3e84469d
SF
244 /* should never happen, letting socket clear before
245 retrying is our only obvious option here */
afe6f653 246 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
247 msleep(500);
248 continue;
d6e04ae6 249 }
6f49f46b 250
3ab3f2a1
AV
251 /* send was at least partially successful */
252 *sent += rc;
253 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 254 }
3ab3f2a1 255 return 0;
97bc00b3
JL
256}
257
/*
 * Compute the total number of bytes @rqst will put on the wire.
 * For SMB2+ (no header preamble), when the first kvec is exactly the
 * 4-byte RFC1002 length marker it is skipped, since that marker is
 * generated separately by __smb_send_rqst().
 */
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
				  rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
301
/*
 * Send @num_rqst compounded requests on @server's transport (smbdirect/RDMA
 * or TCP socket).  For TCP, the socket is corked for the duration, all
 * signals are blocked so a partial send cannot be caused by interruption,
 * and an RFC1002 length marker is generated for SMB2+.  A partial send
 * forces a reconnect so the server discards the fragment.
 * Caller must hold srv_mutex (see smb_send_kvec()).
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
446
/*
 * Send @num_rqst requests, encrypting them first when CIFS_TRANSFORM_REQ
 * is set: a transform header kvec is prepended as cur_rqst[0] and the
 * protocol's init_transform_rq op builds the encrypted copies, which are
 * freed again after the send.  Unencrypted requests go straight to
 * __smb_send_rqst().
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* slot 0 is reserved for the transform header below */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	/* free only the transformed copies, not the header at index 0 */
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}
490
0496e02d
JL
491int
492smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
493 unsigned int smb_buf_length)
494{
738f9de5 495 struct kvec iov[2];
7fb8986e
PS
496 struct smb_rqst rqst = { .rq_iov = iov,
497 .rq_nvec = 2 };
0496e02d 498
738f9de5
PS
499 iov[0].iov_base = smb_buffer;
500 iov[0].iov_len = 4;
501 iov[1].iov_base = (char *)smb_buffer + 4;
502 iov[1].iov_len = smb_buf_length;
0496e02d 503
07cd952f 504 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
505}
506
/*
 * Block until @num_credits credits are available on @server (or @timeout
 * milliseconds elapse; a negative timeout waits indefinitely), then claim
 * them.  CIFS_NON_BLOCKING requests (oplock breaks) always proceed
 * immediately, even driving the credit count negative.  On success the
 * reconnect instance the credits belong to is stored in *@instance.
 * Returns 0, -EAGAIN (echo with no credits), -ENOENT (server exiting),
 * -EBUSY (timed out) or -ERESTARTSYS (killed).
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		/* snapshot counters under the lock for trace/debug output */
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				/* wait timed out */
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			/* re-check the credit count from the top */
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&cifs_tcp_ses_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&cifs_tcp_ses_lock);
				return -ENOENT;
			}
			spin_unlock(&cifs_tcp_ses_lock);

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					/* wait timed out */
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
1da177e4 660
/*
 * Acquire a single credit for one request; a negative timeout means
 * wait indefinitely.
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags, instance);
}
668
/*
 * Acquire @num credits for a compound request, failing fast with -EDEADLK
 * when there are too few credits AND nothing in flight (so waiting could
 * never be satisfied).  Otherwise waits up to 60 seconds via
 * wait_for_free_credits().
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
714
cb7e9eab
PS
715int
716cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 717 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
718{
719 *num = size;
335b7b62
PS
720 credits->value = 0;
721 credits->instance = server->reconnect_instance;
cb7e9eab
PS
722 return 0;
723}
724
/*
 * Allocate a mid for @in_buf and queue it on the server's pending list.
 * Refuses (-EAGAIN) non-setup commands while the session is still being
 * negotiated (SES_NEW) and non-logoff commands while it is being torn
 * down (SES_EXITING).  Returns 0, -EAGAIN or -ENOMEM.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&cifs_tcp_ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&cifs_tcp_ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&cifs_tcp_ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&cifs_tcp_ses_lock);

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
756
/*
 * Sleep (freezable, killable) until @midQ leaves MID_REQUEST_SUBMITTED,
 * i.e. a response arrived or the connection state changed.  Returns 0, or
 * -ERESTARTSYS if the wait was interrupted by a fatal signal.
 */
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
769
/*
 * Build a mid for an async SMB1 request and sign the request if required.
 * Expects rq_iov[0] to be exactly the 4-byte RFC1002 field immediately
 * followed in memory by rq_iov[1] (-EIO otherwise).  Returns the mid or
 * an ERR_PTR; on signing failure the mid is released again.
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
133672ef 797
a6827c18
JL
798/*
799 * Send a SMB request and set the callback function in the mid to handle
800 * the result. Caller is responsible for dealing with timeouts.
801 */
802int
fec344e3 803cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 804 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
805 mid_handle_t *handle, void *cbdata, const int flags,
806 const struct cifs_credits *exist_credits)
a6827c18 807{
480b1cb9 808 int rc;
a6827c18 809 struct mid_q_entry *mid;
335b7b62 810 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 811 unsigned int instance;
480b1cb9 812 int optype;
a6827c18 813
a891f0f8
PS
814 optype = flags & CIFS_OP_MASK;
815
cb7e9eab 816 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 817 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
818 if (rc)
819 return rc;
335b7b62 820 credits.value = 1;
34f4deb7 821 credits.instance = instance;
3349c3a7
PS
822 } else
823 instance = exist_credits->instance;
a6827c18 824
cc391b69 825 cifs_server_lock(server);
3349c3a7
PS
826
827 /*
828 * We can't use credits obtained from the previous session to send this
829 * request. Check if there were reconnects after we obtained credits and
830 * return -EAGAIN in such cases to let callers handle it.
831 */
832 if (instance != server->reconnect_instance) {
cc391b69 833 cifs_server_unlock(server);
3349c3a7
PS
834 add_credits_and_wake_if(server, &credits, optype);
835 return -EAGAIN;
836 }
837
fec344e3
JL
838 mid = server->ops->setup_async_request(server, rqst);
839 if (IS_ERR(mid)) {
cc391b69 840 cifs_server_unlock(server);
335b7b62 841 add_credits_and_wake_if(server, &credits, optype);
fec344e3 842 return PTR_ERR(mid);
a6827c18
JL
843 }
844
44d22d84 845 mid->receive = receive;
a6827c18
JL
846 mid->callback = callback;
847 mid->callback_data = cbdata;
9b7c18a2 848 mid->handle = handle;
7c9421e1 849 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 850
ffc61ccb
SP
851 /* put it on the pending_mid_q */
852 spin_lock(&GlobalMid_Lock);
853 list_add_tail(&mid->qhead, &server->pending_mid_q);
854 spin_unlock(&GlobalMid_Lock);
855
93d2cb6c
LL
856 /*
857 * Need to store the time in mid before calling I/O. For call_async,
858 * I/O response may come back and free the mid entry on another thread.
859 */
860 cifs_save_when_sent(mid);
789e6661 861 cifs_in_send_inc(server);
1f3a8f5f 862 rc = smb_send_rqst(server, 1, rqst, flags);
789e6661 863 cifs_in_send_dec(server);
ad313cb8 864
820962dc 865 if (rc < 0) {
c781af7e 866 revert_current_mid(server, mid->credits);
ad313cb8 867 server->sequence_number -= 2;
820962dc
RV
868 cifs_delete_mid(mid);
869 }
870
cc391b69 871 cifs_server_unlock(server);
789e6661 872
ffc61ccb
SP
873 if (rc == 0)
874 return 0;
a6827c18 875
335b7b62 876 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
877 return rc;
878}
879
133672ef
SF
880/*
881 *
882 * Send an SMB Request. No response info (other than return code)
883 * needs to be parsed.
884 *
885 * flags indicate the type of request buffer and how long to wait
886 * and whether to log NT STATUS code (error) before mapping it to POSIX error
887 *
888 */
889int
96daf2b0 890SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 891 char *in_buf, int flags)
133672ef
SF
892{
893 int rc;
894 struct kvec iov[1];
da502f7d 895 struct kvec rsp_iov;
133672ef
SF
896 int resp_buf_type;
897
792af7b0
PS
898 iov[0].iov_base = in_buf;
899 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 900 flags |= CIFS_NO_RSP_BUF;
da502f7d 901 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 902 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 903
133672ef
SF
904 return rc;
905}
906
/*
 * Map the final state of a synchronous @mid to an errno and release it.
 * A received response returns 0 and leaves the mid on its queue (the
 * caller still owns the response buffer); all other states unlink/free
 * the mid here.  Unexpected states are logged and treated as -EIO.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* still queued in an invalid state: unlink it ourselves */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
943
121b046a 944static inline int
fb2036d8
PS
945send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
946 struct mid_q_entry *mid)
76dcc26f 947{
121b046a 948 return server->ops->send_cancel ?
fb2036d8 949 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
950}
951
2c8f981d
JL
952int
953cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
954 bool log_error)
955{
792af7b0 956 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
957
958 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
959
960 /* convert the length into a more usable form */
38d77c50 961 if (server->sign) {
738f9de5 962 struct kvec iov[2];
985e4ff0 963 int rc = 0;
738f9de5
PS
964 struct smb_rqst rqst = { .rq_iov = iov,
965 .rq_nvec = 2 };
826a95e4 966
738f9de5
PS
967 iov[0].iov_base = mid->resp_buf;
968 iov[0].iov_len = 4;
969 iov[1].iov_base = (char *)mid->resp_buf + 4;
970 iov[1].iov_len = len - 4;
2c8f981d 971 /* FIXME: add code to kill session */
bf5ea0e2 972 rc = cifs_verify_signature(&rqst, server,
0124cc45 973 mid->sequence_number);
985e4ff0 974 if (rc)
afe6f653 975 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
f96637be 976 rc);
2c8f981d
JL
977 }
978
979 /* BB special case reconnect tid and uid here? */
a3713ec3 980 return map_and_check_smb_error(mid, log_error);
2c8f981d
JL
981}
982
fec344e3 983struct mid_q_entry *
f780bd3f
AA
984cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
985 struct smb_rqst *rqst)
792af7b0
PS
986{
987 int rc;
fec344e3 988 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
989 struct mid_q_entry *mid;
990
738f9de5
PS
991 if (rqst->rq_iov[0].iov_len != 4 ||
992 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
993 return ERR_PTR(-EIO);
994
792af7b0
PS
995 rc = allocate_mid(ses, hdr, &mid);
996 if (rc)
fec344e3
JL
997 return ERR_PTR(rc);
998 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
999 if (rc) {
3c1bf7e4 1000 cifs_delete_mid(mid);
fec344e3
JL
1001 return ERR_PTR(rc);
1002 }
1003 return mid;
792af7b0
PS
1004}
1005
4e34feb5 1006static void
ee258d79 1007cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
1008{
1009 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
1010 struct cifs_credits credits;
1011
1012 credits.value = server->ops->get_credits(mid);
1013 credits.instance = server->reconnect_instance;
8a26f0f7 1014
34f4deb7 1015 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
1016}
1017
/*
 * Completion callback for the final PDU of a compound chain: collect the
 * granted credits, then wake the thread waiting in compound_send_recv().
 * Intermediate PDUs use cifs_compound_callback() and do not wake anyone.
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1024
/*
 * Completion callback installed when the sender gave up waiting for the
 * response: still collect the granted credits, then free the mid entry
 * instead of waking the (long gone) submitting thread.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
1031
5f68ea4a
AA
1032/*
1033 * Return a channel (master if none) of @ses that can be used to send
1034 * regular requests.
1035 *
1036 * If we are currently binding a new channel (negprot/sess.setup),
1037 * return the new incomplete channel.
1038 */
1039struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1040{
1041 uint index = 0;
1042
1043 if (!ses)
1044 return NULL;
1045
f486ef8e 1046 /* round robin */
bda487ac 1047 index = (uint)atomic_inc_return(&ses->chan_seq);
88b024f5
SP
1048
1049 spin_lock(&ses->chan_lock);
bda487ac 1050 index %= ses->chan_count;
88b024f5 1051 spin_unlock(&ses->chan_lock);
f486ef8e
SP
1052
1053 return ses->chans[index].server;
5f68ea4a
AA
1054}
1055
/*
 * Send @num_rqst compounded requests on @server as a single chain and wait
 * for all the responses.
 *
 * @xid: transaction id used for debug tracing
 * @ses: session the requests belong to
 * @server: channel of @ses to send on
 * @flags: operation type (CIFS_OP_MASK), wait class and response options
 * @num_rqst: number of requests in @rqst (at most MAX_COMPOUND)
 * @rqst: the marshalled requests
 * @resp_buf_type: out - CIFS_{NO,SMALL,LARGE}_BUFFER for each response
 * @resp_iov: out - one kvec per received response
 *
 * Unless CIFS_NO_RSP_BUF is set, the caller owns the response buffers
 * described by @resp_iov on return and must free them. Returns 0 or a
 * negative errno.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&cifs_tcp_ses_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	/* one credit was consumed per request in the chain */
	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo the mids allocated so far and bail out */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: take back the mids and signing sequence */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&cifs_tcp_ses_lock);

		/* feed the request into the SMB3.1.1 preauth hash */
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel every mid we did not get a response for */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				/* response may still arrive: let the
				   demultiplex thread free the mid and its
				   credit via cifs_cancelled_callback */
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		/* hand the response buffer to the caller */
		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&cifs_tcp_ses_lock);
		/* feed the response into the SMB3.1.1 preauth hash too */
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1290
/*
 * Send a single (non-compounded) request and wait for its response.
 * Thin wrapper around compound_send_recv() with a chain length of one.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1, rqst,
				  resp_buf_type, resp_iov);
}
1300
738f9de5
PS
1301int
1302SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1303 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1304 const int flags, struct kvec *resp_iov)
1305{
1306 struct smb_rqst rqst;
3cecf486 1307 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1308 int rc;
1309
3cecf486 1310 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1311 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1312 GFP_KERNEL);
117e3b7f
SF
1313 if (!new_iov) {
1314 /* otherwise cifs_send_recv below sets resp_buf_type */
1315 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1316 return -ENOMEM;
117e3b7f 1317 }
3cecf486
RS
1318 } else
1319 new_iov = s_iov;
738f9de5
PS
1320
1321 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1322 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1323
1324 new_iov[0].iov_base = new_iov[1].iov_base;
1325 new_iov[0].iov_len = 4;
1326 new_iov[1].iov_base += 4;
1327 new_iov[1].iov_len -= 4;
1328
1329 memset(&rqst, 0, sizeof(struct smb_rqst));
1330 rqst.rq_iov = new_iov;
1331 rqst.rq_nvec = n_vec + 1;
1332
352d96f3
AA
1333 rc = cifs_send_recv(xid, ses, ses->server,
1334 &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1335 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1336 kfree(new_iov);
738f9de5
PS
1337 return rc;
1338}
1339
/*
 * Send a single legacy (SMB1) request frame and wait synchronously for
 * its response.
 *
 * @xid: transaction id for debug tracing
 * @ses: session to send on
 * @in_buf: fully-built SMB frame, starting with the RFC1002 length
 * @out_buf: buffer the raw response frame is copied into
 * @pbytes_returned: out - length of the response (excluding the 4-byte
 *	RFC1002 header, which is copied as well)
 * @flags: wait class passed to wait_for_free_request()
 *
 * Returns 0 on success or a negative errno.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&cifs_tcp_ses_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	/* consumes one credit; returned on every exit path below */
	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* the frame was signed but never hit the wire - roll back the
	   signing sequence numbers */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		/* response arrived while we were cancelling - fall through
		   and reap it normally */
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid was already freed by cifs_sync_mid_result */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the whole frame (RFC1002 header included) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1455
7ee1af76
JA
1456/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1457 blocking lock to return. */
1458
1459static int
96daf2b0 1460send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1461 struct smb_hdr *in_buf,
1462 struct smb_hdr *out_buf)
1463{
1464 int bytes_returned;
96daf2b0 1465 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1466 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1467
1468 /* We just modify the current in_buf to change
1469 the type of lock from LOCKING_ANDX_SHARED_LOCK
1470 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1471 LOCKING_ANDX_CANCEL_LOCK. */
1472
1473 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1474 pSMB->Timeout = 0;
88257360 1475 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1476
1477 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1478 &bytes_returned, 0);
7ee1af76
JA
1479}
1480
/*
 * Send a blocking byte-range lock request (SMB1) and wait for it, allowing
 * the wait to be interrupted by signals. On interruption a cancel is sent
 * (NT_CANCEL for POSIX/T2 locks, LOCKING_ANDX_CANCEL_LOCK for Windows
 * locks) and the original call is restarted via -ERESTARTSYS once the
 * cancelled request completes.
 *
 * @pbytes_returned: out - response length excluding the RFC1002 header.
 * Returns 0 on success or a negative errno.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&cifs_tcp_ses_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&cifs_tcp_ses_lock);
		return -ENOENT;
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	/* CIFS_BLOCKING_OP: this request may legitimately wait a long time */
	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* frame was signed but never sent - roll back signing sequence */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&cifs_tcp_ses_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&cifs_tcp_ses_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* the cancel was sent - now wait (uninterruptibly) for the
		   original request to complete or fail */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&cifs_tcp_ses_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the whole frame (RFC1002 header included) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	/* EACCES after a cancel means the lock attempt should be retried */
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
1638}