]> git.ipfire.org Git - people/ms/linux.git/blame - fs/cifs/transport.c
cifs: when insecure legacy is disabled shrink amount of SMB1 code
[people/ms/linux.git] / fs / cifs / transport.c
CommitLineData
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
1da177e4
LT
21#include <asm/processor.h>
22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
fb157ed2 24#include <linux/task_io_accounting_ops.h>
1da177e4
LT
25#include "cifspdu.h"
26#include "cifsglob.h"
27#include "cifsproto.h"
28#include "cifs_debug.h"
8bd68c6e 29#include "smb2proto.h"
9762c2d0 30#include "smbdirect.h"
50c2f753 31
3cecf486
RS
32/* Max number of iovectors we can use off the stack when sending requests. */
33#define CIFS_MAX_IOV_SIZE 8
34
2dc7e1c0
PS
/*
 * Default mid completion callback: the request was sent synchronously,
 * so callback_data holds the task_struct of the sending thread (set in
 * AllocMidQEntry); wake it so it can pick up the response.
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
40
/*
 * Allocate and initialize a mid (multiplex id) entry tracking one
 * outstanding request built from @smb_buffer on @server.  The entry
 * starts in MID_REQUEST_ALLOCATED state with a refcount of 1 and
 * defaults to synchronous completion: the callback wakes the current
 * task.  Returns the new entry, or NULL if @server is NULL.
 *
 * NOTE(review): the mempool_alloc() result is not NULL-checked; with
 * GFP_NOFS the mempool waits for a free element rather than failing.
 */
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	/* hold a task reference until _cifs_mid_q_entry_release() */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
76
696e420b
LP
/*
 * Final kref release for a mid entry: give the server's cancel handler a
 * last look at requests that were cancelled but still got a response,
 * free the response buffer, record per-command round-trip statistics
 * (CONFIG_CIFS_STATS2 only), drop the creator task reference, and return
 * the entry to the mempool.  Reached via cifs_mid_q_entry_release(),
 * which holds server->mid_lock across the kref_put.
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	/* lock_cmd is excluded from slow-response logging below, since
	 * blocking locks legitimately take a long time */
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/* a cancelled request that nevertheless completed may need
	 * protocol-level cleanup (e.g. closing a handle the server opened) */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	/* resp_buf came from either the large or the small buffer pool */
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/total latency per SMB2+ command code */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	/* pairs with get_task_struct() in AllocMidQEntry() */
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
155
/*
 * Drop a reference on @midEntry.  The put is performed under the
 * server's mid_lock so that the final release cannot race with code
 * scanning the pending mid queue.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	struct TCP_Server_Info *server = midEntry->server;

	spin_lock(&server->mid_lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&server->mid_lock);
}
164
/* Legacy-named wrapper: drop the caller's reference on @midEntry. */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
169
3c1bf7e4
PS
/*
 * Unlink @mid from the server's pending_mid_q (the MID_DELETED flag
 * guards against a double list_del when another path already removed
 * it) and then drop the caller's reference.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	DeleteMidQEntry(mid);
}
182
6f49f46b
JL
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 *
 * Returns 0 once msg_iter is fully drained, or a negative errno
 * (-EAGAIN after the retry budget is exhausted, or whatever
 * sock_sendmsg() reported).
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	/* MSG_NOSIGNAL: a dead peer must not raise SIGPIPE here */
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			/* exponential backoff: 2ms, 4ms, ... between tries */
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
260
/*
 * Total number of payload bytes @rqst describes: the kvec array plus the
 * optional trailing page array.  For dialects with no RFC1002 preamble
 * (header_preamble_size == 0, i.e. SMB2+), a leading 4-byte iov[0] is
 * skipped — presumably it is the length-marker slot, which
 * __smb_send_rqst() regenerates itself for that case.
 */
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
304
/*
 * Transmit @num_rqst requests on the server's socket, or hand them to
 * smbdirect when RDMA is enabled.  For SMB2+ a 4-byte RFC1002 length
 * marker is generated and sent first.  All signals are blocked for the
 * duration of the send so that a partial send cannot be caused by
 * interruption; if the send still ends up partial, the transport is
 * flagged for reconnect because the server would otherwise interpret
 * the next SMB as the remainder of this one.
 *
 * NOTE(review): per smb_send_kvec()'s contract this should run with the
 * server mutex held — confirm at the call sites.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket: batch all pieces into full segments */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
449
/*
 * Send @num_rqst requests, encrypting them first when CIFS_TRANSFORM_REQ
 * is set in @flags: a transform-header iov is prepended as cur_rqst[0]
 * and the server's init_transform_rq op builds the encrypted copies in
 * cur_rqst[1..], which are sent and then freed.  Without the flag this
 * degenerates to a plain __smb_send_rqst().
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* slot 0 is reserved for the transform header */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	/* free only the transformed copies, not the caller's rqst */
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}
493
0496e02d
JL
494int
495smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
496 unsigned int smb_buf_length)
497{
738f9de5 498 struct kvec iov[2];
7fb8986e
PS
499 struct smb_rqst rqst = { .rq_iov = iov,
500 .rq_nvec = 2 };
0496e02d 501
738f9de5
PS
502 iov[0].iov_base = smb_buffer;
503 iov[0].iov_len = 4;
504 iov[1].iov_base = (char *)smb_buffer + 4;
505 iov[1].iov_len = smb_buf_length;
0496e02d 506
07cd952f 507 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
508}
509
/*
 * Block until @num_credits credits are available on @server, consume
 * them, and record the reconnect generation in *@instance.
 *
 * @timeout: milliseconds to wait; negative means wait indefinitely
 * @flags:   CIFS_OP_MASK bits select the credit pool via
 *           get_credits_field(); CIFS_NON_BLOCKING requests (e.g. oplock
 *           breaks) are admitted immediately, and CIFS_BLOCKING_OP
 *           requests do not consume credits at all.
 *
 * Returns 0 on success, -EAGAIN (echo with no credits), -ENOENT (server
 * exiting), -ERESTARTSYS (killed), or -EBUSY (timed out).  The last
 * MAX_COMPOUND credits are reserved for compound requests once many
 * requests are in flight, so single-credit requests cannot starve them.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* not enough credits yet: sleep until woken or timeout */
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&server->srv_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->srv_lock);
				return -ENOENT;
			}
			spin_unlock(&server->srv_lock);

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				/* re-check credit level from the top */
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
1da177e4 663
/*
 * Acquire a single send credit for @server, waiting forever if needed.
 * On success the reconnect generation is stored in *@instance.
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	/* one credit, negative timeout == wait indefinitely */
	return wait_for_free_credits(server, 1, -1, flags, instance);
}
671
257b7809
RS
/*
 * Like wait_for_free_request() but for @num credits at once (compound
 * requests).  If fewer than @num credits are available and nothing is in
 * flight, fail fast with -EDEADLK: with no responses outstanding the
 * pool can never be replenished, so waiting would hang.  Otherwise wait
 * up to 60 seconds for the credits.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
717
cb7e9eab
PS
/*
 * MTU-credit hook with no real credit accounting: grant the full
 * requested @size and return a zero-value credit stamped with the
 * current reconnect generation, so releasing it later is a no-op.
 * Always returns 0.
 */
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
727
/*
 * Validate session state, allocate a mid entry for @in_buf, and queue it
 * on the server's pending_mid_q.  While the session is still SES_NEW
 * only session-setup/negotiate commands may pass, and while it is
 * SES_EXITING only LOGOFF may pass.  Returns 0, -EAGAIN (bad session
 * state) or -ENOMEM.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_lock);
	return 0;
}
759
0ade640e
JL
760static int
761wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 762{
0ade640e 763 int error;
7ee1af76 764
5853cc2a 765 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 766 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
767 if (error < 0)
768 return -ERESTARTSYS;
7ee1af76 769
0ade640e 770 return 0;
7ee1af76
JA
771}
772
fec344e3
JL
/*
 * Build a signed mid entry for an async request.  rq_iov[0] must be the
 * 4-byte RFC1002 length field immediately followed in memory by the SMB
 * header in rq_iov[1] (the signing code relies on this layout).
 * Returns the new mid, or ERR_PTR(-EIO/-ENOMEM/signing error).
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
133672ef 800
a6827c18
JL
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * @receive/@callback/@handle/@cbdata are stored in the mid for the
 * demultiplex thread to invoke when the response arrives.  Unless the
 * caller already holds credits (CIFS_HAS_CREDITS in @flags, with
 * @exist_credits supplied), one credit is acquired here and released on
 * any failure path.  Credits from a previous connection generation are
 * rejected with -EAGAIN so the caller can retry after reconnect.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	cifs_server_lock(server);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&server->mid_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* undo the mid/sequence-number bump; no response will come */
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	cifs_server_unlock(server);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
882
133672ef
SF
883/*
884 *
885 * Send an SMB Request. No response info (other than return code)
886 * needs to be parsed.
887 *
888 * flags indicate the type of request buffer and how long to wait
889 * and whether to log NT STATUS code (error) before mapping it to POSIX error
890 *
891 */
892int
96daf2b0 893SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 894 char *in_buf, int flags)
133672ef
SF
895{
896 int rc;
897 struct kvec iov[1];
da502f7d 898 struct kvec rsp_iov;
133672ef
SF
899 int resp_buf_type;
900
792af7b0
PS
901 iov[0].iov_base = in_buf;
902 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 903 flags |= CIFS_NO_RSP_BUF;
da502f7d 904 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 905 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 906
133672ef
SF
907 return rc;
908}
909
/*
 * Translate the final state of a synchronously-awaited @mid into an
 * errno and release the entry.  A mid found in an unexpected state is
 * unlinked from the pending queue here (guarded by MID_DELETED) before
 * being freed, and reported as -EIO.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: caller owns the response buffer via the mid */
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&server->mid_lock);

	DeleteMidQEntry(mid);
	return rc;
}
946
121b046a 947static inline int
fb2036d8
PS
948send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
949 struct mid_q_entry *mid)
76dcc26f 950{
121b046a 951 return server->ops->send_cancel ?
fb2036d8 952 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
953}
954
2c8f981d
JL
955int
956cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
957 bool log_error)
958{
792af7b0 959 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
960
961 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
962
963 /* convert the length into a more usable form */
38d77c50 964 if (server->sign) {
738f9de5 965 struct kvec iov[2];
985e4ff0 966 int rc = 0;
738f9de5
PS
967 struct smb_rqst rqst = { .rq_iov = iov,
968 .rq_nvec = 2 };
826a95e4 969
738f9de5
PS
970 iov[0].iov_base = mid->resp_buf;
971 iov[0].iov_len = 4;
972 iov[1].iov_base = (char *)mid->resp_buf + 4;
973 iov[1].iov_len = len - 4;
2c8f981d 974 /* FIXME: add code to kill session */
bf5ea0e2 975 rc = cifs_verify_signature(&rqst, server,
0124cc45 976 mid->sequence_number);
985e4ff0 977 if (rc)
afe6f653 978 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
f96637be 979 rc);
2c8f981d
JL
980 }
981
982 /* BB special case reconnect tid and uid here? */
a3713ec3 983 return map_and_check_smb_error(mid, log_error);
2c8f981d
JL
984}
985
fec344e3 986struct mid_q_entry *
f780bd3f
AA
987cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
988 struct smb_rqst *rqst)
792af7b0
PS
989{
990 int rc;
fec344e3 991 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
992 struct mid_q_entry *mid;
993
738f9de5
PS
994 if (rqst->rq_iov[0].iov_len != 4 ||
995 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
996 return ERR_PTR(-EIO);
997
792af7b0
PS
998 rc = allocate_mid(ses, hdr, &mid);
999 if (rc)
fec344e3
JL
1000 return ERR_PTR(rc);
1001 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
1002 if (rc) {
3c1bf7e4 1003 cifs_delete_mid(mid);
fec344e3
JL
1004 return ERR_PTR(rc);
1005 }
1006 return mid;
792af7b0
PS
1007}
1008
4e34feb5 1009static void
ee258d79 1010cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
1011{
1012 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
1013 struct cifs_credits credits;
1014
1015 credits.value = server->ops->get_credits(mid);
1016 credits.instance = server->reconnect_instance;
8a26f0f7 1017
34f4deb7 1018 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
1019}
1020
ee258d79
PS
/*
 * Callback for the final PDU of a compound chain: account credits like
 * every other part, then wake the thread sleeping in compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1027
/*
 * Callback installed when the waiting thread has given up on a mid:
 * still account the credits, but free the mid here since no one will
 * come back to collect it.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
1034
5f68ea4a
AA
1035/*
1036 * Return a channel (master if none) of @ses that can be used to send
1037 * regular requests.
1038 *
1039 * If we are currently binding a new channel (negprot/sess.setup),
1040 * return the new incomplete channel.
1041 */
1042struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1043{
1044 uint index = 0;
1045
1046 if (!ses)
1047 return NULL;
1048
f486ef8e 1049 /* round robin */
bda487ac 1050 index = (uint)atomic_inc_return(&ses->chan_seq);
88b024f5
SP
1051
1052 spin_lock(&ses->chan_lock);
bda487ac 1053 index %= ses->chan_count;
88b024f5 1054 spin_unlock(&ses->chan_lock);
f486ef8e
SP
1055
1056 return ses->chans[index].server;
5f68ea4a
AA
1057}
1058
/*
 * Send up to MAX_COMPOUND related requests as one chain and wait for all
 * of their responses.
 *
 * @server:         channel to send on (may differ from ses->server with
 *                  multichannel)
 * @flags:          CIFS_OP_MASK selects the operation type; also carries
 *                  wait/buffer/logging modifiers
 * @num_rqst:       number of requests in @rqst (<= MAX_COMPOUND)
 * @resp_buf_type:  out: CIFS_{NO,SMALL,LARGE}_BUFFER per request
 * @resp_iov:       out: one iovec per response; caller frees the buffers
 *                  unless CIFS_NO_RSP_BUF was set
 *
 * Returns 0 on success or a negative errno.  On success the caller owns
 * the response buffers described by @resp_iov.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session.  We can not use credits obtained from a previous
	 * session to send this request.  Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo mids already set up, then give credits back */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: roll back mid counter and signing sequence */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		/* preauth hash update requires the server mutex, so drop
		   ses_lock around it (lock ordering) */
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* wait was interrupted: cancel this and all later parts */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
					midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1293
e0bba0b8
RS
/*
 * Convenience wrapper: a single request/response round trip is just a
 * compound chain of length one.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1303
738f9de5
PS
1304int
1305SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1306 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1307 const int flags, struct kvec *resp_iov)
1308{
1309 struct smb_rqst rqst;
3cecf486 1310 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1311 int rc;
1312
3cecf486 1313 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1314 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1315 GFP_KERNEL);
117e3b7f
SF
1316 if (!new_iov) {
1317 /* otherwise cifs_send_recv below sets resp_buf_type */
1318 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1319 return -ENOMEM;
117e3b7f 1320 }
3cecf486
RS
1321 } else
1322 new_iov = s_iov;
738f9de5
PS
1323
1324 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1325 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1326
1327 new_iov[0].iov_base = new_iov[1].iov_base;
1328 new_iov[0].iov_len = 4;
1329 new_iov[1].iov_base += 4;
1330 new_iov[1].iov_len -= 4;
1331
1332 memset(&rqst, 0, sizeof(struct smb_rqst));
1333 rqst.rq_iov = new_iov;
1334 rqst.rq_nvec = n_vec + 1;
1335
352d96f3
AA
1336 rc = cifs_send_recv(xid, ses, ses->server,
1337 &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1338 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1339 kfree(new_iov);
738f9de5
PS
1340 return rc;
1341}
1342
/*
 * Send a single SMB1 request and wait synchronously for its response,
 * copying the received frame into @out_buf.
 *
 * @in_buf:           fully-built request (RFC1001 length already set)
 * @out_buf:          buffer for the response frame
 * @pbytes_returned:  out: RFC1001 length of the response
 * @flags:            wait/blocking-op modifiers passed to the credit wait
 *
 * Returns 0 on success or a negative errno.  One credit is taken before
 * sending and is returned on every exit path.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* on send failure, roll back the signing sequence counter */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid already freed by cifs_sync_mid_result on error */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the whole frame: RFC1001 length (4 bytes) + payload */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1458
7ee1af76
JA
1459/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1460 blocking lock to return. */
1461
1462static int
96daf2b0 1463send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1464 struct smb_hdr *in_buf,
1465 struct smb_hdr *out_buf)
1466{
1467 int bytes_returned;
96daf2b0 1468 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1469 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1470
1471 /* We just modify the current in_buf to change
1472 the type of lock from LOCKING_ANDX_SHARED_LOCK
1473 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1474 LOCKING_ANDX_CANCEL_LOCK. */
1475
1476 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1477 pSMB->Timeout = 0;
88257360 1478 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1479
1480 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1481 &bytes_returned, 0);
7ee1af76
JA
1482}
1483
/*
 * Send an SMB1 blocking-lock request and wait for its response,
 * interruptibly.  If a signal arrives while the lock is still pending,
 * a cancel (NT_CANCEL for POSIX/T2 locks, LOCKINGX_CANCEL_LOCK for
 * Windows locks) is sent so the server releases the blocked request,
 * and the syscall is restarted (-ERESTARTSYS) where appropriate.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;	/* set once we got a response after being signalled */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* on send failure, roll back the signing sequence counter */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* wait (uninterruptibly this time) for the cancelled
		   request's response */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the whole frame: RFC1001 length (4 bytes) + payload */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
fb157ed2
SF
1642
1643/*
1644 * Discard any remaining data in the current SMB. To do this, we borrow the
1645 * current bigbuf.
1646 */
1647int
1648cifs_discard_remaining_data(struct TCP_Server_Info *server)
1649{
1650 unsigned int rfclen = server->pdu_size;
1651 int remaining = rfclen + server->vals->header_preamble_size -
1652 server->total_read;
1653
1654 while (remaining > 0) {
1655 int length;
1656
1657 length = cifs_discard_from_socket(server,
1658 min_t(size_t, remaining,
1659 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1660 if (length < 0)
1661 return length;
1662 server->total_read += length;
1663 remaining -= length;
1664 }
1665
1666 return 0;
1667}
1668
1669static int
1670__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1671 bool malformed)
1672{
1673 int length;
1674
1675 length = cifs_discard_remaining_data(server);
1676 dequeue_mid(mid, malformed);
1677 mid->resp_buf = server->smallbuf;
1678 server->smallbuf = NULL;
1679 return length;
1680}
1681
1682static int
1683cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1684{
1685 struct cifs_readdata *rdata = mid->callback_data;
1686
1687 return __cifs_readv_discard(server, mid, rdata->result);
1688}
1689
/*
 * Receive the remainder of a READ response directly off the socket.
 * Called from the demultiplex thread after the header has been read up
 * to the Mid.  Parses the READ_RSP header, validates offsets/lengths,
 * then streams the data payload into the pages attached to the mid's
 * cifs_readdata (or via RDMA when an MR is registered).  On any parse
 * error the rest of the frame is discarded and rdata->result is set.
 */
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size +
		server->vals->header_preamble_size;
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
		HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		/* interim response; the real one comes later */
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = server->vals->header_preamble_size;
	rdata->iov[1].iov_base = buf + server->vals->header_preamble_size;
	rdata->iov[1].iov_len =
		server->total_read - server->vals->header_preamble_size;
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		server->vals->header_preamble_size;
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	/* with RDMA the payload arrives via the MR, not this socket read */
	length = rdata->read_into_pages(server, rdata, data_len);
	if (length < 0)
		return length;

	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}