]> git.ipfire.org Git - people/ms/linux.git/blame - fs/cifs/transport.c
cifs: don't send down the destination address to sendmsg for a SOCK_STREAM
[people/ms/linux.git] / fs / cifs / transport.c
CommitLineData
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
1da177e4
LT
21#include <asm/processor.h>
22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
fb157ed2 24#include <linux/task_io_accounting_ops.h>
1da177e4
LT
25#include "cifspdu.h"
26#include "cifsglob.h"
27#include "cifsproto.h"
28#include "cifs_debug.h"
8bd68c6e 29#include "smb2proto.h"
9762c2d0 30#include "smbdirect.h"
50c2f753 31
3cecf486
RS
32/* Max number of iovectors we can use off the stack when sending requests. */
33#define CIFS_MAX_IOV_SIZE 8
34
2dc7e1c0
PS
/*
 * Default mid callback: the request was submitted synchronously, so the
 * submitting task is stored in callback_data — just wake it up.
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
40
/*
 * Allocate and initialize a mid_q_entry for @smb_buffer on @server.
 *
 * Takes the initial kref reference; the caller drops it via release_mid().
 * Returns NULL only when @server is NULL — mempool_alloc() with GFP_NOFS
 * sleeps until an element is available, so its result is not checked here
 * (NOTE(review): relies on mempool semantics; confirm pool minimum size).
 */
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	/* pin the task so callback_data stays valid until __release_mid */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
76
/*
 * kref release callback for a mid_q_entry: runs cancelled-mid handling,
 * frees the response buffer, updates per-command latency statistics
 * (CONFIG_CIFS_STATS2), drops the creator task reference and returns the
 * entry to the mempool. Called with server->mid_lock held (see release_mid).
 */
static void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/* a response arrived after the caller gave up waiting — let the
	   protocol ops clean up (e.g. close a handle opened by the reply) */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
155
/*
 * Drop a reference on @mid, freeing it via __release_mid() when the count
 * hits zero. The kref_put is done under mid_lock so the release callback
 * cannot race with list walkers that hold the same lock.
 */
void release_mid(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;

	spin_lock(&server->mid_lock);
	kref_put(&mid->refcount, __release_mid);
	spin_unlock(&server->mid_lock);
}
164
/*
 * Unlink @mid from the server's pending queue (exactly once, guarded by
 * MID_DELETED) and drop the caller's reference.
 */
void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	release_mid(mid);
}
177
6f49f46b
JL
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 *
 * Returns 0 once msg_iter is fully consumed, -EAGAIN when a nonblocking
 * socket stays stuck past the retry budget, or the sendmsg error.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	/* connected SOCK_STREAM: no destination address on sendmsg */
	smb_msg->msg_name = NULL;
	smb_msg->msg_namelen = 0;
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking  we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			/* exponential backoff: 2ms, 4ms, ... between retries */
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}
255
/*
 * Compute the total on-the-wire length of @rqst: the iov array plus any
 * attached page data. For SMB2+ requests whose first 4-byte iov is the
 * RFC1002 length field, that iov is excluded (the marker is generated
 * separately by __smb_send_rqst).
 */
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
	    rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}
299
/*
 * Transmit @num_rqst already-formed requests on @server's socket (or via
 * smbdirect when RDMA is enabled). Blocks all signals for the duration of
 * the send so a partial send cannot be caused by an interrupt, corks the
 * socket around the whole compound, and forces a reconnect if only part
 * of the data could be written. Caller must hold the server mutex
 * (NOTE(review): implied by the smb_send_kvec contract — confirm).
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
444
/*
 * Send @num_rqst requests, encrypting them first when CIFS_TRANSFORM_REQ
 * is set in @flags. For the encrypted path a transform header is prepended
 * as cur_rqst[0] and the protocol's init_transform_rq op builds the
 * encrypted copies in cur_rqst[1..num_rqst], which are freed after the send.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* slot 0 is reserved for the transform header below */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}
488
0496e02d
JL
489int
490smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
491 unsigned int smb_buf_length)
492{
738f9de5 493 struct kvec iov[2];
7fb8986e
PS
494 struct smb_rqst rqst = { .rq_iov = iov,
495 .rq_nvec = 2 };
0496e02d 496
738f9de5
PS
497 iov[0].iov_base = smb_buffer;
498 iov[0].iov_len = 4;
499 iov[1].iov_base = (char *)smb_buffer + 4;
500 iov[1].iov_len = smb_buf_length;
0496e02d 501
07cd952f 502 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
503}
504
/*
 * Reserve @num_credits send credits on @server, waiting up to @timeout ms
 * (negative = wait forever). Oplock-break (CIFS_NON_BLOCKING) requests are
 * never held up. On success *instance is set to the reconnect instance the
 * credits belong to so callers can detect a reconnect in between.
 *
 * Returns 0 on success, -EAGAIN for an echo with no credits, -EBUSY on
 * timeout, -ERESTARTSYS if killed, -ENOENT if the server is exiting.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				/* timed out: snapshot counters for the trace */
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&server->srv_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->srv_lock);
				return -ENOENT;
			}
			spin_unlock(&server->srv_lock);

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
1da177e4 658
/*
 * Convenience wrapper: reserve a single credit with no timeout
 * (wait forever — timeout of -1 maps to MAX_JIFFY_OFFSET).
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
666
/*
 * Reserve @num credits for a compound request, waiting up to 60 seconds.
 * Fails fast with -EDEADLK when there are not enough credits AND nothing
 * is in flight, since no response could ever replenish the pool.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
712
cb7e9eab
PS
713int
714cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 715 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
716{
717 *num = size;
335b7b62
PS
718 credits->value = 0;
719 credits->instance = server->reconnect_instance;
cb7e9eab
PS
720 return 0;
721}
722
/*
 * Allocate a mid for @in_buf and queue it on the server's pending list.
 * While a session is being set up (SES_NEW) only session-setup/negotiate
 * commands are allowed; while it is being torn down (SES_EXITING) only
 * logoff is. Returns -EAGAIN for disallowed commands, -ENOMEM on
 * allocation failure, 0 on success with *ppmidQ set.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_lock);
	return 0;
}
754
0ade640e
JL
755static int
756wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 757{
0ade640e 758 int error;
7ee1af76 759
5853cc2a 760 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 761 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
762 if (error < 0)
763 return -ERESTARTSYS;
7ee1af76 764
0ade640e 765 return 0;
7ee1af76
JA
766}
767
fec344e3
JL
/*
 * Build a mid for an async SMB1 request: validate that iov[0] is the
 * 4-byte RFC1002 header immediately followed by iov[1], set the signing
 * flag if required, allocate the mid and sign the request.
 * Returns the mid or an ERR_PTR (-EIO bad iov layout, -ENOMEM, or the
 * signing error).
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
133672ef 795
a6827c18
JL
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * Obtains a credit unless the caller passed CIFS_HAS_CREDITS with
 * @exist_credits, refuses credits from a previous reconnect instance
 * (-EAGAIN), and on send failure rolls back the mid, the sequence number
 * and the credits before returning the error.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	cifs_server_lock(server);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&server->mid_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* undo mid/sequence-number state so a retry can reuse them */
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		delete_mid(mid);
	}

	cifs_server_unlock(server);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
877
133672ef
SF
/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	/* caller does not want the response buffer kept around */
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
904
/*
 * Translate the final state of a synchronously-waited mid into an errno
 * and release the caller's reference. A mid in an unexpected state is
 * unlinked from the pending queue here (under mid_lock) before returning
 * -EIO. On MID_RESPONSE_RECEIVED the mid stays queued for the caller to
 * consume and is released after the early return.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&server->mid_lock);

	release_mid(mid);
	return rc;
}
941
121b046a 942static inline int
fb2036d8
PS
943send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
944 struct mid_q_entry *mid)
76dcc26f 945{
121b046a 946 return server->ops->send_cancel ?
fb2036d8 947 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
948}
949
2c8f981d
JL
950int
951cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
952 bool log_error)
953{
792af7b0 954 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
955
956 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
957
958 /* convert the length into a more usable form */
38d77c50 959 if (server->sign) {
738f9de5 960 struct kvec iov[2];
985e4ff0 961 int rc = 0;
738f9de5
PS
962 struct smb_rqst rqst = { .rq_iov = iov,
963 .rq_nvec = 2 };
826a95e4 964
738f9de5
PS
965 iov[0].iov_base = mid->resp_buf;
966 iov[0].iov_len = 4;
967 iov[1].iov_base = (char *)mid->resp_buf + 4;
968 iov[1].iov_len = len - 4;
2c8f981d 969 /* FIXME: add code to kill session */
bf5ea0e2 970 rc = cifs_verify_signature(&rqst, server,
0124cc45 971 mid->sequence_number);
985e4ff0 972 if (rc)
afe6f653 973 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
f96637be 974 rc);
2c8f981d
JL
975 }
976
977 /* BB special case reconnect tid and uid here? */
a3713ec3 978 return map_and_check_smb_error(mid, log_error);
2c8f981d
JL
979}
980
fec344e3 981struct mid_q_entry *
f780bd3f
AA
982cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
983 struct smb_rqst *rqst)
792af7b0
PS
984{
985 int rc;
fec344e3 986 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
987 struct mid_q_entry *mid;
988
738f9de5
PS
989 if (rqst->rq_iov[0].iov_len != 4 ||
990 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
991 return ERR_PTR(-EIO);
992
792af7b0
PS
993 rc = allocate_mid(ses, hdr, &mid);
994 if (rc)
fec344e3
JL
995 return ERR_PTR(rc);
996 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
997 if (rc) {
70f08f91 998 delete_mid(mid);
fec344e3
JL
999 return ERR_PTR(rc);
1000 }
1001 return mid;
792af7b0
PS
1002}
1003
4e34feb5 1004static void
ee258d79 1005cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
1006{
1007 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
1008 struct cifs_credits credits;
1009
1010 credits.value = server->ops->get_credits(mid);
1011 credits.instance = server->reconnect_instance;
8a26f0f7 1012
34f4deb7 1013 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
1014}
1015
/*
 * Completion callback for the final mid of a compound chain: return its
 * credits and wake the thread sleeping in compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1022
/*
 * Completion callback installed on a mid whose waiter gave up: return the
 * credits and drop the reference, since no thread will collect the result.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}
1029
5f68ea4a
AA
1030/*
1031 * Return a channel (master if none) of @ses that can be used to send
1032 * regular requests.
1033 *
1034 * If we are currently binding a new channel (negprot/sess.setup),
1035 * return the new incomplete channel.
1036 */
1037struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1038{
1039 uint index = 0;
1040
1041 if (!ses)
1042 return NULL;
1043
f486ef8e 1044 /* round robin */
bda487ac 1045 index = (uint)atomic_inc_return(&ses->chan_seq);
88b024f5
SP
1046
1047 spin_lock(&ses->chan_lock);
bda487ac 1048 index %= ses->chan_count;
88b024f5 1049 spin_unlock(&ses->chan_lock);
f486ef8e
SP
1050
1051 return ses->chans[index].server;
5f68ea4a
AA
1052}
1053
/*
 * Send a chain of @num_rqst requests on @server and wait for the responses.
 *
 * Obtains one credit per request up front, sets up and signs a mid for each
 * part under the server mutex (so signing order matches send order), sends
 * the whole chain with a single smb_send_rqst() call, then waits for each
 * response.  Credits are returned to the server either here (on send
 * failure / no-response requests) or by the per-mid callbacks.
 *
 * On success, resp_iov[i]/resp_buf_type[i] describe each response buffer,
 * which the caller owns unless CIFS_NO_RSP_BUF was set.  Returns 0 or a
 * negative errno (-EAGAIN asks the caller to retry after reconnect).
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo mid allocation and free the earlier mids */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel the remaining parts; mids whose response may still
		   arrive get the cancelled callback and keep their credit */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1288
/*
 * Send a single (non-compounded) request and wait for its response.
 * Thin wrapper around compound_send_recv() with num_rqst == 1.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1298
738f9de5
PS
1299int
1300SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1301 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1302 const int flags, struct kvec *resp_iov)
1303{
1304 struct smb_rqst rqst;
3cecf486 1305 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1306 int rc;
1307
3cecf486 1308 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1309 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1310 GFP_KERNEL);
117e3b7f
SF
1311 if (!new_iov) {
1312 /* otherwise cifs_send_recv below sets resp_buf_type */
1313 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1314 return -ENOMEM;
117e3b7f 1315 }
3cecf486
RS
1316 } else
1317 new_iov = s_iov;
738f9de5
PS
1318
1319 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1320 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1321
1322 new_iov[0].iov_base = new_iov[1].iov_base;
1323 new_iov[0].iov_len = 4;
1324 new_iov[1].iov_base += 4;
1325 new_iov[1].iov_len -= 4;
1326
1327 memset(&rqst, 0, sizeof(struct smb_rqst));
1328 rqst.rq_iov = new_iov;
1329 rqst.rq_nvec = n_vec + 1;
1330
352d96f3
AA
1331 rc = cifs_send_recv(xid, ses, ses->server,
1332 &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1333 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1334 kfree(new_iov);
738f9de5
PS
1335 return rc;
1336}
1337
/*
 * Send a legacy SMB1 request in @in_buf and wait for the response, copying
 * it into @out_buf and returning the byte count in @pbytes_returned.
 *
 * Takes one credit, allocates and signs a mid under the server mutex (so
 * signing order matches send order), sends, then blocks until the response
 * arrives or the wait is interrupted.  The credit is always returned on
 * every exit path; the mid is freed here as well.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* undo the sequence numbers consumed by the failed send */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid already released by cifs_sync_mid_result */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1453
7ee1af76
JA
1454/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1455 blocking lock to return. */
1456
1457static int
96daf2b0 1458send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1459 struct smb_hdr *in_buf,
1460 struct smb_hdr *out_buf)
1461{
1462 int bytes_returned;
96daf2b0 1463 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1464 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1465
1466 /* We just modify the current in_buf to change
1467 the type of lock from LOCKING_ANDX_SHARED_LOCK
1468 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1469 LOCKING_ANDX_CANCEL_LOCK. */
1470
1471 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1472 pSMB->Timeout = 0;
88257360 1473 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1474
1475 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1476 &bytes_returned, 0);
7ee1af76
JA
1477}
1478
/*
 * Send a blocking byte-range lock request and wait for it to complete.
 *
 * Unlike SendReceive(), the wait is interruptible: if a signal arrives
 * while the lock is still pending, the lock is cancelled on the server
 * (via NT_CANCEL for POSIX/trans2 locks, or a LOCKINGX_CANCEL_LOCK for
 * Windows locks), the cancel's response is collected, and -ERESTARTSYS
 * is returned (when the lock came back as -EACCES) so the system call
 * can be restarted.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;		/* set once we decide to restart the syscall */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* undo the sequence numbers consumed by the failed send */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		/* wait for the (possibly cancelled) lock's response */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
fb157ed2
SF
1637
1638/*
1639 * Discard any remaining data in the current SMB. To do this, we borrow the
1640 * current bigbuf.
1641 */
1642int
1643cifs_discard_remaining_data(struct TCP_Server_Info *server)
1644{
1645 unsigned int rfclen = server->pdu_size;
9789de8b 1646 int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
fb157ed2
SF
1647 server->total_read;
1648
1649 while (remaining > 0) {
1650 int length;
1651
1652 length = cifs_discard_from_socket(server,
1653 min_t(size_t, remaining,
1654 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1655 if (length < 0)
1656 return length;
1657 server->total_read += length;
1658 remaining -= length;
1659 }
1660
1661 return 0;
1662}
1663
/*
 * Drain the rest of the current PDU from the socket, dequeue the mid
 * (marking it malformed if @malformed), and hand ownership of the small
 * buffer to the mid as its response buffer.
 *
 * Returns the result of the discard (negative on socket error).
 */
static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	/* transfer smallbuf ownership to the mid */
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}
1676
/*
 * Discard variant used on the read path: the mid is flagged malformed
 * iff the readdata already carries a nonzero result code.
 */
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}
1684
/*
 * Receive handler for read responses: parses the READ_RSP header out of
 * the small buffer, validates offsets/lengths, then streams the payload
 * directly into the readdata's pages, discarding malformed or leftover
 * data.  Returns bytes consumed, or a negative value on socket error.
 */
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
	      HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		/* interim response - real data comes in a later PDU */
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	length = rdata->read_into_pages(server, rdata, data_len);
	if (length < 0)
		return length;

	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}