]> git.ipfire.org Git - people/ms/linux.git/blame - fs/cifs/transport.c
Merge tag 'soc-fixes-6.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
[people/ms/linux.git] / fs / cifs / transport.c
CommitLineData
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4 3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 */
9
10#include <linux/fs.h>
11#include <linux/list.h>
5a0e3ad6 12#include <linux/gfp.h>
1da177e4
LT
13#include <linux/wait.h>
14#include <linux/net.h>
15#include <linux/delay.h>
f06ac72e 16#include <linux/freezer.h>
b8eed283 17#include <linux/tcp.h>
2f8b5444 18#include <linux/bvec.h>
97bc00b3 19#include <linux/highmem.h>
7c0f6ba6 20#include <linux/uaccess.h>
1da177e4
LT
21#include <asm/processor.h>
22#include <linux/mempool.h>
14e25977 23#include <linux/sched/signal.h>
fb157ed2 24#include <linux/task_io_accounting_ops.h>
1da177e4
LT
25#include "cifspdu.h"
26#include "cifsglob.h"
27#include "cifsproto.h"
28#include "cifs_debug.h"
8bd68c6e 29#include "smb2proto.h"
9762c2d0 30#include "smbdirect.h"
50c2f753 31
3cecf486
RS
32/* Max number of iovectors we can use off the stack when sending requests. */
33#define CIFS_MAX_IOV_SIZE 8
34
2dc7e1c0
PS
35void
36cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
37{
38 wake_up_process(mid->callback_data);
39}
40
ea75a78c 41static struct mid_q_entry *
70f08f91 42alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
43{
44 struct mid_q_entry *temp;
45
24b9b06b 46 if (server == NULL) {
70f08f91 47 cifs_dbg(VFS, "%s: null TCP session\n", __func__);
1da177e4
LT
48 return NULL;
49 }
50c2f753 50
232087cb 51 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 52 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 53 kref_init(&temp->refcount);
a6f74e80
N
54 temp->mid = get_mid(smb_buffer);
55 temp->pid = current->pid;
56 temp->command = cpu_to_le16(smb_buffer->Command);
57 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 58 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
59 /* when mid allocated can be before when sent */
60 temp->when_alloc = jiffies;
61 temp->server = server;
2b84a36c 62
a6f74e80
N
63 /*
64 * The default is for the mid to be synchronous, so the
65 * default callback just wakes up the current task.
66 */
f1f27ad7
VW
67 get_task_struct(current);
68 temp->creator = current;
a6f74e80
N
69 temp->callback = cifs_wake_up_task;
70 temp->callback_data = current;
1da177e4 71
c2c17ddb 72 atomic_inc(&mid_count);
7c9421e1 73 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
74 return temp;
75}
76
70f08f91 77static void __release_mid(struct kref *refcount)
696e420b 78{
abe57073
PS
79 struct mid_q_entry *midEntry =
80 container_of(refcount, struct mid_q_entry, refcount);
1047abc1 81#ifdef CONFIG_CIFS_STATS2
2dc7e1c0 82 __le16 command = midEntry->server->vals->lock_cmd;
433b8dd7 83 __u16 smb_cmd = le16_to_cpu(midEntry->command);
1047abc1 84 unsigned long now;
433b8dd7 85 unsigned long roundtrip_time;
1047abc1 86#endif
7b71843f
PS
87 struct TCP_Server_Info *server = midEntry->server;
88
89 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
90 midEntry->mid_state == MID_RESPONSE_RECEIVED &&
91 server->ops->handle_cancelled_mid)
04ad69c3 92 server->ops->handle_cancelled_mid(midEntry, server);
7b71843f 93
7c9421e1 94 midEntry->mid_state = MID_FREE;
c2c17ddb 95 atomic_dec(&mid_count);
7c9421e1 96 if (midEntry->large_buf)
b8643e1b
SF
97 cifs_buf_release(midEntry->resp_buf);
98 else
99 cifs_small_buf_release(midEntry->resp_buf);
1047abc1
SF
100#ifdef CONFIG_CIFS_STATS2
101 now = jiffies;
433b8dd7 102 if (now < midEntry->when_alloc)
a0a3036b 103 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
433b8dd7
SF
104 roundtrip_time = now - midEntry->when_alloc;
105
106 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
107 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
108 server->slowest_cmd[smb_cmd] = roundtrip_time;
109 server->fastest_cmd[smb_cmd] = roundtrip_time;
110 } else {
111 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
112 server->slowest_cmd[smb_cmd] = roundtrip_time;
113 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
114 server->fastest_cmd[smb_cmd] = roundtrip_time;
115 }
116 cifs_stats_inc(&server->num_cmds[smb_cmd]);
117 server->time_per_cmd[smb_cmd] += roundtrip_time;
118 }
00778e22
SF
119 /*
120 * commands taking longer than one second (default) can be indications
121 * that something is wrong, unless it is quite a slow link or a very
122 * busy server. Note that this calc is unlikely or impossible to wrap
123 * as long as slow_rsp_threshold is not set way above recommended max
124 * value (32767 ie 9 hours) and is generally harmless even if wrong
125 * since only affects debug counters - so leaving the calc as simple
126 * comparison rather than doing multiple conversions and overflow
127 * checks
128 */
129 if ((slow_rsp_threshold != 0) &&
130 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
020eec5f 131 (midEntry->command != command)) {
f5942db5
SF
132 /*
133 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
134 * NB: le16_to_cpu returns unsigned so can not be negative below
135 */
433b8dd7
SF
136 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
137 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
468d6779 138
433b8dd7 139 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
020eec5f
SF
140 midEntry->when_sent, midEntry->when_received);
141 if (cifsFYI & CIFS_TIMER) {
a0a3036b
JP
142 pr_debug("slow rsp: cmd %d mid %llu",
143 midEntry->command, midEntry->mid);
144 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
145 now - midEntry->when_alloc,
146 now - midEntry->when_sent,
147 now - midEntry->when_received);
1047abc1
SF
148 }
149 }
150#endif
f1f27ad7 151 put_task_struct(midEntry->creator);
abe57073
PS
152
153 mempool_free(midEntry, cifs_mid_poolp);
154}
155
70f08f91 156void release_mid(struct mid_q_entry *mid)
abe57073 157{
70f08f91 158 struct TCP_Server_Info *server = mid->server;
d7d7a66a
SP
159
160 spin_lock(&server->mid_lock);
70f08f91 161 kref_put(&mid->refcount, __release_mid);
d7d7a66a 162 spin_unlock(&server->mid_lock);
abe57073
PS
163}
164
3c1bf7e4 165void
70f08f91 166delete_mid(struct mid_q_entry *mid)
ddc8cf8f 167{
d7d7a66a 168 spin_lock(&mid->server->mid_lock);
abe57073
PS
169 if (!(mid->mid_flags & MID_DELETED)) {
170 list_del_init(&mid->qhead);
171 mid->mid_flags |= MID_DELETED;
172 }
d7d7a66a 173 spin_unlock(&mid->server->mid_lock);
ddc8cf8f 174
70f08f91 175 release_mid(mid);
ddc8cf8f
JL
176}
177
6f49f46b
JL
178/*
179 * smb_send_kvec - send an array of kvecs to the server
180 * @server: Server to send the data to
3ab3f2a1 181 * @smb_msg: Message to send
6f49f46b
JL
182 * @sent: amount of data sent on socket is stored here
183 *
184 * Our basic "send data to server" function. Should be called with srv_mutex
185 * held. The caller is responsible for handling the results.
186 */
d6e04ae6 187static int
3ab3f2a1
AV
188smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
189 size_t *sent)
1da177e4
LT
190{
191 int rc = 0;
3ab3f2a1 192 int retries = 0;
edf1ae40 193 struct socket *ssocket = server->ssocket;
50c2f753 194
6f49f46b
JL
195 *sent = 0;
196
0496e02d 197 if (server->noblocksnd)
3ab3f2a1 198 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 199 else
3ab3f2a1 200 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 201
3ab3f2a1 202 while (msg_data_left(smb_msg)) {
6f49f46b
JL
203 /*
204 * If blocking send, we try 3 times, since each can block
205 * for 5 seconds. For nonblocking we have to try more
206 * but wait increasing amounts of time allowing time for
207 * socket to clear. The overall time we wait in either
208 * case to send on the socket is about 15 seconds.
209 * Similarly we wait for 15 seconds for a response from
210 * the server in SendReceive[2] for the server to send
211 * a response back for most types of requests (except
212 * SMB Write past end of file which can be slow, and
213 * blocking lock operations). NFS waits slightly longer
214 * than CIFS, but this can make it take longer for
215 * nonresponsive servers to be detected and 15 seconds
216 * is more than enough time for modern networks to
217 * send a packet. In most cases if we fail to send
218 * after the retries we will kill the socket and
219 * reconnect which may clear the network problem.
220 */
3ab3f2a1 221 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 222 if (rc == -EAGAIN) {
3ab3f2a1
AV
223 retries++;
224 if (retries >= 14 ||
225 (!server->noblocksnd && (retries > 2))) {
afe6f653 226 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 227 ssocket);
3ab3f2a1 228 return -EAGAIN;
1da177e4 229 }
3ab3f2a1 230 msleep(1 << retries);
1da177e4
LT
231 continue;
232 }
6f49f46b 233
79a58d1f 234 if (rc < 0)
3ab3f2a1 235 return rc;
6f49f46b 236
79a58d1f 237 if (rc == 0) {
3e84469d
SF
238 /* should never happen, letting socket clear before
239 retrying is our only obvious option here */
afe6f653 240 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
241 msleep(500);
242 continue;
d6e04ae6 243 }
6f49f46b 244
3ab3f2a1
AV
245 /* send was at least partially successful */
246 *sent += rc;
247 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 248 }
3ab3f2a1 249 return 0;
97bc00b3
JL
250}
251
35e2cc1b 252unsigned long
81f39f95 253smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
254{
255 unsigned int i;
35e2cc1b
PA
256 struct kvec *iov;
257 int nvec;
a26054d1
JL
258 unsigned long buflen = 0;
259
d291e703 260 if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
9789de8b 261 rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
262 iov = &rqst->rq_iov[1];
263 nvec = rqst->rq_nvec - 1;
264 } else {
265 iov = rqst->rq_iov;
266 nvec = rqst->rq_nvec;
267 }
268
a26054d1 269 /* total up iov array first */
35e2cc1b 270 for (i = 0; i < nvec; i++)
a26054d1
JL
271 buflen += iov[i].iov_len;
272
c06a0f2d
LL
273 /*
274 * Add in the page array if there is one. The caller needs to make
275 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
276 * multiple pages ends at page boundary, rq_tailsz needs to be set to
277 * PAGE_SIZE.
278 */
a26054d1 279 if (rqst->rq_npages) {
c06a0f2d
LL
280 if (rqst->rq_npages == 1)
281 buflen += rqst->rq_tailsz;
282 else {
283 /*
284 * If there is more than one page, calculate the
285 * buffer length based on rq_offset and rq_tailsz
286 */
287 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
288 rqst->rq_offset;
289 buflen += rqst->rq_tailsz;
290 }
a26054d1
JL
291 }
292
293 return buflen;
294}
295
6f49f46b 296static int
07cd952f
RS
297__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
298 struct smb_rqst *rqst)
6f49f46b 299{
07cd952f
RS
300 int rc = 0;
301 struct kvec *iov;
302 int n_vec;
303 unsigned int send_length = 0;
304 unsigned int i, j;
b30c74c7 305 sigset_t mask, oldmask;
3ab3f2a1 306 size_t total_len = 0, sent, size;
b8eed283 307 struct socket *ssocket = server->ssocket;
bedc8f76 308 struct msghdr smb_msg = {};
c713c877
RS
309 __be32 rfc1002_marker;
310
4357d45f
LL
311 if (cifs_rdma_enabled(server)) {
312 /* return -EAGAIN when connecting or reconnecting */
313 rc = -EAGAIN;
314 if (server->smbd_conn)
315 rc = smbd_send(server, num_rqst, rqst);
9762c2d0
LL
316 goto smbd_done;
317 }
afc18a6f 318
ea702b80 319 if (ssocket == NULL)
afc18a6f 320 return -EAGAIN;
ea702b80 321
214a5ea0 322 if (fatal_signal_pending(current)) {
6988a619
PA
323 cifs_dbg(FYI, "signal pending before send request\n");
324 return -ERESTARTSYS;
b30c74c7
PS
325 }
326
b8eed283 327 /* cork the socket */
db10538a 328 tcp_sock_set_cork(ssocket->sk, true);
b8eed283 329
07cd952f 330 for (j = 0; j < num_rqst; j++)
81f39f95 331 send_length += smb_rqst_len(server, &rqst[j]);
07cd952f
RS
332 rfc1002_marker = cpu_to_be32(send_length);
333
b30c74c7
PS
334 /*
335 * We should not allow signals to interrupt the network send because
336 * any partial send will cause session reconnects thus increasing
337 * latency of system calls and overload a server with unnecessary
338 * requests.
339 */
340
341 sigfillset(&mask);
342 sigprocmask(SIG_BLOCK, &mask, &oldmask);
343
c713c877 344 /* Generate a rfc1002 marker for SMB2+ */
d291e703 345 if (!is_smb1(server)) {
c713c877
RS
346 struct kvec hiov = {
347 .iov_base = &rfc1002_marker,
348 .iov_len = 4
349 };
aa563d7b 350 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
c713c877
RS
351 rc = smb_send_kvec(server, &smb_msg, &sent);
352 if (rc < 0)
b30c74c7 353 goto unmask;
c713c877
RS
354
355 total_len += sent;
356 send_length += 4;
357 }
358
662bf5bc
PA
359 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
360
07cd952f
RS
361 for (j = 0; j < num_rqst; j++) {
362 iov = rqst[j].rq_iov;
363 n_vec = rqst[j].rq_nvec;
3ab3f2a1 364
07cd952f 365 size = 0;
662bf5bc
PA
366 for (i = 0; i < n_vec; i++) {
367 dump_smb(iov[i].iov_base, iov[i].iov_len);
07cd952f 368 size += iov[i].iov_len;
662bf5bc 369 }
97bc00b3 370
aa563d7b 371 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
97bc00b3 372
3ab3f2a1 373 rc = smb_send_kvec(server, &smb_msg, &sent);
97bc00b3 374 if (rc < 0)
b30c74c7 375 goto unmask;
97bc00b3
JL
376
377 total_len += sent;
07cd952f
RS
378
379 /* now walk the page array and send each page in it */
380 for (i = 0; i < rqst[j].rq_npages; i++) {
381 struct bio_vec bvec;
382
383 bvec.bv_page = rqst[j].rq_pages[i];
384 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
385 &bvec.bv_offset);
386
aa563d7b 387 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
07cd952f
RS
388 &bvec, 1, bvec.bv_len);
389 rc = smb_send_kvec(server, &smb_msg, &sent);
390 if (rc < 0)
391 break;
392
393 total_len += sent;
394 }
97bc00b3 395 }
1da177e4 396
b30c74c7
PS
397unmask:
398 sigprocmask(SIG_SETMASK, &oldmask, NULL);
399
400 /*
401 * If signal is pending but we have already sent the whole packet to
402 * the server we need to return success status to allow a corresponding
403 * mid entry to be kept in the pending requests queue thus allowing
404 * to handle responses from the server by the client.
405 *
406 * If only part of the packet has been sent there is no need to hide
407 * interrupt because the session will be reconnected anyway, so there
408 * won't be any response from the server to handle.
409 */
410
411 if (signal_pending(current) && (total_len != send_length)) {
412 cifs_dbg(FYI, "signal is pending after attempt to send\n");
214a5ea0 413 rc = -ERESTARTSYS;
b30c74c7
PS
414 }
415
b8eed283 416 /* uncork it */
db10538a 417 tcp_sock_set_cork(ssocket->sk, false);
b8eed283 418
c713c877 419 if ((total_len > 0) && (total_len != send_length)) {
f96637be 420 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
c713c877 421 send_length, total_len);
6f49f46b
JL
422 /*
423 * If we have only sent part of an SMB then the next SMB could
424 * be taken as the remainder of this one. We need to kill the
425 * socket so the server throws away the partial SMB
426 */
dca65818 427 cifs_signal_cifsd_for_reconnect(server, false);
bf1fdeb7 428 trace_smb3_partial_send_reconnect(server->CurrentMid,
6d82c27a 429 server->conn_id, server->hostname);
edf1ae40 430 }
9762c2d0 431smbd_done:
d804d41d 432 if (rc < 0 && rc != -EINTR)
afe6f653 433 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
f96637be 434 rc);
ee13919c 435 else if (rc > 0)
1da177e4 436 rc = 0;
1da177e4
LT
437
438 return rc;
439}
440
6f49f46b 441static int
1f3a8f5f
RS
442smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
443 struct smb_rqst *rqst, int flags)
6f49f46b 444{
b2c96de7 445 struct kvec iov;
3946d0d0 446 struct smb2_transform_hdr *tr_hdr;
b2c96de7 447 struct smb_rqst cur_rqst[MAX_COMPOUND];
7fb8986e
PS
448 int rc;
449
450 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
451 return __smb_send_rqst(server, num_rqst, rqst);
452
453 if (num_rqst > MAX_COMPOUND - 1)
454 return -ENOMEM;
7fb8986e 455
b2c96de7 456 if (!server->ops->init_transform_rq) {
a0a3036b 457 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
7fb8986e
PS
458 return -EIO;
459 }
6f49f46b 460
9339faac 461 tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
3946d0d0
LL
462 if (!tr_hdr)
463 return -ENOMEM;
464
465 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
466 memset(&iov, 0, sizeof(iov));
3946d0d0
LL
467
468 iov.iov_base = tr_hdr;
469 iov.iov_len = sizeof(*tr_hdr);
470 cur_rqst[0].rq_iov = &iov;
471 cur_rqst[0].rq_nvec = 1;
472
1f3a8f5f
RS
473 rc = server->ops->init_transform_rq(server, num_rqst + 1,
474 &cur_rqst[0], rqst);
7fb8986e 475 if (rc)
3946d0d0 476 goto out;
7fb8986e 477
1f3a8f5f
RS
478 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
479 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
3946d0d0
LL
480out:
481 kfree(tr_hdr);
7fb8986e 482 return rc;
6f49f46b
JL
483}
484
0496e02d
JL
485int
486smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
487 unsigned int smb_buf_length)
488{
738f9de5 489 struct kvec iov[2];
7fb8986e
PS
490 struct smb_rqst rqst = { .rq_iov = iov,
491 .rq_nvec = 2 };
0496e02d 492
738f9de5
PS
493 iov[0].iov_base = smb_buffer;
494 iov[0].iov_len = 4;
495 iov[1].iov_base = (char *)smb_buffer + 4;
496 iov[1].iov_len = smb_buf_length;
0496e02d 497
07cd952f 498 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
499}
500
fc40f9cf 501static int
b227d215 502wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
2b53b929
RS
503 const int timeout, const int flags,
504 unsigned int *instance)
1da177e4 505{
19e88867 506 long rc;
4230cff8
RS
507 int *credits;
508 int optype;
2b53b929 509 long int t;
6d82c27a 510 int scredits, in_flight;
2b53b929
RS
511
512 if (timeout < 0)
513 t = MAX_JIFFY_OFFSET;
514 else
515 t = msecs_to_jiffies(timeout);
4230cff8
RS
516
517 optype = flags & CIFS_OP_MASK;
5bc59498 518
34f4deb7
PS
519 *instance = 0;
520
4230cff8
RS
521 credits = server->ops->get_credits_field(server, optype);
522 /* Since an echo is already inflight, no need to wait to send another */
523 if (*credits <= 0 && optype == CIFS_ECHO_OP)
524 return -EAGAIN;
525
fc40f9cf 526 spin_lock(&server->req_lock);
392e1c5d 527 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
1da177e4 528 /* oplock breaks must not be held up */
fc40f9cf 529 server->in_flight++;
1b63f184
SF
530 if (server->in_flight > server->max_in_flight)
531 server->max_in_flight = server->in_flight;
bc205ed1 532 *credits -= 1;
34f4deb7 533 *instance = server->reconnect_instance;
6d82c27a
SP
534 scredits = *credits;
535 in_flight = server->in_flight;
fc40f9cf 536 spin_unlock(&server->req_lock);
6d82c27a 537
1ddff774 538 trace_smb3_nblk_credits(server->CurrentMid,
6d82c27a
SP
539 server->conn_id, server->hostname, scredits, -1, in_flight);
540 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
541 __func__, 1, scredits);
542
27a97a61
VL
543 return 0;
544 }
545
27a97a61 546 while (1) {
b227d215 547 if (*credits < num_credits) {
6d82c27a 548 scredits = *credits;
fc40f9cf 549 spin_unlock(&server->req_lock);
6d82c27a 550
789e6661 551 cifs_num_waiters_inc(server);
2b53b929
RS
552 rc = wait_event_killable_timeout(server->request_q,
553 has_credits(server, credits, num_credits), t);
789e6661 554 cifs_num_waiters_dec(server);
2b53b929 555 if (!rc) {
6d82c27a
SP
556 spin_lock(&server->req_lock);
557 scredits = *credits;
558 in_flight = server->in_flight;
559 spin_unlock(&server->req_lock);
560
7937ca96 561 trace_smb3_credit_timeout(server->CurrentMid,
6d82c27a
SP
562 server->conn_id, server->hostname, scredits,
563 num_credits, in_flight);
afe6f653 564 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 565 timeout);
7de03948 566 return -EBUSY;
2b53b929
RS
567 }
568 if (rc == -ERESTARTSYS)
569 return -ERESTARTSYS;
fc40f9cf 570 spin_lock(&server->req_lock);
27a97a61 571 } else {
080dc5e5
SP
572 spin_unlock(&server->req_lock);
573
d7d7a66a 574 spin_lock(&server->srv_lock);
c5797a94 575 if (server->tcpStatus == CifsExiting) {
d7d7a66a 576 spin_unlock(&server->srv_lock);
27a97a61 577 return -ENOENT;
1da177e4 578 }
d7d7a66a 579 spin_unlock(&server->srv_lock);
27a97a61 580
16b34aa4
RS
581 /*
582 * For normal commands, reserve the last MAX_COMPOUND
583 * credits to compound requests.
584 * Otherwise these compounds could be permanently
585 * starved for credits by single-credit requests.
586 *
587 * To prevent spinning CPU, block this thread until
588 * there are >MAX_COMPOUND credits available.
589 * But only do this is we already have a lot of
590 * credits in flight to avoid triggering this check
591 * for servers that are slow to hand out credits on
592 * new sessions.
593 */
080dc5e5 594 spin_lock(&server->req_lock);
16b34aa4
RS
595 if (!optype && num_credits == 1 &&
596 server->in_flight > 2 * MAX_COMPOUND &&
597 *credits <= MAX_COMPOUND) {
598 spin_unlock(&server->req_lock);
6d82c27a 599
16b34aa4 600 cifs_num_waiters_inc(server);
2b53b929
RS
601 rc = wait_event_killable_timeout(
602 server->request_q,
16b34aa4 603 has_credits(server, credits,
2b53b929
RS
604 MAX_COMPOUND + 1),
605 t);
16b34aa4 606 cifs_num_waiters_dec(server);
2b53b929 607 if (!rc) {
6d82c27a
SP
608 spin_lock(&server->req_lock);
609 scredits = *credits;
610 in_flight = server->in_flight;
611 spin_unlock(&server->req_lock);
612
7937ca96 613 trace_smb3_credit_timeout(
6d82c27a
SP
614 server->CurrentMid,
615 server->conn_id, server->hostname,
616 scredits, num_credits, in_flight);
afe6f653 617 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 618 timeout);
7de03948 619 return -EBUSY;
2b53b929
RS
620 }
621 if (rc == -ERESTARTSYS)
622 return -ERESTARTSYS;
16b34aa4
RS
623 spin_lock(&server->req_lock);
624 continue;
625 }
626
2d86dbc9
PS
627 /*
628 * Can not count locking commands against total
629 * as they are allowed to block on server.
630 */
27a97a61
VL
631
632 /* update # of requests on the wire to server */
4230cff8 633 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
b227d215
RS
634 *credits -= num_credits;
635 server->in_flight += num_credits;
1b63f184
SF
636 if (server->in_flight > server->max_in_flight)
637 server->max_in_flight = server->in_flight;
34f4deb7 638 *instance = server->reconnect_instance;
2d86dbc9 639 }
6d82c27a
SP
640 scredits = *credits;
641 in_flight = server->in_flight;
fc40f9cf 642 spin_unlock(&server->req_lock);
cd7b699b 643
1ddff774 644 trace_smb3_waitff_credits(server->CurrentMid,
6d82c27a
SP
645 server->conn_id, server->hostname, scredits,
646 -(num_credits), in_flight);
cd7b699b
SP
647 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
648 __func__, num_credits, scredits);
27a97a61 649 break;
1da177e4
LT
650 }
651 }
7ee1af76
JA
652 return 0;
653}
1da177e4 654
/* Wait (without timeout) for a single credit for a non-compound request. */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
662
257b7809
RS
663static int
664wait_for_compound_request(struct TCP_Server_Info *server, int num,
665 const int flags, unsigned int *instance)
666{
667 int *credits;
6d82c27a 668 int scredits, in_flight;
257b7809
RS
669
670 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
671
672 spin_lock(&server->req_lock);
cd7b699b 673 scredits = *credits;
6d82c27a 674 in_flight = server->in_flight;
cd7b699b 675
257b7809
RS
676 if (*credits < num) {
677 /*
91792bb8
PS
678 * If the server is tight on resources or just gives us less
679 * credits for other reasons (e.g. requests are coming out of
680 * order and the server delays granting more credits until it
681 * processes a missing mid) and we exhausted most available
682 * credits there may be situations when we try to send
683 * a compound request but we don't have enough credits. At this
684 * point the client needs to decide if it should wait for
685 * additional credits or fail the request. If at least one
686 * request is in flight there is a high probability that the
687 * server will return enough credits to satisfy this compound
688 * request.
689 *
690 * Return immediately if no requests in flight since we will be
691 * stuck on waiting for credits.
257b7809 692 */
91792bb8 693 if (server->in_flight == 0) {
257b7809 694 spin_unlock(&server->req_lock);
cd7b699b 695 trace_smb3_insufficient_credits(server->CurrentMid,
6d82c27a
SP
696 server->conn_id, server->hostname, scredits,
697 num, in_flight);
cd7b699b 698 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
6d82c27a 699 __func__, in_flight, num, scredits);
7de03948 700 return -EDEADLK;
257b7809
RS
701 }
702 }
703 spin_unlock(&server->req_lock);
704
705 return wait_for_free_credits(server, num, 60000, flags,
706 instance);
707}
708
cb7e9eab
PS
709int
710cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 711 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
712{
713 *num = size;
335b7b62
PS
714 credits->value = 0;
715 credits->instance = server->reconnect_instance;
cb7e9eab
PS
716 return 0;
717}
718
96daf2b0 719static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
720 struct mid_q_entry **ppmidQ)
721{
d7d7a66a 722 spin_lock(&ses->ses_lock);
dd3cd870 723 if (ses->ses_status == SES_NEW) {
79a58d1f 724 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
080dc5e5 725 (in_buf->Command != SMB_COM_NEGOTIATE)) {
d7d7a66a 726 spin_unlock(&ses->ses_lock);
7ee1af76 727 return -EAGAIN;
080dc5e5 728 }
ad7a2926 729 /* else ok - we are setting up session */
1da177e4 730 }
7f48558e 731
dd3cd870 732 if (ses->ses_status == SES_EXITING) {
7f48558e 733 /* check if SMB session is bad because we are setting it up */
080dc5e5 734 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
d7d7a66a 735 spin_unlock(&ses->ses_lock);
7f48558e 736 return -EAGAIN;
080dc5e5 737 }
7f48558e
SP
738 /* else ok - we are shutting down session */
739 }
d7d7a66a 740 spin_unlock(&ses->ses_lock);
7f48558e 741
70f08f91 742 *ppmidQ = alloc_mid(in_buf, ses->server);
26f57364 743 if (*ppmidQ == NULL)
7ee1af76 744 return -ENOMEM;
d7d7a66a 745 spin_lock(&ses->server->mid_lock);
ddc8cf8f 746 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
d7d7a66a 747 spin_unlock(&ses->server->mid_lock);
7ee1af76
JA
748 return 0;
749}
750
0ade640e
JL
751static int
752wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 753{
0ade640e 754 int error;
7ee1af76 755
5853cc2a 756 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 757 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
758 if (error < 0)
759 return -ERESTARTSYS;
7ee1af76 760
0ade640e 761 return 0;
7ee1af76
JA
762}
763
fec344e3
JL
764struct mid_q_entry *
765cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
766{
767 int rc;
fec344e3 768 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
769 struct mid_q_entry *mid;
770
738f9de5
PS
771 if (rqst->rq_iov[0].iov_len != 4 ||
772 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
773 return ERR_PTR(-EIO);
774
792af7b0 775 /* enable signing if server requires it */
38d77c50 776 if (server->sign)
792af7b0
PS
777 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
778
70f08f91 779 mid = alloc_mid(hdr, server);
792af7b0 780 if (mid == NULL)
fec344e3 781 return ERR_PTR(-ENOMEM);
792af7b0 782
fec344e3 783 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb 784 if (rc) {
70f08f91 785 release_mid(mid);
fec344e3 786 return ERR_PTR(rc);
ffc61ccb
SP
787 }
788
fec344e3 789 return mid;
792af7b0 790}
133672ef 791
a6827c18
JL
792/*
793 * Send a SMB request and set the callback function in the mid to handle
794 * the result. Caller is responsible for dealing with timeouts.
795 */
796int
fec344e3 797cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 798 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
799 mid_handle_t *handle, void *cbdata, const int flags,
800 const struct cifs_credits *exist_credits)
a6827c18 801{
480b1cb9 802 int rc;
a6827c18 803 struct mid_q_entry *mid;
335b7b62 804 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 805 unsigned int instance;
480b1cb9 806 int optype;
a6827c18 807
a891f0f8
PS
808 optype = flags & CIFS_OP_MASK;
809
cb7e9eab 810 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 811 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
812 if (rc)
813 return rc;
335b7b62 814 credits.value = 1;
34f4deb7 815 credits.instance = instance;
3349c3a7
PS
816 } else
817 instance = exist_credits->instance;
a6827c18 818
cc391b69 819 cifs_server_lock(server);
3349c3a7
PS
820
821 /*
822 * We can't use credits obtained from the previous session to send this
823 * request. Check if there were reconnects after we obtained credits and
824 * return -EAGAIN in such cases to let callers handle it.
825 */
826 if (instance != server->reconnect_instance) {
cc391b69 827 cifs_server_unlock(server);
3349c3a7
PS
828 add_credits_and_wake_if(server, &credits, optype);
829 return -EAGAIN;
830 }
831
fec344e3
JL
832 mid = server->ops->setup_async_request(server, rqst);
833 if (IS_ERR(mid)) {
cc391b69 834 cifs_server_unlock(server);
335b7b62 835 add_credits_and_wake_if(server, &credits, optype);
fec344e3 836 return PTR_ERR(mid);
a6827c18
JL
837 }
838
44d22d84 839 mid->receive = receive;
a6827c18
JL
840 mid->callback = callback;
841 mid->callback_data = cbdata;
9b7c18a2 842 mid->handle = handle;
7c9421e1 843 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 844
ffc61ccb 845 /* put it on the pending_mid_q */
d7d7a66a 846 spin_lock(&server->mid_lock);
ffc61ccb 847 list_add_tail(&mid->qhead, &server->pending_mid_q);
d7d7a66a 848 spin_unlock(&server->mid_lock);
ffc61ccb 849
93d2cb6c
LL
850 /*
851 * Need to store the time in mid before calling I/O. For call_async,
852 * I/O response may come back and free the mid entry on another thread.
853 */
854 cifs_save_when_sent(mid);
789e6661 855 cifs_in_send_inc(server);
1f3a8f5f 856 rc = smb_send_rqst(server, 1, rqst, flags);
789e6661 857 cifs_in_send_dec(server);
ad313cb8 858
820962dc 859 if (rc < 0) {
c781af7e 860 revert_current_mid(server, mid->credits);
ad313cb8 861 server->sequence_number -= 2;
70f08f91 862 delete_mid(mid);
820962dc
RV
863 }
864
cc391b69 865 cifs_server_unlock(server);
789e6661 866
ffc61ccb
SP
867 if (rc == 0)
868 return 0;
a6827c18 869
335b7b62 870 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
871 return rc;
872}
873
133672ef
SF
874/*
875 *
876 * Send an SMB Request. No response info (other than return code)
877 * needs to be parsed.
878 *
879 * flags indicate the type of request buffer and how long to wait
880 * and whether to log NT STATUS code (error) before mapping it to POSIX error
881 *
882 */
883int
96daf2b0 884SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 885 char *in_buf, int flags)
133672ef
SF
886{
887 int rc;
888 struct kvec iov[1];
da502f7d 889 struct kvec rsp_iov;
133672ef
SF
890 int resp_buf_type;
891
792af7b0
PS
892 iov[0].iov_base = in_buf;
893 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 894 flags |= CIFS_NO_RSP_BUF;
da502f7d 895 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 896 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 897
133672ef
SF
898 return rc;
899}
900
/*
 * Translate the final state of a mid into an error code once the caller
 * has stopped waiting on it, and drop the caller's reference.
 *
 * Returns 0 when a response was received; in that case the mid is NOT
 * released, so the caller may still consume mid->resp_buf.  For every
 * other state the mid is released here and a negative errno is returned.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: keep the mid and its reference for the caller */
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/*
		 * Unexpected state: unlink the mid from the pending queue
		 * ourselves (marking MID_DELETED so it is not unlinked
		 * twice) before reporting the bad state.
		 */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&server->mid_lock);

	release_mid(mid);
	return rc;
}
937
121b046a 938static inline int
fb2036d8
PS
939send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
940 struct mid_q_entry *mid)
76dcc26f 941{
121b046a 942 return server->ops->send_cancel ?
fb2036d8 943 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
944}
945
2c8f981d
JL
/*
 * Validate a received SMB1 response for @mid: dump the start of the
 * frame, verify the signature when signing is active on @server, and
 * map the SMB status field to a POSIX error code.
 *
 * @log_error: when true, the NT status is logged before being mapped.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	/* total frame length = RFC1002 length field + 4 preamble bytes */
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		/* iov[0] = 4-byte RFC1002 header, iov[1] = SMB payload */
		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
		/* note: a bad signature is only logged here, not fatal */
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}
976
/*
 * SMB1 setup_request hook: allocate a mid for @rqst and sign the
 * request, recording the signing sequence number in the mid.
 *
 * Expects rq_iov[0] to be exactly the 4-byte RFC1002 header, followed
 * contiguously in memory by rq_iov[1] (the SMB payload).
 *
 * Returns the new mid, or an ERR_PTR() on failure.
 */
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* enforce the 4-byte header + contiguous payload layout */
	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		/* signing failed: free the mid we just allocated */
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
999
4e34feb5 1000static void
ee258d79 1001cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
1002{
1003 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
1004 struct cifs_credits credits;
1005
1006 credits.value = server->ops->get_credits(mid);
1007 credits.instance = server->reconnect_instance;
8a26f0f7 1008
34f4deb7 1009 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
1010}
1011
ee258d79
PS
/*
 * Completion callback for the final request of a compound chain:
 * return this mid's credits, then wake the thread sleeping in
 * compound_send_recv().  Only the last mid wakes the sender.
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1018
/*
 * Completion callback installed when the waiter gave up on a mid
 * (cancelled wait): collect the credits, then drop the reference the
 * waiter abandoned instead of waking anyone.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}
1025
5f68ea4a
AA
/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;

	if (!ses)
		return NULL;

	/* round robin */
	index = (uint)atomic_inc_return(&ses->chan_seq);

	/* chan_count can change on reconnect; sample it under chan_lock */
	spin_lock(&ses->chan_lock);
	index %= ses->chan_count;
	spin_unlock(&ses->chan_lock);

	/*
	 * NOTE(review): chans[index] is dereferenced after dropping
	 * chan_lock — presumably safe because channel entries persist for
	 * the lifetime of the ses; confirm against channel teardown paths.
	 */
	return ses->chans[index].server;
}
1049
/*
 * Send @num_rqst requests as one compound chain on @server and wait for
 * all of their responses.
 *
 * On success returns 0 and fills resp_iov[i]/resp_buf_type[i] for each
 * request; the caller then owns the response buffers unless
 * CIFS_NO_RSP_BUF was set in @flags.  Credit accounting: one credit is
 * charged per request up front and handed back either here (send
 * failure / no-response ops) or by the per-mid completion callbacks.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	/* one credit charged per request, all from the same instance */
	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain belong obtained credits from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* unwind: mids 0..i-1 were already set up */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: undo mid and signing sequence advances */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		/* preauth hash update must be serialized with sends */
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel the waits for mid i and everything after it */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				/*
				 * Still in flight: let the demultiplex
				 * thread release it; its credit will be
				 * handled by the cancelled callback.
				 */
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1284
e0bba0b8
RS
/*
 * Send one request and wait for its response: implemented as a
 * single-element compound chain handed to compound_send_recv().
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc;

	rc = compound_send_recv(xid, ses, server, flags, 1,
				rqst, resp_buf_type, resp_iov);
	return rc;
}
1294
738f9de5
PS
/*
 * Send a request described by a caller-supplied iov array and wait for
 * the response.  Prepends an extra iov for the 4-byte RFC1001 length
 * (carved off the front of the caller's first iov) as cifs_send_recv()
 * expects, using a stack array for small vectors and a heap allocation
 * otherwise.
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	/* split the first caller iov: 4-byte length header + remainder */
	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	/* free only when we took the kmalloc_array path above */
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}
1333
/*
 * Send an SMB1 request and wait for its response, copying the response
 * frame into @out_buf and returning its length via @pbytes_returned.
 * Charges one credit which is returned on every exit path.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: undo the signing sequence number advance */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid already released by cifs_sync_mid_result() */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy response (RFC1002 length + payload) back to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1449
7ee1af76
JA
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	/* the cancel is a new request and needs a fresh mid */
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
1474
/*
 * Send an SMB1 blocking-lock request and wait (interruptibly) for the
 * response.  If the wait is interrupted by a signal, a cancel is sent
 * (NT_CANCEL for POSIX/trans2 locks, LOCKINGX_CANCEL_LOCK for Windows
 * locks) and, once the response finally arrives, -EACCES is turned into
 * -ERESTARTSYS so the syscall restarts.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;		/* set once we must restart the syscall */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: undo the signing sequence number advance */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		/* now wait (uninterruptibly) for the cancelled request */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
fb157ed2
SF
1633
1634/*
1635 * Discard any remaining data in the current SMB. To do this, we borrow the
1636 * current bigbuf.
1637 */
1638int
1639cifs_discard_remaining_data(struct TCP_Server_Info *server)
1640{
1641 unsigned int rfclen = server->pdu_size;
9789de8b 1642 int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
fb157ed2
SF
1643 server->total_read;
1644
1645 while (remaining > 0) {
1646 int length;
1647
1648 length = cifs_discard_from_socket(server,
1649 min_t(size_t, remaining,
1650 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1651 if (length < 0)
1652 return length;
1653 server->total_read += length;
1654 remaining -= length;
1655 }
1656
1657 return 0;
1658}
1659
/*
 * Drain the rest of the current frame from the socket and complete
 * @mid, transferring ownership of the small response buffer to it.
 * @malformed is passed through to dequeue_mid() to flag a bad frame.
 */
static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;	/* ownership moved to the mid */
	return length;
}
1672
/*
 * Discard the remainder of a read response; the mid is marked malformed
 * when the read has already recorded a failure in rdata->result
 * (non-zero result implies a malformed/failed read).
 */
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}
1680
/*
 * Receive handler for an async read (READ_RSP): parse the rest of the
 * response header incrementally from the socket, validate offsets and
 * lengths, then hand the data payload to rdata->read_into_pages().
 * At entry the demultiplex thread has read up to and including the Mid.
 *
 * Returns the number of bytes consumed in the final read step, or a
 * negative error; malformed frames are drained via the discard helpers.
 */
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
	      HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		/* interim response: drop this frame, keep waiting */
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		      HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	length = rdata->read_into_pages(server, rdata, data_len);
	if (length < 0)
		return length;

	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;	/* ownership moved to the mid */
	return length;
}