]> git.ipfire.org Git - people/ms/linux.git/blame - fs/cifs/transport.c
smb3: fix signing verification of large reads
[people/ms/linux.git] / fs / cifs / transport.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/transport.c
3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
79a58d1f 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
1da177e4
LT
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
5a0e3ad6 25#include <linux/gfp.h>
1da177e4
LT
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
f06ac72e 29#include <linux/freezer.h>
b8eed283 30#include <linux/tcp.h>
2f8b5444 31#include <linux/bvec.h>
97bc00b3 32#include <linux/highmem.h>
7c0f6ba6 33#include <linux/uaccess.h>
1da177e4
LT
34#include <asm/processor.h>
35#include <linux/mempool.h>
14e25977 36#include <linux/sched/signal.h>
1da177e4
LT
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
8bd68c6e 41#include "smb2proto.h"
9762c2d0 42#include "smbdirect.h"
50c2f753 43
3cecf486
RS
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
2dc7e1c0
PS
/*
 * Default mid callback for synchronous requests: wake the task that
 * submitted the request (stored in callback_data by AllocMidQEntry).
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
52
a6827c18 53struct mid_q_entry *
24b9b06b 54AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
55{
56 struct mid_q_entry *temp;
57
24b9b06b 58 if (server == NULL) {
f96637be 59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
1da177e4
LT
60 return NULL;
61 }
50c2f753 62
232087cb 63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 64 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 65 kref_init(&temp->refcount);
a6f74e80
N
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
2b84a36c 74
a6f74e80
N
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
79 temp->callback = cifs_wake_up_task;
80 temp->callback_data = current;
1da177e4 81
1da177e4 82 atomic_inc(&midCount);
7c9421e1 83 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
84 return temp;
85}
86
696e420b
LP
/* kref release callback: return the mid to its mempool. */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}
94
/*
 * Drop a reference on a mid. GlobalMid_Lock is held across the kref_put so
 * the final free cannot race with lookups walking the pending mid queue.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
101
/*
 * Tear down a mid: release its response buffer, update per-command latency
 * statistics (CONFIG_CIFS_STATS2 only), log/trace abnormally slow responses,
 * then drop the caller's reference. The mid must already be off the pending
 * queue.
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	/* lock_cmd is excluded from slow-response reporting below, since
	   blocking lock requests legitimately wait at the server */
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
	struct TCP_Server_Info *server = midEntry->server;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* NOTE(review): plain '<' is not jiffies-wraparound safe; a
	   time_before() comparison would be — confirm intent */
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/total round trip per SMB2 command code */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}
171
3c1bf7e4
PS
172void
173cifs_delete_mid(struct mid_q_entry *mid)
ddc8cf8f
JL
174{
175 spin_lock(&GlobalMid_Lock);
ddf83afb
RS
176 list_del_init(&mid->qhead);
177 mid->mid_flags |= MID_DELETED;
ddc8cf8f
JL
178 spin_unlock(&GlobalMid_Lock);
179
180 DeleteMidQEntry(mid);
181}
182
6f49f46b
JL
183/*
184 * smb_send_kvec - send an array of kvecs to the server
185 * @server: Server to send the data to
3ab3f2a1 186 * @smb_msg: Message to send
6f49f46b
JL
187 * @sent: amount of data sent on socket is stored here
188 *
189 * Our basic "send data to server" function. Should be called with srv_mutex
190 * held. The caller is responsible for handling the results.
191 */
d6e04ae6 192static int
3ab3f2a1
AV
193smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
194 size_t *sent)
1da177e4
LT
195{
196 int rc = 0;
3ab3f2a1 197 int retries = 0;
edf1ae40 198 struct socket *ssocket = server->ssocket;
50c2f753 199
6f49f46b
JL
200 *sent = 0;
201
3ab3f2a1
AV
202 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
203 smb_msg->msg_namelen = sizeof(struct sockaddr);
204 smb_msg->msg_control = NULL;
205 smb_msg->msg_controllen = 0;
0496e02d 206 if (server->noblocksnd)
3ab3f2a1 207 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 208 else
3ab3f2a1 209 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 210
3ab3f2a1 211 while (msg_data_left(smb_msg)) {
6f49f46b
JL
212 /*
213 * If blocking send, we try 3 times, since each can block
214 * for 5 seconds. For nonblocking we have to try more
215 * but wait increasing amounts of time allowing time for
216 * socket to clear. The overall time we wait in either
217 * case to send on the socket is about 15 seconds.
218 * Similarly we wait for 15 seconds for a response from
219 * the server in SendReceive[2] for the server to send
220 * a response back for most types of requests (except
221 * SMB Write past end of file which can be slow, and
222 * blocking lock operations). NFS waits slightly longer
223 * than CIFS, but this can make it take longer for
224 * nonresponsive servers to be detected and 15 seconds
225 * is more than enough time for modern networks to
226 * send a packet. In most cases if we fail to send
227 * after the retries we will kill the socket and
228 * reconnect which may clear the network problem.
229 */
3ab3f2a1 230 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 231 if (rc == -EAGAIN) {
3ab3f2a1
AV
232 retries++;
233 if (retries >= 14 ||
234 (!server->noblocksnd && (retries > 2))) {
afe6f653 235 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 236 ssocket);
3ab3f2a1 237 return -EAGAIN;
1da177e4 238 }
3ab3f2a1 239 msleep(1 << retries);
1da177e4
LT
240 continue;
241 }
6f49f46b 242
79a58d1f 243 if (rc < 0)
3ab3f2a1 244 return rc;
6f49f46b 245
79a58d1f 246 if (rc == 0) {
3e84469d
SF
247 /* should never happen, letting socket clear before
248 retrying is our only obvious option here */
afe6f653 249 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
250 msleep(500);
251 continue;
d6e04ae6 252 }
6f49f46b 253
3ab3f2a1
AV
254 /* send was at least partially successful */
255 *sent += rc;
256 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 257 }
3ab3f2a1 258 return 0;
97bc00b3
JL
259}
260
35e2cc1b 261unsigned long
81f39f95 262smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
263{
264 unsigned int i;
35e2cc1b
PA
265 struct kvec *iov;
266 int nvec;
a26054d1
JL
267 unsigned long buflen = 0;
268
81f39f95
RS
269 if (server->vals->header_preamble_size == 0 &&
270 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
271 iov = &rqst->rq_iov[1];
272 nvec = rqst->rq_nvec - 1;
273 } else {
274 iov = rqst->rq_iov;
275 nvec = rqst->rq_nvec;
276 }
277
a26054d1 278 /* total up iov array first */
35e2cc1b 279 for (i = 0; i < nvec; i++)
a26054d1
JL
280 buflen += iov[i].iov_len;
281
c06a0f2d
LL
282 /*
283 * Add in the page array if there is one. The caller needs to make
284 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
285 * multiple pages ends at page boundary, rq_tailsz needs to be set to
286 * PAGE_SIZE.
287 */
a26054d1 288 if (rqst->rq_npages) {
c06a0f2d
LL
289 if (rqst->rq_npages == 1)
290 buflen += rqst->rq_tailsz;
291 else {
292 /*
293 * If there is more than one page, calculate the
294 * buffer length based on rq_offset and rq_tailsz
295 */
296 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
297 rqst->rq_offset;
298 buflen += rqst->rq_tailsz;
299 }
a26054d1
JL
300 }
301
302 return buflen;
303}
304
/*
 * Transmit an array of @num_rqst requests over the server's transport.
 * Uses smbd (RDMA) when enabled; otherwise corks the TCP socket, blocks all
 * signals for the duration of the send (a partial send forces a reconnect),
 * emits the RFC1002 length marker for SMB2+, then streams each request's
 * iovecs and page array. On a partial send the session is marked for
 * reconnect so the server discards the incomplete SMB.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	/* RDMA transport bypasses the socket path entirely */
	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket: coalesce the pieces into full frames */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
450
6f49f46b 451static int
1f3a8f5f
RS
452smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
453 struct smb_rqst *rqst, int flags)
6f49f46b 454{
b2c96de7
RS
455 struct kvec iov;
456 struct smb2_transform_hdr tr_hdr;
457 struct smb_rqst cur_rqst[MAX_COMPOUND];
7fb8986e
PS
458 int rc;
459
460 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
461 return __smb_send_rqst(server, num_rqst, rqst);
462
463 if (num_rqst > MAX_COMPOUND - 1)
464 return -ENOMEM;
7fb8986e 465
b2c96de7
RS
466 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
467 memset(&iov, 0, sizeof(iov));
468 memset(&tr_hdr, 0, sizeof(tr_hdr));
469
470 iov.iov_base = &tr_hdr;
471 iov.iov_len = sizeof(tr_hdr);
472 cur_rqst[0].rq_iov = &iov;
473 cur_rqst[0].rq_nvec = 1;
474
475 if (!server->ops->init_transform_rq) {
afe6f653
RS
476 cifs_server_dbg(VFS, "Encryption requested but transform "
477 "callback is missing\n");
7fb8986e
PS
478 return -EIO;
479 }
6f49f46b 480
1f3a8f5f
RS
481 rc = server->ops->init_transform_rq(server, num_rqst + 1,
482 &cur_rqst[0], rqst);
7fb8986e
PS
483 if (rc)
484 return rc;
485
1f3a8f5f
RS
486 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
487 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
7fb8986e 488 return rc;
6f49f46b
JL
489}
490
0496e02d
JL
491int
492smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
493 unsigned int smb_buf_length)
494{
738f9de5 495 struct kvec iov[2];
7fb8986e
PS
496 struct smb_rqst rqst = { .rq_iov = iov,
497 .rq_nvec = 2 };
0496e02d 498
738f9de5
PS
499 iov[0].iov_base = smb_buffer;
500 iov[0].iov_len = 4;
501 iov[1].iov_base = (char *)smb_buffer + 4;
502 iov[1].iov_len = smb_buf_length;
0496e02d 503
07cd952f 504 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
505}
506
/*
 * Block until @num_credits credits are available on @server (or @timeout ms
 * elapse; timeout < 0 means wait forever), then consume them and record the
 * reconnect instance they belong to in *@instance. CIFS_NON_BLOCKING
 * requests (oplock breaks) take a credit immediately without waiting;
 * CIFS_BLOCKING_OP requests wait but do not consume credits. Returns 0 on
 * success, -EAGAIN/-ENOENT/-ENOTSUPP/-ERESTARTSYS on the various failures.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* not enough credits yet: sleep until woken or timeout */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
1da177e4 619
/* Acquire a single credit, waiting without a timeout (timeout = -1). */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
627
257b7809
RS
/*
 * Acquire @num credits for a compound request, with a 60 second timeout.
 * Fails fast with -ENOTSUPP when the shortfall exceeds what the in-flight
 * requests could plausibly return, since waiting would likely deadlock.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
652
cb7e9eab
PS
653int
654cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 655 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
656{
657 *num = size;
335b7b62
PS
658 credits->value = 0;
659 credits->instance = server->reconnect_instance;
cb7e9eab
PS
660 return 0;
661}
662
/*
 * Allocate a mid for @in_buf and queue it on the server's pending list,
 * first vetting the TCP and SMB session states. Session-setup and logoff
 * commands are allowed through states where ordinary requests must retry.
 * Returns 0 with *@ppmidQ set, or -ENOENT/-EAGAIN/-ENOMEM.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	/* make the mid visible to the demultiplex thread */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
697
0ade640e
JL
698static int
699wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 700{
0ade640e 701 int error;
7ee1af76 702
5853cc2a 703 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 704 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
705 if (error < 0)
706 return -ERESTARTSYS;
7ee1af76 707
0ade640e 708 return 0;
7ee1af76
JA
709}
710
fec344e3
JL
711struct mid_q_entry *
712cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
713{
714 int rc;
fec344e3 715 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
716 struct mid_q_entry *mid;
717
738f9de5
PS
718 if (rqst->rq_iov[0].iov_len != 4 ||
719 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
720 return ERR_PTR(-EIO);
721
792af7b0 722 /* enable signing if server requires it */
38d77c50 723 if (server->sign)
792af7b0
PS
724 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
725
726 mid = AllocMidQEntry(hdr, server);
727 if (mid == NULL)
fec344e3 728 return ERR_PTR(-ENOMEM);
792af7b0 729
fec344e3 730 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb
SP
731 if (rc) {
732 DeleteMidQEntry(mid);
fec344e3 733 return ERR_PTR(rc);
ffc61ccb
SP
734 }
735
fec344e3 736 return mid;
792af7b0 737}
133672ef 738
a6827c18
JL
739/*
740 * Send a SMB request and set the callback function in the mid to handle
741 * the result. Caller is responsible for dealing with timeouts.
742 */
/*
 * Send a request and arrange for @callback to handle the response; the
 * caller deals with timeouts. Acquires one credit unless CIFS_HAS_CREDITS
 * says @exist_credits were pre-acquired, and fails with -EAGAIN if a
 * reconnect invalidated the credits before the send. On send failure the
 * mid is torn down and the credits returned.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* undo the mid/sequence allocation done for this request */
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
820
133672ef
SF
821/*
822 *
823 * Send an SMB Request. No response info (other than return code)
824 * needs to be parsed.
825 *
826 * flags indicate the type of request buffer and how long to wait
827 * and whether to log NT STATUS code (error) before mapping it to POSIX error
828 *
829 */
830int
96daf2b0 831SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 832 char *in_buf, int flags)
133672ef
SF
833{
834 int rc;
835 struct kvec iov[1];
da502f7d 836 struct kvec rsp_iov;
133672ef
SF
837 int resp_buf_type;
838
792af7b0
PS
839 iov[0].iov_base = in_buf;
840 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 841 flags |= CIFS_NO_RSP_BUF;
da502f7d 842 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 843 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 844
133672ef
SF
845 return rc;
846}
847
/*
 * Map a completed mid's state to an error code and release the mid.
 * On MID_RESPONSE_RECEIVED the demultiplex thread already unlinked the mid,
 * so only the unexpected default case removes it from the queue here.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unknown state: unlink defensively and report I/O error */
		list_del_init(&mid->qhead);
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
881
121b046a 882static inline int
fb2036d8
PS
883send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
884 struct mid_q_entry *mid)
76dcc26f 885{
121b046a 886 return server->ops->send_cancel ?
fb2036d8 887 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
888}
889
2c8f981d
JL
/*
 * Post-process a received response: dump the first bytes for debugging,
 * verify the signature when signing is active (failure is only logged, not
 * fatal — see FIXME), and map the SMB status to a POSIX error code.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		/* iov[0] = RFC1002 length field, iov[1] = the SMB proper */
		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
920
fec344e3
JL
/*
 * Synchronous counterpart of cifs_setup_async_request: validate the iovec
 * layout, allocate a mid (queued on pending_mid_q by allocate_mid) and sign
 * the request. Returns the mid or an ERR_PTR.
 */
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* iov[0] must be the 4-byte length field directly before iov[1] */
	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		/* mid is already on the pending queue: unlink and free it */
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
942
4e34feb5 943static void
ee258d79 944cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
945{
946 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
947 struct cifs_credits credits;
948
949 credits.value = server->ops->get_credits(mid);
950 credits.instance = server->reconnect_instance;
8a26f0f7 951
34f4deb7 952 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
953}
954
ee258d79
PS
/*
 * Callback for the final part of a compound chain: account credits like
 * every other part, then wake the thread blocked in compound_send_recv().
 * Only the last PDU may wake the sender - see the comment there.
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
961
/*
 * Callback installed on a mid whose waiter gave up (signal/timeout):
 * account the granted credits, then free the mid entry directly since
 * no thread is left to collect the response.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
968
/*
 * Send a chain of up to MAX_COMPOUND requests and wait for the responses.
 *
 * Obtains one credit per request, queues and signs all mids under
 * srv_mutex (so signing order matches socket send order), sends the whole
 * chain, then waits for each response in turn. Credits are returned via
 * the per-mid callbacks on success, or explicitly here on the failure
 * paths. On signal-interrupted waits the affected mids are handed to
 * cifs_cancelled_callback so the demultiplex thread frees them.
 *
 * On success resp_iov[i]/resp_buf_type[i] describe each response buffer;
 * the caller owns those buffers unless CIFS_NO_RSP_BUF was set.
 * Returns 0 or a negative errno.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;
	struct TCP_Server_Info *server;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	server = ses->server;
	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain belong obtained credits from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* roll back mids queued so far plus current mid id */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: undo mid id and signing sequence advance */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* interrupted: cancel this and all not-yet-received mids */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1186
e0bba0b8
RS
/*
 * Send a single request and wait for its response: a degenerate compound
 * chain of length one. See compound_send_recv() for credit and buffer
 * ownership semantics.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}
1195
738f9de5
PS
1196int
1197SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1198 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1199 const int flags, struct kvec *resp_iov)
1200{
1201 struct smb_rqst rqst;
3cecf486 1202 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1203 int rc;
1204
3cecf486 1205 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1206 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1207 GFP_KERNEL);
117e3b7f
SF
1208 if (!new_iov) {
1209 /* otherwise cifs_send_recv below sets resp_buf_type */
1210 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1211 return -ENOMEM;
117e3b7f 1212 }
3cecf486
RS
1213 } else
1214 new_iov = s_iov;
738f9de5
PS
1215
1216 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1217 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1218
1219 new_iov[0].iov_base = new_iov[1].iov_base;
1220 new_iov[0].iov_len = 4;
1221 new_iov[1].iov_base += 4;
1222 new_iov[1].iov_len -= 4;
1223
1224 memset(&rqst, 0, sizeof(struct smb_rqst));
1225 rqst.rq_iov = new_iov;
1226 rqst.rq_nvec = n_vec + 1;
1227
1228 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1229 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1230 kfree(new_iov);
738f9de5
PS
1231 return rc;
1232}
1233
/*
 * Send a single legacy (SMB1) request in @in_buf and copy the response
 * into @out_buf, storing the on-the-wire response length (excluding the
 * 4-byte RFC1001 header) in *pbytes_returned.
 *
 * Takes one credit via wait_for_free_request() and returns it on every
 * exit path. Allocation, signing and sending happen under srv_mutex so
 * signing order matches socket send order. Returns 0 or negative errno.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: undo the signing sequence number advance */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		/* wait interrupted: try to cancel on the wire */
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* response length excludes the 4-byte RFC1001 header; copy both */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1345
7ee1af76
JA
1346/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1347 blocking lock to return. */
1348
1349static int
96daf2b0 1350send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1351 struct smb_hdr *in_buf,
1352 struct smb_hdr *out_buf)
1353{
1354 int bytes_returned;
96daf2b0 1355 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1356 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1357
1358 /* We just modify the current in_buf to change
1359 the type of lock from LOCKING_ANDX_SHARED_LOCK
1360 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1361 LOCKING_ANDX_CANCEL_LOCK. */
1362
1363 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1364 pSMB->Timeout = 0;
88257360 1365 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1366
1367 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1368 &bytes_returned, 0);
7ee1af76
JA
1369}
1370
/*
 * Send a blocking SMB1 lock request and wait, interruptibly, for the
 * response.
 *
 * Unlike SendReceive(), the wait may be interrupted by a signal; in that
 * case the server-side blocking lock is cancelled first (NT_CANCEL for
 * POSIX/TRANSACTION2 locks, LOCKINGX_CANCEL_LOCK otherwise) and, once the
 * original response finally arrives, -EACCES is translated to
 * -ERESTARTSYS so the system call restarts (@rstart tracks this).
 * Returns 0 or a negative errno.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: undo the signing sequence number advance */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* the cancel should make the original request complete */
		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* response length excludes the 4-byte RFC1001 header; copy both */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}