1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 */
9
10 #include <linux/fs.h>
11 #include <linux/list.h>
12 #include <linux/gfp.h>
13 #include <linux/wait.h>
14 #include <linux/net.h>
15 #include <linux/delay.h>
16 #include <linux/freezer.h>
17 #include <linux/tcp.h>
18 #include <linux/bvec.h>
19 #include <linux/highmem.h>
20 #include <linux/uaccess.h>
21 #include <asm/processor.h>
22 #include <linux/mempool.h>
23 #include <linux/sched/signal.h>
24 #include "cifspdu.h"
25 #include "cifsglob.h"
26 #include "cifsproto.h"
27 #include "cifs_debug.h"
28 #include "smb2proto.h"
29 #include "smbdirect.h"
30
31 /* Max number of iovectors we can use off the stack when sending requests. */
32 #define CIFS_MAX_IOV_SIZE 8
33
34 void
35 cifs_wake_up_task(struct mid_q_entry *mid)
36 {
37 wake_up_process(mid->callback_data);
38 }
39
40 struct mid_q_entry *
41 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
42 {
43 struct mid_q_entry *temp;
44
45 if (server == NULL) {
46 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
47 return NULL;
48 }
49
50 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
51 memset(temp, 0, sizeof(struct mid_q_entry));
52 kref_init(&temp->refcount);
53 temp->mid = get_mid(smb_buffer);
54 temp->pid = current->pid;
55 temp->command = cpu_to_le16(smb_buffer->Command);
56 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
57 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
58 /* the mid allocation time can be earlier than the time it is sent */
59 temp->when_alloc = jiffies;
60 temp->server = server;
61
62 /*
63 * The default is for the mid to be synchronous, so the
64 * default callback just wakes up the current task.
65 */
66 get_task_struct(current);
67 temp->creator = current;
68 temp->callback = cifs_wake_up_task;
69 temp->callback_data = current;
70
71 atomic_inc(&midCount);
72 temp->mid_state = MID_REQUEST_ALLOCATED;
73 return temp;
74 }
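/*
 * Illustrative use (a sketch, not a call site in this file): async
 * senders allocate a mid like this and then replace the default
 * wake-up callback before the request goes on the wire, e.g.
 *
 *	mid = AllocMidQEntry(hdr, server);
 *	if (mid) {
 *		mid->callback = my_async_callback;	/- hypothetical handler -/
 *		mid->callback_data = my_context;	/- hypothetical context -/
 *	}
 *
 * which is the pattern cifs_call_async() below builds on.
 */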
75
76 static void _cifs_mid_q_entry_release(struct kref *refcount)
77 {
78 struct mid_q_entry *midEntry =
79 container_of(refcount, struct mid_q_entry, refcount);
80 #ifdef CONFIG_CIFS_STATS2
81 __le16 command = midEntry->server->vals->lock_cmd;
82 __u16 smb_cmd = le16_to_cpu(midEntry->command);
83 unsigned long now;
84 unsigned long roundtrip_time;
85 #endif
86 struct TCP_Server_Info *server = midEntry->server;
87
88 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
89 midEntry->mid_state == MID_RESPONSE_RECEIVED &&
90 server->ops->handle_cancelled_mid)
91 server->ops->handle_cancelled_mid(midEntry, server);
92
93 midEntry->mid_state = MID_FREE;
94 atomic_dec(&midCount);
95 if (midEntry->large_buf)
96 cifs_buf_release(midEntry->resp_buf);
97 else
98 cifs_small_buf_release(midEntry->resp_buf);
99 #ifdef CONFIG_CIFS_STATS2
100 now = jiffies;
101 if (now < midEntry->when_alloc)
102 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
103 roundtrip_time = now - midEntry->when_alloc;
104
105 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
106 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
107 server->slowest_cmd[smb_cmd] = roundtrip_time;
108 server->fastest_cmd[smb_cmd] = roundtrip_time;
109 } else {
110 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
111 server->slowest_cmd[smb_cmd] = roundtrip_time;
112 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
113 server->fastest_cmd[smb_cmd] = roundtrip_time;
114 }
115 cifs_stats_inc(&server->num_cmds[smb_cmd]);
116 server->time_per_cmd[smb_cmd] += roundtrip_time;
117 }
118 /*
119 * commands taking longer than one second (default) can be indications
120 * that something is wrong, unless it is quite a slow link or a very
121 * busy server. Note that this calc is unlikely or impossible to wrap
122 * as long as slow_rsp_threshold is not set way above recommended max
123 * value (32767, i.e. 9 hours) and is generally harmless even if wrong
124 * since only affects debug counters - so leaving the calc as simple
125 * comparison rather than doing multiple conversions and overflow
126 * checks
127 */
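/*
 * Worked example: with the default slow_rsp_threshold of 1, a non-lock
 * command is flagged as slow once "now" is more than 1 * HZ jiffies
 * (one second) past when_alloc.
 */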
128 if ((slow_rsp_threshold != 0) &&
129 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
130 (midEntry->command != command)) {
131 /*
132 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
133 * NB: le16_to_cpu returns unsigned so can not be negative below
134 */
135 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
136 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
137
138 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
139 midEntry->when_sent, midEntry->when_received);
140 if (cifsFYI & CIFS_TIMER) {
141 pr_debug("slow rsp: cmd %d mid %llu",
142 midEntry->command, midEntry->mid);
143 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
144 now - midEntry->when_alloc,
145 now - midEntry->when_sent,
146 now - midEntry->when_received);
147 }
148 }
149 #endif
150 put_task_struct(midEntry->creator);
151
152 mempool_free(midEntry, cifs_mid_poolp);
153 }
154
155 void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
156 {
157 spin_lock(&GlobalMid_Lock);
158 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
159 spin_unlock(&GlobalMid_Lock);
160 }
161
162 void DeleteMidQEntry(struct mid_q_entry *midEntry)
163 {
164 cifs_mid_q_entry_release(midEntry);
165 }
166
167 void
168 cifs_delete_mid(struct mid_q_entry *mid)
169 {
170 spin_lock(&GlobalMid_Lock);
171 if (!(mid->mid_flags & MID_DELETED)) {
172 list_del_init(&mid->qhead);
173 mid->mid_flags |= MID_DELETED;
174 }
175 spin_unlock(&GlobalMid_Lock);
176
177 DeleteMidQEntry(mid);
178 }
179
180 /*
181 * smb_send_kvec - send an array of kvecs to the server
182 * @server: Server to send the data to
183 * @smb_msg: Message to send
184 * @sent: amount of data sent on socket is stored here
185 *
186 * Our basic "send data to server" function. Should be called with srv_mutex
187 * held. The caller is responsible for handling the results.
188 */
189 static int
190 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
191 size_t *sent)
192 {
193 int rc = 0;
194 int retries = 0;
195 struct socket *ssocket = server->ssocket;
196
197 *sent = 0;
198
199 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
200 smb_msg->msg_namelen = sizeof(struct sockaddr);
201 smb_msg->msg_control = NULL;
202 smb_msg->msg_controllen = 0;
203 if (server->noblocksnd)
204 smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
205 else
206 smb_msg->msg_flags = MSG_NOSIGNAL;
207
208 while (msg_data_left(smb_msg)) {
209 /*
210 * If blocking send, we try 3 times, since each can block
211 * for 5 seconds. For nonblocking we have to try more
212 * but wait increasing amounts of time allowing time for
213 * socket to clear. The overall time we wait in either
214 * case to send on the socket is about 15 seconds.
215 * Similarly we wait for 15 seconds for a response from
216 * the server in SendReceive[2] for the server to send
217 * a response back for most types of requests (except
218 * SMB Write past end of file which can be slow, and
219 * blocking lock operations). NFS waits slightly longer
220 * than CIFS, but this can make it take longer for
221 * nonresponsive servers to be detected and 15 seconds
222 * is more than enough time for modern networks to
223 * send a packet. In most cases if we fail to send
224 * after the retries we will kill the socket and
225 * reconnect which may clear the network problem.
226 */
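/*
 * Backoff arithmetic for the nonblocking case: msleep(1 << retries)
 * below sleeps 2 + 4 + ... + 8192 ms as retries runs from 1 to 13,
 * roughly 16 seconds in total, before the 14th consecutive -EAGAIN
 * gives up.
 */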
227 rc = sock_sendmsg(ssocket, smb_msg);
228 if (rc == -EAGAIN) {
229 retries++;
230 if (retries >= 14 ||
231 (!server->noblocksnd && (retries > 2))) {
232 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
233 ssocket);
234 return -EAGAIN;
235 }
236 msleep(1 << retries);
237 continue;
238 }
239
240 if (rc < 0)
241 return rc;
242
243 if (rc == 0) {
244 /* should never happen, letting socket clear before
245 retrying is our only obvious option here */
246 cifs_server_dbg(VFS, "tcp sent no data\n");
247 msleep(500);
248 continue;
249 }
250
251 /* send was at least partially successful */
252 *sent += rc;
253 retries = 0; /* in case we get ENOSPC on the next send */
254 }
255 return 0;
256 }
257
258 unsigned long
259 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
260 {
261 unsigned int i;
262 struct kvec *iov;
263 int nvec;
264 unsigned long buflen = 0;
265
266 if (server->vals->header_preamble_size == 0 &&
267 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
268 iov = &rqst->rq_iov[1];
269 nvec = rqst->rq_nvec - 1;
270 } else {
271 iov = rqst->rq_iov;
272 nvec = rqst->rq_nvec;
273 }
274
275 /* total up iov array first */
276 for (i = 0; i < nvec; i++)
277 buflen += iov[i].iov_len;
278
279 /*
280 * Add in the page array if there is one. The caller needs to make
281 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
282 * multiple pages ends at page boundary, rq_tailsz needs to be set to
283 * PAGE_SIZE.
284 */
285 if (rqst->rq_npages) {
286 if (rqst->rq_npages == 1)
287 buflen += rqst->rq_tailsz;
288 else {
289 /*
290 * If there is more than one page, calculate the
291 * buffer length based on rq_offset and rq_tailsz
292 */
293 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
294 rqst->rq_offset;
295 buflen += rqst->rq_tailsz;
296 }
297 }
298
299 return buflen;
300 }
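/*
 * Worked example with illustrative values: for rq_npages = 3,
 * rq_pagesz = 4096, rq_offset = 512 and rq_tailsz = 100 the page-array
 * contribution is 4096 * 2 - 512 + 100 = 7780 bytes, added on top of
 * the summed kvec lengths.
 */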
301
302 static int
303 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
304 struct smb_rqst *rqst)
305 {
306 int rc = 0;
307 struct kvec *iov;
308 int n_vec;
309 unsigned int send_length = 0;
310 unsigned int i, j;
311 sigset_t mask, oldmask;
312 size_t total_len = 0, sent, size;
313 struct socket *ssocket = server->ssocket;
314 struct msghdr smb_msg;
315 __be32 rfc1002_marker;
316
317 if (cifs_rdma_enabled(server)) {
318 /* return -EAGAIN when connecting or reconnecting */
319 rc = -EAGAIN;
320 if (server->smbd_conn)
321 rc = smbd_send(server, num_rqst, rqst);
322 goto smbd_done;
323 }
324
325 if (ssocket == NULL)
326 return -EAGAIN;
327
328 if (fatal_signal_pending(current)) {
329 cifs_dbg(FYI, "signal pending before send request\n");
330 return -ERESTARTSYS;
331 }
332
333 /* cork the socket */
334 tcp_sock_set_cork(ssocket->sk, true);
335
336 for (j = 0; j < num_rqst; j++)
337 send_length += smb_rqst_len(server, &rqst[j]);
338 rfc1002_marker = cpu_to_be32(send_length);
339
340 /*
341 * We should not allow signals to interrupt the network send because
342 * any partial send will cause session reconnects thus increasing
343 * latency of system calls and overload a server with unnecessary
344 * requests.
345 */
346
347 sigfillset(&mask);
348 sigprocmask(SIG_BLOCK, &mask, &oldmask);
349
350 /* Generate a rfc1002 marker for SMB2+ */
351 if (server->vals->header_preamble_size == 0) {
352 struct kvec hiov = {
353 .iov_base = &rfc1002_marker,
354 .iov_len = 4
355 };
356 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
357 rc = smb_send_kvec(server, &smb_msg, &sent);
358 if (rc < 0)
359 goto unmask;
360
361 total_len += sent;
362 send_length += 4;
363 }
364
365 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
366
367 for (j = 0; j < num_rqst; j++) {
368 iov = rqst[j].rq_iov;
369 n_vec = rqst[j].rq_nvec;
370
371 size = 0;
372 for (i = 0; i < n_vec; i++) {
373 dump_smb(iov[i].iov_base, iov[i].iov_len);
374 size += iov[i].iov_len;
375 }
376
377 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
378
379 rc = smb_send_kvec(server, &smb_msg, &sent);
380 if (rc < 0)
381 goto unmask;
382
383 total_len += sent;
384
385 /* now walk the page array and send each page in it */
386 for (i = 0; i < rqst[j].rq_npages; i++) {
387 struct bio_vec bvec;
388
389 bvec.bv_page = rqst[j].rq_pages[i];
390 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
391 &bvec.bv_offset);
392
393 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
394 &bvec, 1, bvec.bv_len);
395 rc = smb_send_kvec(server, &smb_msg, &sent);
396 if (rc < 0)
397 break;
398
399 total_len += sent;
400 }
401 }
402
403 unmask:
404 sigprocmask(SIG_SETMASK, &oldmask, NULL);
405
406 /*
407 * If signal is pending but we have already sent the whole packet to
408 * the server we need to return success status to allow a corresponding
409 * mid entry to be kept in the pending requests queue thus allowing
410 * to handle responses from the server by the client.
411 *
412 * If only part of the packet has been sent there is no need to hide
413 * interrupt because the session will be reconnected anyway, so there
414 * won't be any response from the server to handle.
415 */
416
417 if (signal_pending(current) && (total_len != send_length)) {
418 cifs_dbg(FYI, "signal is pending after attempt to send\n");
419 rc = -ERESTARTSYS;
420 }
421
422 /* uncork it */
423 tcp_sock_set_cork(ssocket->sk, false);
424
425 if ((total_len > 0) && (total_len != send_length)) {
426 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
427 send_length, total_len);
428 /*
429 * If we have only sent part of an SMB then the next SMB could
430 * be taken as the remainder of this one. We need to kill the
431 * socket so the server throws away the partial SMB
432 */
433 spin_lock(&GlobalMid_Lock);
434 server->tcpStatus = CifsNeedReconnect;
435 spin_unlock(&GlobalMid_Lock);
436 trace_smb3_partial_send_reconnect(server->CurrentMid,
437 server->conn_id, server->hostname);
438 }
439 smbd_done:
440 if (rc < 0 && rc != -EINTR)
441 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
442 rc);
443 else if (rc > 0)
444 rc = 0;
445
446 return rc;
447 }
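/*
 * Rough picture of what the function above puts on the socket for SMB2+
 * (header_preamble_size == 0), sketched for two compounded requests:
 *
 *	[ 4-byte big-endian RFC1002 length ]
 *	[ rqst[0] kvecs ][ rqst[0] pages ]
 *	[ rqst[1] kvecs ][ rqst[1] pages ]
 *
 * where the length field is the sum of smb_rqst_len() over both requests.
 */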
448
449 static int
450 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
451 struct smb_rqst *rqst, int flags)
452 {
453 struct kvec iov;
454 struct smb2_transform_hdr *tr_hdr;
455 struct smb_rqst cur_rqst[MAX_COMPOUND];
456 int rc;
457
458 if (!(flags & CIFS_TRANSFORM_REQ))
459 return __smb_send_rqst(server, num_rqst, rqst);
460
461 if (num_rqst > MAX_COMPOUND - 1)
462 return -ENOMEM;
463
464 if (!server->ops->init_transform_rq) {
465 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
466 return -EIO;
467 }
468
469 tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
470 if (!tr_hdr)
471 return -ENOMEM;
472
473 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
474 memset(&iov, 0, sizeof(iov));
475 memset(tr_hdr, 0, sizeof(*tr_hdr));
476
477 iov.iov_base = tr_hdr;
478 iov.iov_len = sizeof(*tr_hdr);
479 cur_rqst[0].rq_iov = &iov;
480 cur_rqst[0].rq_nvec = 1;
481
482 rc = server->ops->init_transform_rq(server, num_rqst + 1,
483 &cur_rqst[0], rqst);
484 if (rc)
485 goto out;
486
487 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
488 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
489 out:
490 kfree(tr_hdr);
491 return rc;
492 }
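/*
 * Shape of the array handed to __smb_send_rqst() in the encrypted case
 * (a sketch of what init_transform_rq is expected to build):
 *
 *	cur_rqst[0]            - single kvec holding struct smb2_transform_hdr
 *	cur_rqst[1..num_rqst]  - transformed (encrypted) copies of rqst[]
 *
 * which is why num_rqst must leave one spare slot below MAX_COMPOUND.
 */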
493
494 int
495 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
496 unsigned int smb_buf_length)
497 {
498 struct kvec iov[2];
499 struct smb_rqst rqst = { .rq_iov = iov,
500 .rq_nvec = 2 };
501
502 iov[0].iov_base = smb_buffer;
503 iov[0].iov_len = 4;
504 iov[1].iov_base = (char *)smb_buffer + 4;
505 iov[1].iov_len = smb_buf_length;
506
507 return __smb_send_rqst(server, 1, &rqst);
508 }
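/*
 * Illustrative call (see SendReceive() below for the real one):
 *
 *	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
 *
 * i.e. smb_buf_length counts everything after the 4-byte RFC1002 header
 * that iov[0] points at.
 */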
509
510 static int
511 wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
512 const int timeout, const int flags,
513 unsigned int *instance)
514 {
515 long rc;
516 int *credits;
517 int optype;
518 long int t;
519 int scredits, in_flight;
520
521 if (timeout < 0)
522 t = MAX_JIFFY_OFFSET;
523 else
524 t = msecs_to_jiffies(timeout);
525
526 optype = flags & CIFS_OP_MASK;
527
528 *instance = 0;
529
530 credits = server->ops->get_credits_field(server, optype);
531 /* Since an echo is already inflight, no need to wait to send another */
532 if (*credits <= 0 && optype == CIFS_ECHO_OP)
533 return -EAGAIN;
534
535 spin_lock(&server->req_lock);
536 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
537 /* oplock breaks must not be held up */
538 server->in_flight++;
539 if (server->in_flight > server->max_in_flight)
540 server->max_in_flight = server->in_flight;
541 *credits -= 1;
542 *instance = server->reconnect_instance;
543 scredits = *credits;
544 in_flight = server->in_flight;
545 spin_unlock(&server->req_lock);
546
547 trace_smb3_add_credits(server->CurrentMid,
548 server->conn_id, server->hostname, scredits, -1, in_flight);
549 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
550 __func__, 1, scredits);
551
552 return 0;
553 }
554
555 while (1) {
556 if (*credits < num_credits) {
557 scredits = *credits;
558 spin_unlock(&server->req_lock);
559
560 cifs_num_waiters_inc(server);
561 rc = wait_event_killable_timeout(server->request_q,
562 has_credits(server, credits, num_credits), t);
563 cifs_num_waiters_dec(server);
564 if (!rc) {
565 spin_lock(&server->req_lock);
566 scredits = *credits;
567 in_flight = server->in_flight;
568 spin_unlock(&server->req_lock);
569
570 trace_smb3_credit_timeout(server->CurrentMid,
571 server->conn_id, server->hostname, scredits,
572 num_credits, in_flight);
573 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
574 timeout);
575 return -EBUSY;
576 }
577 if (rc == -ERESTARTSYS)
578 return -ERESTARTSYS;
579 spin_lock(&server->req_lock);
580 } else {
581 if (server->tcpStatus == CifsExiting) {
582 spin_unlock(&server->req_lock);
583 return -ENOENT;
584 }
585
586 /*
587 * For normal commands, reserve the last MAX_COMPOUND
588 * credits for compound requests.
589 * Otherwise these compounds could be permanently
590 * starved for credits by single-credit requests.
591 *
592 * To prevent spinning CPU, block this thread until
593 * there are >MAX_COMPOUND credits available.
594 * But only do this if we already have a lot of
595 * credits in flight to avoid triggering this check
596 * for servers that are slow to hand out credits on
597 * new sessions.
598 */
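/*
 * Concrete illustration (assuming MAX_COMPOUND == 5): a single-credit
 * request with more than 10 requests already in flight and 5 or fewer
 * credits left blocks here until at least 6 credits are available,
 * keeping the reserve free for compound chains.
 */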
599 if (!optype && num_credits == 1 &&
600 server->in_flight > 2 * MAX_COMPOUND &&
601 *credits <= MAX_COMPOUND) {
602 spin_unlock(&server->req_lock);
603
604 cifs_num_waiters_inc(server);
605 rc = wait_event_killable_timeout(
606 server->request_q,
607 has_credits(server, credits,
608 MAX_COMPOUND + 1),
609 t);
610 cifs_num_waiters_dec(server);
611 if (!rc) {
612 spin_lock(&server->req_lock);
613 scredits = *credits;
614 in_flight = server->in_flight;
615 spin_unlock(&server->req_lock);
616
617 trace_smb3_credit_timeout(
618 server->CurrentMid,
619 server->conn_id, server->hostname,
620 scredits, num_credits, in_flight);
621 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
622 timeout);
623 return -EBUSY;
624 }
625 if (rc == -ERESTARTSYS)
626 return -ERESTARTSYS;
627 spin_lock(&server->req_lock);
628 continue;
629 }
630
631 /*
632 * Can not count locking commands against total
633 * as they are allowed to block on server.
634 */
635
636 /* update # of requests on the wire to server */
637 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
638 *credits -= num_credits;
639 server->in_flight += num_credits;
640 if (server->in_flight > server->max_in_flight)
641 server->max_in_flight = server->in_flight;
642 *instance = server->reconnect_instance;
643 }
644 scredits = *credits;
645 in_flight = server->in_flight;
646 spin_unlock(&server->req_lock);
647
648 trace_smb3_add_credits(server->CurrentMid,
649 server->conn_id, server->hostname, scredits,
650 -(num_credits), in_flight);
651 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
652 __func__, num_credits, scredits);
653 break;
654 }
655 }
656 return 0;
657 }
658
659 static int
660 wait_for_free_request(struct TCP_Server_Info *server, const int flags,
661 unsigned int *instance)
662 {
663 return wait_for_free_credits(server, 1, -1, flags,
664 instance);
665 }
666
667 static int
668 wait_for_compound_request(struct TCP_Server_Info *server, int num,
669 const int flags, unsigned int *instance)
670 {
671 int *credits;
672 int scredits, in_flight;
673
674 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
675
676 spin_lock(&server->req_lock);
677 scredits = *credits;
678 in_flight = server->in_flight;
679
680 if (*credits < num) {
681 /*
682 * If the server is tight on resources or just gives us less
683 * credits for other reasons (e.g. requests are coming out of
684 * order and the server delays granting more credits until it
685 * processes a missing mid) and we exhausted most available
686 * credits there may be situations when we try to send
687 * a compound request but we don't have enough credits. At this
688 * point the client needs to decide if it should wait for
689 * additional credits or fail the request. If at least one
690 * request is in flight there is a high probability that the
691 * server will return enough credits to satisfy this compound
692 * request.
693 *
694 * Return immediately if no requests in flight since we will be
695 * stuck on waiting for credits.
696 */
697 if (server->in_flight == 0) {
698 spin_unlock(&server->req_lock);
699 trace_smb3_insufficient_credits(server->CurrentMid,
700 server->conn_id, server->hostname, scredits,
701 num, in_flight);
702 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
703 __func__, in_flight, num, scredits);
704 return -EDEADLK;
705 }
706 }
707 spin_unlock(&server->req_lock);
708
709 return wait_for_free_credits(server, num, 60000, flags,
710 instance);
711 }
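/*
 * Example with illustrative numbers: asking for num == 3 credits while
 * only 1 is left fails fast with -EDEADLK if nothing is in flight,
 * since no outstanding response could ever replenish the credits;
 * otherwise we fall through and give the server up to 60 seconds
 * (the 60000 ms timeout) in wait_for_free_credits().
 */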
712
713 int
714 cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
715 unsigned int *num, struct cifs_credits *credits)
716 {
717 *num = size;
718 credits->value = 0;
719 credits->instance = server->reconnect_instance;
720 return 0;
721 }
722
723 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
724 struct mid_q_entry **ppmidQ)
725 {
726 if (ses->server->tcpStatus == CifsExiting) {
727 return -ENOENT;
728 }
729
730 if (ses->server->tcpStatus == CifsNeedReconnect) {
731 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
732 return -EAGAIN;
733 }
734
735 if (ses->status == CifsNew) {
736 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
737 (in_buf->Command != SMB_COM_NEGOTIATE))
738 return -EAGAIN;
739 /* else ok - we are setting up session */
740 }
741
742 if (ses->status == CifsExiting) {
743 /* check if SMB session is bad because we are setting it up */
744 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
745 return -EAGAIN;
746 /* else ok - we are shutting down session */
747 }
748
749 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
750 if (*ppmidQ == NULL)
751 return -ENOMEM;
752 spin_lock(&GlobalMid_Lock);
753 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
754 spin_unlock(&GlobalMid_Lock);
755 return 0;
756 }
757
758 static int
759 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
760 {
761 int error;
762
763 error = wait_event_freezekillable_unsafe(server->response_q,
764 midQ->mid_state != MID_REQUEST_SUBMITTED);
765 if (error < 0)
766 return -ERESTARTSYS;
767
768 return 0;
769 }
770
771 struct mid_q_entry *
772 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
773 {
774 int rc;
775 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
776 struct mid_q_entry *mid;
777
778 if (rqst->rq_iov[0].iov_len != 4 ||
779 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
780 return ERR_PTR(-EIO);
781
782 /* enable signing if server requires it */
783 if (server->sign)
784 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
785
786 mid = AllocMidQEntry(hdr, server);
787 if (mid == NULL)
788 return ERR_PTR(-ENOMEM);
789
790 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
791 if (rc) {
792 DeleteMidQEntry(mid);
793 return ERR_PTR(rc);
794 }
795
796 return mid;
797 }
798
799 /*
800 * Send a SMB request and set the callback function in the mid to handle
801 * the result. Caller is responsible for dealing with timeouts.
802 */
803 int
804 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
805 mid_receive_t *receive, mid_callback_t *callback,
806 mid_handle_t *handle, void *cbdata, const int flags,
807 const struct cifs_credits *exist_credits)
808 {
809 int rc;
810 struct mid_q_entry *mid;
811 struct cifs_credits credits = { .value = 0, .instance = 0 };
812 unsigned int instance;
813 int optype;
814
815 optype = flags & CIFS_OP_MASK;
816
817 if ((flags & CIFS_HAS_CREDITS) == 0) {
818 rc = wait_for_free_request(server, flags, &instance);
819 if (rc)
820 return rc;
821 credits.value = 1;
822 credits.instance = instance;
823 } else
824 instance = exist_credits->instance;
825
826 mutex_lock(&server->srv_mutex);
827
828 /*
829 * We can't use credits obtained from the previous session to send this
830 * request. Check if there were reconnects after we obtained credits and
831 * return -EAGAIN in such cases to let callers handle it.
832 */
833 if (instance != server->reconnect_instance) {
834 mutex_unlock(&server->srv_mutex);
835 add_credits_and_wake_if(server, &credits, optype);
836 return -EAGAIN;
837 }
838
839 mid = server->ops->setup_async_request(server, rqst);
840 if (IS_ERR(mid)) {
841 mutex_unlock(&server->srv_mutex);
842 add_credits_and_wake_if(server, &credits, optype);
843 return PTR_ERR(mid);
844 }
845
846 mid->receive = receive;
847 mid->callback = callback;
848 mid->callback_data = cbdata;
849 mid->handle = handle;
850 mid->mid_state = MID_REQUEST_SUBMITTED;
851
852 /* put it on the pending_mid_q */
853 spin_lock(&GlobalMid_Lock);
854 list_add_tail(&mid->qhead, &server->pending_mid_q);
855 spin_unlock(&GlobalMid_Lock);
856
857 /*
858 * Need to store the time in mid before calling I/O. For call_async,
859 * I/O response may come back and free the mid entry on another thread.
860 */
861 cifs_save_when_sent(mid);
862 cifs_in_send_inc(server);
863 rc = smb_send_rqst(server, 1, rqst, flags);
864 cifs_in_send_dec(server);
865
866 if (rc < 0) {
867 revert_current_mid(server, mid->credits);
868 server->sequence_number -= 2;
869 cifs_delete_mid(mid);
870 }
871
872 mutex_unlock(&server->srv_mutex);
873
874 if (rc == 0)
875 return 0;
876
877 add_credits_and_wake_if(server, &credits, optype);
878 return rc;
879 }
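/*
 * Illustrative caller (a sketch, not copied from a real call site):
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_callback, NULL,
 *			     my_ctx, CIFS_ECHO_OP, NULL);
 *
 * Here my_callback() and my_ctx are hypothetical; the callback runs from
 * the demultiplex thread once the response (or a reconnect) arrives and
 * is responsible for returning credits and releasing the mid.
 */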
880
881 /*
882 *
883 * Send an SMB Request. No response info (other than return code)
884 * needs to be parsed.
885 *
886 * flags indicate the type of request buffer and how long to wait
887 * and whether to log NT STATUS code (error) before mapping it to POSIX error
888 *
889 */
890 int
891 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
892 char *in_buf, int flags)
893 {
894 int rc;
895 struct kvec iov[1];
896 struct kvec rsp_iov;
897 int resp_buf_type;
898
899 iov[0].iov_base = in_buf;
900 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
901 flags |= CIFS_NO_RSP_BUF;
902 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
903 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
904
905 return rc;
906 }
907
908 static int
909 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
910 {
911 int rc = 0;
912
913 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
914 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
915
916 spin_lock(&GlobalMid_Lock);
917 switch (mid->mid_state) {
918 case MID_RESPONSE_RECEIVED:
919 spin_unlock(&GlobalMid_Lock);
920 return rc;
921 case MID_RETRY_NEEDED:
922 rc = -EAGAIN;
923 break;
924 case MID_RESPONSE_MALFORMED:
925 rc = -EIO;
926 break;
927 case MID_SHUTDOWN:
928 rc = -EHOSTDOWN;
929 break;
930 default:
931 if (!(mid->mid_flags & MID_DELETED)) {
932 list_del_init(&mid->qhead);
933 mid->mid_flags |= MID_DELETED;
934 }
935 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
936 __func__, mid->mid, mid->mid_state);
937 rc = -EIO;
938 }
939 spin_unlock(&GlobalMid_Lock);
940
941 DeleteMidQEntry(mid);
942 return rc;
943 }
944
945 static inline int
946 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
947 struct mid_q_entry *mid)
948 {
949 return server->ops->send_cancel ?
950 server->ops->send_cancel(server, rqst, mid) : 0;
951 }
952
953 int
954 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
955 bool log_error)
956 {
957 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
958
959 dump_smb(mid->resp_buf, min_t(u32, 92, len));
960
961 /* convert the length into a more usable form */
962 if (server->sign) {
963 struct kvec iov[2];
964 int rc = 0;
965 struct smb_rqst rqst = { .rq_iov = iov,
966 .rq_nvec = 2 };
967
968 iov[0].iov_base = mid->resp_buf;
969 iov[0].iov_len = 4;
970 iov[1].iov_base = (char *)mid->resp_buf + 4;
971 iov[1].iov_len = len - 4;
972 /* FIXME: add code to kill session */
973 rc = cifs_verify_signature(&rqst, server,
974 mid->sequence_number);
975 if (rc)
976 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
977 rc);
978 }
979
980 /* BB special case reconnect tid and uid here? */
981 return map_and_check_smb_error(mid, log_error);
982 }
983
984 struct mid_q_entry *
985 cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
986 struct smb_rqst *rqst)
987 {
988 int rc;
989 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
990 struct mid_q_entry *mid;
991
992 if (rqst->rq_iov[0].iov_len != 4 ||
993 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
994 return ERR_PTR(-EIO);
995
996 rc = allocate_mid(ses, hdr, &mid);
997 if (rc)
998 return ERR_PTR(rc);
999 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
1000 if (rc) {
1001 cifs_delete_mid(mid);
1002 return ERR_PTR(rc);
1003 }
1004 return mid;
1005 }
1006
1007 static void
1008 cifs_compound_callback(struct mid_q_entry *mid)
1009 {
1010 struct TCP_Server_Info *server = mid->server;
1011 struct cifs_credits credits;
1012
1013 credits.value = server->ops->get_credits(mid);
1014 credits.instance = server->reconnect_instance;
1015
1016 add_credits(server, &credits, mid->optype);
1017 }
1018
1019 static void
1020 cifs_compound_last_callback(struct mid_q_entry *mid)
1021 {
1022 cifs_compound_callback(mid);
1023 cifs_wake_up_task(mid);
1024 }
1025
1026 static void
1027 cifs_cancelled_callback(struct mid_q_entry *mid)
1028 {
1029 cifs_compound_callback(mid);
1030 DeleteMidQEntry(mid);
1031 }
1032
1033 /*
1034 * Return a channel (master if none) of @ses that can be used to send
1035 * regular requests.
1036 *
1037 * If we are currently binding a new channel (negprot/sess.setup),
1038 * return the new incomplete channel.
1039 */
1040 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1041 {
1042 uint index = 0;
1043
1044 if (!ses)
1045 return NULL;
1046
1047 spin_lock(&ses->chan_lock);
1048 if (!ses->binding) {
1049 /* round robin */
1050 if (ses->chan_count > 1) {
1051 index = (uint)atomic_inc_return(&ses->chan_seq);
1052 index %= ses->chan_count;
1053 }
1054 spin_unlock(&ses->chan_lock);
1055 return ses->chans[index].server;
1056 } else {
1057 spin_unlock(&ses->chan_lock);
1058 return cifs_ses_server(ses);
1059 }
1060 }
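/*
 * Example of the round robin (illustrative): with chan_count == 3, no
 * binding in progress and chan_seq starting at 0, successive calls pick
 * chans[1], chans[2], chans[0], chans[1], ...
 */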
1061
1062 int
1063 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1064 struct TCP_Server_Info *server,
1065 const int flags, const int num_rqst, struct smb_rqst *rqst,
1066 int *resp_buf_type, struct kvec *resp_iov)
1067 {
1068 int i, j, optype, rc = 0;
1069 struct mid_q_entry *midQ[MAX_COMPOUND];
1070 bool cancelled_mid[MAX_COMPOUND] = {false};
1071 struct cifs_credits credits[MAX_COMPOUND] = {
1072 { .value = 0, .instance = 0 }
1073 };
1074 unsigned int instance;
1075 char *buf;
1076
1077 optype = flags & CIFS_OP_MASK;
1078
1079 for (i = 0; i < num_rqst; i++)
1080 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
1081
1082 if (!ses || !ses->server || !server) {
1083 cifs_dbg(VFS, "Null session\n");
1084 return -EIO;
1085 }
1086
1087 if (server->tcpStatus == CifsExiting)
1088 return -ENOENT;
1089
1090 /*
1091 * Wait for all the requests to become available.
1092 * This approach still leaves the possibility of getting stuck waiting for
1093 * credits if the server doesn't grant credits to the outstanding
1094 * requests and if the client is completely idle, not generating any
1095 * other requests.
1096 * This can be handled by the eventual session reconnect.
1097 */
1098 rc = wait_for_compound_request(server, num_rqst, flags,
1099 &instance);
1100 if (rc)
1101 return rc;
1102
1103 for (i = 0; i < num_rqst; i++) {
1104 credits[i].value = 1;
1105 credits[i].instance = instance;
1106 }
1107
1108 /*
1109 * Make sure that we sign in the same order that we send on this socket
1110 * and avoid races inside tcp sendmsg code that could cause corruption
1111 * of smb data.
1112 */
1113
1114 mutex_lock(&server->srv_mutex);
1115
1116 /*
1117 * All the parts of the compound chain must use credits obtained from the
1118 * same session. We can not use credits obtained from the previous
1119 * session to send this request. Check if there were reconnects after
1120 * we obtained credits and return -EAGAIN in such cases to let callers
1121 * handle it.
1122 */
1123 if (instance != server->reconnect_instance) {
1124 mutex_unlock(&server->srv_mutex);
1125 for (j = 0; j < num_rqst; j++)
1126 add_credits(server, &credits[j], optype);
1127 return -EAGAIN;
1128 }
1129
1130 for (i = 0; i < num_rqst; i++) {
1131 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1132 if (IS_ERR(midQ[i])) {
1133 revert_current_mid(server, i);
1134 for (j = 0; j < i; j++)
1135 cifs_delete_mid(midQ[j]);
1136 mutex_unlock(&server->srv_mutex);
1137
1138 /* Update # of requests on wire to server */
1139 for (j = 0; j < num_rqst; j++)
1140 add_credits(server, &credits[j], optype);
1141 return PTR_ERR(midQ[i]);
1142 }
1143
1144 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1145 midQ[i]->optype = optype;
1146 /*
1147 * Invoke callback for every part of the compound chain
1148 * to calculate credits properly. Wake up this thread only when
1149 * the last element is received.
1150 */
1151 if (i < num_rqst - 1)
1152 midQ[i]->callback = cifs_compound_callback;
1153 else
1154 midQ[i]->callback = cifs_compound_last_callback;
1155 }
1156 cifs_in_send_inc(server);
1157 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1158 cifs_in_send_dec(server);
1159
1160 for (i = 0; i < num_rqst; i++)
1161 cifs_save_when_sent(midQ[i]);
1162
1163 if (rc < 0) {
1164 revert_current_mid(server, num_rqst);
1165 server->sequence_number -= 2;
1166 }
1167
1168 mutex_unlock(&server->srv_mutex);
1169
1170 /*
1171 * If sending failed for some reason or it is an oplock break that we
1172 * will not receive a response to - return credits back
1173 */
1174 if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1175 for (i = 0; i < num_rqst; i++)
1176 add_credits(server, &credits[i], optype);
1177 goto out;
1178 }
1179
1180 /*
1181 * At this point the request is passed to the network stack - we assume
1182 * that any credits taken from the server structure on the client have
1183 * been spent and we can't return them back. Once we receive responses
1184 * we will collect credits granted by the server in the mid callbacks
1185 * and add those credits to the server structure.
1186 */
1187
1188 /*
1189 * Compounding is never used during session establish.
1190 */
1191 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1192 mutex_lock(&server->srv_mutex);
1193 smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1194 rqst[0].rq_nvec);
1195 mutex_unlock(&server->srv_mutex);
1196 }
1197
1198 for (i = 0; i < num_rqst; i++) {
1199 rc = wait_for_response(server, midQ[i]);
1200 if (rc != 0)
1201 break;
1202 }
1203 if (rc != 0) {
1204 for (; i < num_rqst; i++) {
1205 cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1206 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1207 send_cancel(server, &rqst[i], midQ[i]);
1208 spin_lock(&GlobalMid_Lock);
1209 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1210 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1211 midQ[i]->callback = cifs_cancelled_callback;
1212 cancelled_mid[i] = true;
1213 credits[i].value = 0;
1214 }
1215 spin_unlock(&GlobalMid_Lock);
1216 }
1217 }
1218
1219 for (i = 0; i < num_rqst; i++) {
1220 if (rc < 0)
1221 goto out;
1222
1223 rc = cifs_sync_mid_result(midQ[i], server);
1224 if (rc != 0) {
1225 /* mark this mid as cancelled to not free it below */
1226 cancelled_mid[i] = true;
1227 goto out;
1228 }
1229
1230 if (!midQ[i]->resp_buf ||
1231 midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1232 rc = -EIO;
1233 cifs_dbg(FYI, "Bad MID state?\n");
1234 goto out;
1235 }
1236
1237 buf = (char *)midQ[i]->resp_buf;
1238 resp_iov[i].iov_base = buf;
1239 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1240 server->vals->header_preamble_size;
1241
1242 if (midQ[i]->large_buf)
1243 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1244 else
1245 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1246
1247 rc = server->ops->check_receive(midQ[i], server,
1248 flags & CIFS_LOG_ERROR);
1249
1250 /* mark it so buf will not be freed by cifs_delete_mid */
1251 if ((flags & CIFS_NO_RSP_BUF) == 0)
1252 midQ[i]->resp_buf = NULL;
1253
1254 }
1255
1256 /*
1257 * Compounding is never used during session establish.
1258 */
1259 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1260 struct kvec iov = {
1261 .iov_base = resp_iov[0].iov_base,
1262 .iov_len = resp_iov[0].iov_len
1263 };
1264 mutex_lock(&server->srv_mutex);
1265 smb311_update_preauth_hash(ses, &iov, 1);
1266 mutex_unlock(&server->srv_mutex);
1267 }
1268
1269 out:
1270 /*
1271 * This will dequeue all mids. After this it is important that the
1272 * demultiplex_thread will not process any of these mids any further.
1273 * This is prevented above by using a noop callback that will not
1274 * wake this thread except for the very last PDU.
1275 */
1276 for (i = 0; i < num_rqst; i++) {
1277 if (!cancelled_mid[i])
1278 cifs_delete_mid(midQ[i]);
1279 }
1280
1281 return rc;
1282 }
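/*
 * Illustrative compound caller (a sketch): an "open + query info + close"
 * chain builds three smb_rqst entries and then does roughly
 *
 *	rc = compound_send_recv(xid, ses, server, flags, 3, rqst,
 *				resp_buf_type, rsp_iov);
 *
 * with the first two mids completed by cifs_compound_callback() and only
 * the last one waking this thread via cifs_compound_last_callback().
 */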
1283
1284 int
1285 cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1286 struct TCP_Server_Info *server,
1287 struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1288 struct kvec *resp_iov)
1289 {
1290 return compound_send_recv(xid, ses, server, flags, 1,
1291 rqst, resp_buf_type, resp_iov);
1292 }
1293
1294 int
1295 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1296 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1297 const int flags, struct kvec *resp_iov)
1298 {
1299 struct smb_rqst rqst;
1300 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1301 int rc;
1302
1303 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1304 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1305 GFP_KERNEL);
1306 if (!new_iov) {
1307 /* otherwise cifs_send_recv below sets resp_buf_type */
1308 *resp_buf_type = CIFS_NO_BUFFER;
1309 return -ENOMEM;
1310 }
1311 } else
1312 new_iov = s_iov;
1313
1314 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1315 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1316
1317 new_iov[0].iov_base = new_iov[1].iov_base;
1318 new_iov[0].iov_len = 4;
1319 new_iov[1].iov_base += 4;
1320 new_iov[1].iov_len -= 4;
1321
1322 memset(&rqst, 0, sizeof(struct smb_rqst));
1323 rqst.rq_iov = new_iov;
1324 rqst.rq_nvec = n_vec + 1;
1325
1326 rc = cifs_send_recv(xid, ses, ses->server,
1327 &rqst, resp_buf_type, flags, resp_iov);
1328 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1329 kfree(new_iov);
1330 return rc;
1331 }
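/*
 * Shape of the conversion above: the caller's first vector begins with
 * the 4-byte RFC1001 length, so it is split in two while the remaining
 * vectors shift up one slot, e.g. for n_vec == 2:
 *
 *	new_iov[0] = { iov[0].iov_base,     4 }
 *	new_iov[1] = { iov[0].iov_base + 4, iov[0].iov_len - 4 }
 *	new_iov[2] = iov[1]
 */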
1332
1333 int
1334 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1335 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1336 int *pbytes_returned, const int flags)
1337 {
1338 int rc = 0;
1339 struct mid_q_entry *midQ;
1340 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1341 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1342 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1343 struct cifs_credits credits = { .value = 1, .instance = 0 };
1344 struct TCP_Server_Info *server;
1345
1346 if (ses == NULL) {
1347 cifs_dbg(VFS, "Null smb session\n");
1348 return -EIO;
1349 }
1350 server = ses->server;
1351 if (server == NULL) {
1352 cifs_dbg(VFS, "Null tcp session\n");
1353 return -EIO;
1354 }
1355
1356 if (server->tcpStatus == CifsExiting)
1357 return -ENOENT;
1358
1359 /* Ensure that we do not send more than 50 overlapping requests
1360 to the same server. We may make this configurable later or
1361 use ses->maxReq */
1362
1363 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1364 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1365 len);
1366 return -EIO;
1367 }
1368
1369 rc = wait_for_free_request(server, flags, &credits.instance);
1370 if (rc)
1371 return rc;
1372
1373 /* make sure that we sign in the same order that we send on this socket
1374 and avoid races inside tcp sendmsg code that could cause corruption
1375 of smb data */
1376
1377 mutex_lock(&server->srv_mutex);
1378
1379 rc = allocate_mid(ses, in_buf, &midQ);
1380 if (rc) {
1381 mutex_unlock(&server->srv_mutex);
1382 /* Update # of requests on wire to server */
1383 add_credits(server, &credits, 0);
1384 return rc;
1385 }
1386
1387 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1388 if (rc) {
1389 mutex_unlock(&server->srv_mutex);
1390 goto out;
1391 }
1392
1393 midQ->mid_state = MID_REQUEST_SUBMITTED;
1394
1395 cifs_in_send_inc(server);
1396 rc = smb_send(server, in_buf, len);
1397 cifs_in_send_dec(server);
1398 cifs_save_when_sent(midQ);
1399
1400 if (rc < 0)
1401 server->sequence_number -= 2;
1402
1403 mutex_unlock(&server->srv_mutex);
1404
1405 if (rc < 0)
1406 goto out;
1407
1408 rc = wait_for_response(server, midQ);
1409 if (rc != 0) {
1410 send_cancel(server, &rqst, midQ);
1411 spin_lock(&GlobalMid_Lock);
1412 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1413 /* no longer considered to be "in-flight" */
1414 midQ->callback = DeleteMidQEntry;
1415 spin_unlock(&GlobalMid_Lock);
1416 add_credits(server, &credits, 0);
1417 return rc;
1418 }
1419 spin_unlock(&GlobalMid_Lock);
1420 }
1421
1422 rc = cifs_sync_mid_result(midQ, server);
1423 if (rc != 0) {
1424 add_credits(server, &credits, 0);
1425 return rc;
1426 }
1427
1428 if (!midQ->resp_buf || !out_buf ||
1429 midQ->mid_state != MID_RESPONSE_RECEIVED) {
1430 rc = -EIO;
1431 cifs_server_dbg(VFS, "Bad MID state?\n");
1432 goto out;
1433 }
1434
1435 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1436 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1437 rc = cifs_check_receive(midQ, server, 0);
1438 out:
1439 cifs_delete_mid(midQ);
1440 add_credits(server, &credits, 0);
1441
1442 return rc;
1443 }
1444
1445 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1446 blocking lock to return. */
1447
1448 static int
1449 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1450 struct smb_hdr *in_buf,
1451 struct smb_hdr *out_buf)
1452 {
1453 int bytes_returned;
1454 struct cifs_ses *ses = tcon->ses;
1455 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1456
1457 /* We just modify the current in_buf to change
1458 the type of lock from LOCKING_ANDX_SHARED_LOCK
1459 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1460 LOCKING_ANDX_CANCEL_LOCK. */
1461
1462 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1463 pSMB->Timeout = 0;
1464 pSMB->hdr.Mid = get_next_mid(ses->server);
1465
1466 return SendReceive(xid, ses, in_buf, out_buf,
1467 &bytes_returned, 0);
1468 }
1469
1470 int
1471 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1472 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1473 int *pbytes_returned)
1474 {
1475 int rc = 0;
1476 int rstart = 0;
1477 struct mid_q_entry *midQ;
1478 struct cifs_ses *ses;
1479 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1480 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1481 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1482 unsigned int instance;
1483 struct TCP_Server_Info *server;
1484
1485 if (tcon == NULL || tcon->ses == NULL) {
1486 cifs_dbg(VFS, "Null smb session\n");
1487 return -EIO;
1488 }
1489 ses = tcon->ses;
1490 server = ses->server;
1491
1492 if (server == NULL) {
1493 cifs_dbg(VFS, "Null tcp session\n");
1494 return -EIO;
1495 }
1496
1497 if (server->tcpStatus == CifsExiting)
1498 return -ENOENT;
1499
1500 /* Ensure that we do not send more than 50 overlapping requests
1501 to the same server. We may make this configurable later or
1502 use ses->maxReq */
1503
1504 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1505 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1506 len);
1507 return -EIO;
1508 }
1509
1510 rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1511 if (rc)
1512 return rc;
1513
1514 /* make sure that we sign in the same order that we send on this socket
1515 and avoid races inside tcp sendmsg code that could cause corruption
1516 of smb data */
1517
1518 mutex_lock(&server->srv_mutex);
1519
1520 rc = allocate_mid(ses, in_buf, &midQ);
1521 if (rc) {
1522 mutex_unlock(&server->srv_mutex);
1523 return rc;
1524 }
1525
1526 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1527 if (rc) {
1528 cifs_delete_mid(midQ);
1529 mutex_unlock(&server->srv_mutex);
1530 return rc;
1531 }
1532
1533 midQ->mid_state = MID_REQUEST_SUBMITTED;
1534 cifs_in_send_inc(server);
1535 rc = smb_send(server, in_buf, len);
1536 cifs_in_send_dec(server);
1537 cifs_save_when_sent(midQ);
1538
1539 if (rc < 0)
1540 server->sequence_number -= 2;
1541
1542 mutex_unlock(&server->srv_mutex);
1543
1544 if (rc < 0) {
1545 cifs_delete_mid(midQ);
1546 return rc;
1547 }
1548
1549 /* Wait for a reply - allow signals to interrupt. */
1550 rc = wait_event_interruptible(server->response_q,
1551 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1552 ((server->tcpStatus != CifsGood) &&
1553 (server->tcpStatus != CifsNew)));
1554
1555 /* Were we interrupted by a signal ? */
1556 if ((rc == -ERESTARTSYS) &&
1557 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1558 ((server->tcpStatus == CifsGood) ||
1559 (server->tcpStatus == CifsNew))) {
1560
1561 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1562 /* POSIX lock. We send a NT_CANCEL SMB to cause the
1563 blocking lock to return. */
1564 rc = send_cancel(server, &rqst, midQ);
1565 if (rc) {
1566 cifs_delete_mid(midQ);
1567 return rc;
1568 }
1569 } else {
1570 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1571 to cause the blocking lock to return. */
1572
1573 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1574
1575 /* If we get -ENOLCK back the lock may have
1576 already been removed. Don't exit in this case. */
1577 if (rc && rc != -ENOLCK) {
1578 cifs_delete_mid(midQ);
1579 return rc;
1580 }
1581 }
1582
1583 rc = wait_for_response(server, midQ);
1584 if (rc) {
1585 send_cancel(server, &rqst, midQ);
1586 spin_lock(&GlobalMid_Lock);
1587 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1588 /* no longer considered to be "in-flight" */
1589 midQ->callback = DeleteMidQEntry;
1590 spin_unlock(&GlobalMid_Lock);
1591 return rc;
1592 }
1593 spin_unlock(&GlobalMid_Lock);
1594 }
1595
1596 /* We got the response - restart system call. */
1597 rstart = 1;
1598 }
1599
1600 rc = cifs_sync_mid_result(midQ, server);
1601 if (rc != 0)
1602 return rc;
1603
1604 /* rcvd frame is ok */
1605 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1606 rc = -EIO;
1607 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1608 goto out;
1609 }
1610
1611 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1612 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1613 rc = cifs_check_receive(midQ, server, 0);
1614 out:
1615 cifs_delete_mid(midQ);
1616 if (rstart && rc == -EACCES)
1617 return -ERESTARTSYS;
1618 return rc;
1619 }
1620