// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
    wake_up_process(mid->callback_data);
}

static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
    struct mid_q_entry *temp;

    if (server == NULL) {
        cifs_dbg(VFS, "%s: null TCP session\n", __func__);
        return NULL;
    }

    temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
    memset(temp, 0, sizeof(struct mid_q_entry));
    kref_init(&temp->refcount);
    temp->mid = get_mid(smb_buffer);
    temp->pid = current->pid;
    temp->command = cpu_to_le16(smb_buffer->Command);
    cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
    /* jiffies is easier to use here; note that the mid can be
       allocated before it is sent */
    temp->when_alloc = jiffies;
    temp->server = server;

    /*
     * The default is for the mid to be synchronous, so the
     * default callback just wakes up the current task.
     */
    get_task_struct(current);
    temp->creator = current;
    temp->callback = cifs_wake_up_task;
    temp->callback_data = current;

    atomic_inc(&mid_count);
    temp->mid_state = MID_REQUEST_ALLOCATED;
    return temp;
}
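
/*
 * Illustrative sketch, not part of the driver: an asynchronous caller is
 * expected to override the synchronous defaults that alloc_mid() sets up.
 * cifs_call_async() further down in this file does exactly that:
 *
 *	mid->receive       = receive;	 (parses the response bytes)
 *	mid->callback      = callback;	 (completion, runs in cifsd)
 *	mid->callback_data = cbdata;
 *
 * If the defaults are left in place, the arrival of the response simply
 * wakes up the task that allocated the mid (see cifs_wake_up_task above).
 */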

static void __release_mid(struct kref *refcount)
{
    struct mid_q_entry *midEntry =
            container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
    __le16 command = midEntry->server->vals->lock_cmd;
    __u16 smb_cmd = le16_to_cpu(midEntry->command);
    unsigned long now;
    unsigned long roundtrip_time;
#endif
    struct TCP_Server_Info *server = midEntry->server;

    if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
        midEntry->mid_state == MID_RESPONSE_RECEIVED &&
        server->ops->handle_cancelled_mid)
        server->ops->handle_cancelled_mid(midEntry, server);

    midEntry->mid_state = MID_FREE;
    atomic_dec(&mid_count);
    if (midEntry->large_buf)
        cifs_buf_release(midEntry->resp_buf);
    else
        cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
    now = jiffies;
    if (now < midEntry->when_alloc)
        cifs_server_dbg(VFS, "Invalid mid allocation time\n");
    roundtrip_time = now - midEntry->when_alloc;

    if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
        if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
            server->slowest_cmd[smb_cmd] = roundtrip_time;
            server->fastest_cmd[smb_cmd] = roundtrip_time;
        } else {
            if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                server->slowest_cmd[smb_cmd] = roundtrip_time;
            else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                server->fastest_cmd[smb_cmd] = roundtrip_time;
        }
        cifs_stats_inc(&server->num_cmds[smb_cmd]);
        server->time_per_cmd[smb_cmd] += roundtrip_time;
    }
    /*
     * Commands taking longer than one second (the default) can be an
     * indication that something is wrong, unless the link is quite slow
     * or the server very busy. Note that this calculation is unlikely or
     * impossible to wrap as long as slow_rsp_threshold is not set far
     * above the recommended maximum value (32767, i.e. 9 hours), and it
     * is generally harmless even if wrong since it only affects debug
     * counters - so leave it as a simple comparison rather than doing
     * multiple conversions and overflow checks.
     */
    if ((slow_rsp_threshold != 0) &&
        time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
        (midEntry->command != command)) {
        /*
         * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
         * NB: le16_to_cpu returns unsigned so it cannot be negative below
         */
        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
            cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

        trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                   midEntry->when_sent, midEntry->when_received);
        if (cifsFYI & CIFS_TIMER) {
            pr_debug("slow rsp: cmd %d mid %llu",
                 midEntry->command, midEntry->mid);
            cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                  now - midEntry->when_alloc,
                  now - midEntry->when_sent,
                  now - midEntry->when_received);
        }
    }
#endif
    put_task_struct(midEntry->creator);

    mempool_free(midEntry, cifs_mid_poolp);
}

void release_mid(struct mid_q_entry *mid)
{
    struct TCP_Server_Info *server = mid->server;

    spin_lock(&server->mid_lock);
    kref_put(&mid->refcount, __release_mid);
    spin_unlock(&server->mid_lock);
}

void
delete_mid(struct mid_q_entry *mid)
{
    spin_lock(&mid->server->mid_lock);
    if (!(mid->mid_flags & MID_DELETED)) {
        list_del_init(&mid->qhead);
        mid->mid_flags |= MID_DELETED;
    }
    spin_unlock(&mid->server->mid_lock);

    release_mid(mid);
}
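
/*
 * Reference-count lifecycle, for orientation (a summary of the code above,
 * not new behaviour): alloc_mid() creates the mid holding one reference
 * via kref_init(); release_mid() drops a reference under server->mid_lock,
 * and __release_mid() frees the entry once the count hits zero;
 * delete_mid() additionally unlinks the mid from pending_mid_q (once,
 * guarded by MID_DELETED) before dropping its reference. Code that looks
 * up a mid on pending_mid_q is expected to take its own reference before
 * using it.
 */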

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg:    Message to send
 * @sent:   amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
          size_t *sent)
{
    int rc = 0;
    int retries = 0;
    struct socket *ssocket = server->ssocket;

    *sent = 0;

    if (server->noblocksnd)
        smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
    else
        smb_msg->msg_flags = MSG_NOSIGNAL;

    while (msg_data_left(smb_msg)) {
        /*
         * If this is a blocking send, we try 3 times, since each can
         * block for 5 seconds. For a nonblocking send we have to try
         * more times, waiting increasing amounts of time to allow the
         * socket to clear. The overall time we wait in either case to
         * send on the socket is about 15 seconds. Similarly we wait
         * about 15 seconds for a response from the server in
         * SendReceive[2] for most types of requests (except SMB writes
         * past end of file, which can be slow, and blocking lock
         * operations). NFS waits slightly longer than CIFS, but this
         * can make it take longer for nonresponsive servers to be
         * detected, and 15 seconds is more than enough time for modern
         * networks to send a packet. In most cases, if we fail to send
         * after the retries we will kill the socket and reconnect,
         * which may clear the network problem.
         */
        rc = sock_sendmsg(ssocket, smb_msg);
        if (rc == -EAGAIN) {
            retries++;
            if (retries >= 14 ||
                (!server->noblocksnd && (retries > 2))) {
                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                     ssocket);
                return -EAGAIN;
            }
            msleep(1 << retries);
            continue;
        }

        if (rc < 0)
            return rc;

        if (rc == 0) {
            /* should never happen, letting socket clear before
               retrying is our only obvious option here */
            cifs_server_dbg(VFS, "tcp sent no data\n");
            msleep(500);
            continue;
        }

        /* send was at least partially successful */
        *sent += rc;
        retries = 0; /* in case we get ENOSPC on the next send */
    }
    return 0;
}
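
/*
 * Worked example of the retry budget above (derived from the loop, not a
 * guarantee): with noblocksnd set, each -EAGAIN leads to msleep(1 << retries)
 * for retries = 1..13, i.e. 2 + 4 + ... + 8192 ms, roughly 16 seconds in
 * total before we give up. In the blocking case each sock_sendmsg() call can
 * itself block for about 5 seconds, so 3 attempts cover a similar ~15 second
 * window.
 */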

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
    unsigned int i;
    struct kvec *iov;
    int nvec;
    unsigned long buflen = 0;

    if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
        rqst->rq_iov[0].iov_len == 4) {
        iov = &rqst->rq_iov[1];
        nvec = rqst->rq_nvec - 1;
    } else {
        iov = rqst->rq_iov;
        nvec = rqst->rq_nvec;
    }

    /* total up iov array first */
    for (i = 0; i < nvec; i++)
        buflen += iov[i].iov_len;

    /*
     * Add in the page array if there is one. The caller needs to make
     * sure rq_offset and rq_tailsz are set correctly. If a buffer of
     * multiple pages ends at page boundary, rq_tailsz needs to be set to
     * PAGE_SIZE.
     */
    if (rqst->rq_npages) {
        if (rqst->rq_npages == 1)
            buflen += rqst->rq_tailsz;
        else {
            /*
             * If there is more than one page, calculate the
             * buffer length based on rq_offset and rq_tailsz
             */
            buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                    rqst->rq_offset;
            buflen += rqst->rq_tailsz;
        }
    }

    return buflen;
}
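
/*
 * Worked example of the page-array arithmetic above, with hypothetical
 * values: for a request with rq_npages = 3, rq_pagesz = 4096,
 * rq_offset = 512 and rq_tailsz = 100, the page array contributes
 * 4096 * 2 - 512 + 100 = 7780 bytes - the first page starts at offset 512,
 * the middle page is full, and the last page holds only the 100-byte tail.
 */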

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        struct smb_rqst *rqst)
{
    int rc = 0;
    struct kvec *iov;
    int n_vec;
    unsigned int send_length = 0;
    unsigned int i, j;
    sigset_t mask, oldmask;
    size_t total_len = 0, sent, size;
    struct socket *ssocket = server->ssocket;
    struct msghdr smb_msg = {};
    __be32 rfc1002_marker;

    if (cifs_rdma_enabled(server)) {
        /* return -EAGAIN when connecting or reconnecting */
        rc = -EAGAIN;
        if (server->smbd_conn)
            rc = smbd_send(server, num_rqst, rqst);
        goto smbd_done;
    }

    if (ssocket == NULL)
        return -EAGAIN;

    if (fatal_signal_pending(current)) {
        cifs_dbg(FYI, "signal pending before send request\n");
        return -ERESTARTSYS;
    }

    /* cork the socket */
    tcp_sock_set_cork(ssocket->sk, true);

    for (j = 0; j < num_rqst; j++)
        send_length += smb_rqst_len(server, &rqst[j]);
    rfc1002_marker = cpu_to_be32(send_length);

    /*
     * We should not allow signals to interrupt the network send because
     * any partial send will cause session reconnects, thus increasing
     * the latency of system calls and overloading the server with
     * unnecessary requests.
     */
    sigfillset(&mask);
    sigprocmask(SIG_BLOCK, &mask, &oldmask);

    /* Generate an RFC1002 marker for SMB2+ */
    if (!is_smb1(server)) {
        struct kvec hiov = {
            .iov_base = &rfc1002_marker,
            .iov_len  = 4
        };
        iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
        rc = smb_send_kvec(server, &smb_msg, &sent);
        if (rc < 0)
            goto unmask;

        total_len += sent;
        send_length += 4;
    }

    cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

    for (j = 0; j < num_rqst; j++) {
        iov = rqst[j].rq_iov;
        n_vec = rqst[j].rq_nvec;

        size = 0;
        for (i = 0; i < n_vec; i++) {
            dump_smb(iov[i].iov_base, iov[i].iov_len);
            size += iov[i].iov_len;
        }

        iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

        rc = smb_send_kvec(server, &smb_msg, &sent);
        if (rc < 0)
            goto unmask;

        total_len += sent;

        /* now walk the page array and send each page in it */
        for (i = 0; i < rqst[j].rq_npages; i++) {
            struct bio_vec bvec;

            bvec.bv_page = rqst[j].rq_pages[i];
            rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                         &bvec.bv_offset);

            iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                      &bvec, 1, bvec.bv_len);
            rc = smb_send_kvec(server, &smb_msg, &sent);
            if (rc < 0)
                break;

            total_len += sent;
        }
    }

unmask:
    sigprocmask(SIG_SETMASK, &oldmask, NULL);

    /*
     * If a signal is pending but we have already sent the whole packet
     * to the server, we need to return success status to allow the
     * corresponding mid entry to be kept in the pending requests queue,
     * so that the client can handle the response from the server.
     *
     * If only part of the packet has been sent, there is no need to hide
     * the interrupt because the session will be reconnected anyway, so
     * there won't be any response from the server to handle.
     */
    if (signal_pending(current) && (total_len != send_length)) {
        cifs_dbg(FYI, "signal is pending after attempt to send\n");
        rc = -ERESTARTSYS;
    }

    /* uncork it */
    tcp_sock_set_cork(ssocket->sk, false);

    if ((total_len > 0) && (total_len != send_length)) {
        cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
             send_length, total_len);
        /*
         * If we have only sent part of an SMB then the next SMB could
         * be taken as the remainder of this one. We need to kill the
         * socket so the server throws away the partial SMB.
         */
        cifs_signal_cifsd_for_reconnect(server, false);
        trace_smb3_partial_send_reconnect(server->CurrentMid,
                          server->conn_id, server->hostname);
    }
smbd_done:
    if (rc < 0 && rc != -EINTR)
        cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
             rc);
    else if (rc > 0)
        rc = 0;

    return rc;
}
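
/*
 * To make the unmask logic above concrete (a restatement of the code, not
 * new rules): if a signal arrived but total_len == send_length, the whole
 * frame is on the wire, so we report success and let the normal response
 * path run. If 0 < total_len < send_length, we both return -ERESTARTSYS
 * and ask cifsd to reconnect, because a half-written frame would
 * desynchronize the RFC1002 stream for every subsequent request.
 */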

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
          struct smb_rqst *rqst, int flags)
{
    struct kvec iov;
    struct smb2_transform_hdr *tr_hdr;
    struct smb_rqst cur_rqst[MAX_COMPOUND];
    int rc;

    if (!(flags & CIFS_TRANSFORM_REQ))
        return __smb_send_rqst(server, num_rqst, rqst);

    if (num_rqst > MAX_COMPOUND - 1)
        return -ENOMEM;

    if (!server->ops->init_transform_rq) {
        cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
        return -EIO;
    }

    tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
    if (!tr_hdr)
        return -ENOMEM;

    memset(&cur_rqst[0], 0, sizeof(cur_rqst));
    memset(&iov, 0, sizeof(iov));

    iov.iov_base = tr_hdr;
    iov.iov_len = sizeof(*tr_hdr);
    cur_rqst[0].rq_iov = &iov;
    cur_rqst[0].rq_nvec = 1;

    rc = server->ops->init_transform_rq(server, num_rqst + 1,
                        &cur_rqst[0], rqst);
    if (rc)
        goto out;

    rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
    smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
    kfree(tr_hdr);
    return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
     unsigned int smb_buf_length)
{
    struct kvec iov[2];
    struct smb_rqst rqst = { .rq_iov = iov,
                 .rq_nvec = 2 };

    iov[0].iov_base = smb_buffer;
    iov[0].iov_len = 4;
    iov[1].iov_base = (char *)smb_buffer + 4;
    iov[1].iov_len = smb_buf_length;

    return __smb_send_rqst(server, 1, &rqst);
}
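
/*
 * Illustrative caller sketch (hypothetical, for orientation only):
 * smb_buffer is expected to start with the 4-byte RFC1002 length field,
 * which is why iov[0] covers those 4 bytes and iov[1] the remaining
 * smb_buf_length bytes of the actual SMB. SendReceive() below does this:
 *
 *	len = be32_to_cpu(in_buf->smb_buf_length);
 *	rc = smb_send(server, in_buf, len);
 */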

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
              const int timeout, const int flags,
              unsigned int *instance)
{
    long rc;
    int *credits;
    int optype;
    long int t;
    int scredits, in_flight;

    if (timeout < 0)
        t = MAX_JIFFY_OFFSET;
    else
        t = msecs_to_jiffies(timeout);

    optype = flags & CIFS_OP_MASK;

    *instance = 0;

    credits = server->ops->get_credits_field(server, optype);
    /* Since an echo is already inflight, no need to wait to send another */
    if (*credits <= 0 && optype == CIFS_ECHO_OP)
        return -EAGAIN;

    spin_lock(&server->req_lock);
    if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
        /* oplock breaks must not be held up */
        server->in_flight++;
        if (server->in_flight > server->max_in_flight)
            server->max_in_flight = server->in_flight;
        *credits -= 1;
        *instance = server->reconnect_instance;
        scredits = *credits;
        in_flight = server->in_flight;
        spin_unlock(&server->req_lock);

        trace_smb3_nblk_credits(server->CurrentMid,
                server->conn_id, server->hostname, scredits, -1, in_flight);
        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                __func__, 1, scredits);

        return 0;
    }

    while (1) {
        if (*credits < num_credits) {
            scredits = *credits;
            spin_unlock(&server->req_lock);

            cifs_num_waiters_inc(server);
            rc = wait_event_killable_timeout(server->request_q,
                has_credits(server, credits, num_credits), t);
            cifs_num_waiters_dec(server);
            if (!rc) {
                spin_lock(&server->req_lock);
                scredits = *credits;
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);

                trace_smb3_credit_timeout(server->CurrentMid,
                        server->conn_id, server->hostname, scredits,
                        num_credits, in_flight);
                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                        timeout);
                return -EBUSY;
            }
            if (rc == -ERESTARTSYS)
                return -ERESTARTSYS;
            spin_lock(&server->req_lock);
        } else {
            spin_unlock(&server->req_lock);

            spin_lock(&server->srv_lock);
            if (server->tcpStatus == CifsExiting) {
                spin_unlock(&server->srv_lock);
                return -ENOENT;
            }
            spin_unlock(&server->srv_lock);

            /*
             * For normal commands, reserve the last MAX_COMPOUND
             * credits for compound requests.
             * Otherwise these compounds could be permanently
             * starved for credits by single-credit requests.
             *
             * To prevent spinning the CPU, block this thread until
             * there are >MAX_COMPOUND credits available.
             * But only do this if we already have a lot of
             * credits in flight, to avoid triggering this check
             * for servers that are slow to hand out credits on
             * new sessions.
             */
            spin_lock(&server->req_lock);
            if (!optype && num_credits == 1 &&
                server->in_flight > 2 * MAX_COMPOUND &&
                *credits <= MAX_COMPOUND) {
                spin_unlock(&server->req_lock);

                cifs_num_waiters_inc(server);
                rc = wait_event_killable_timeout(
                    server->request_q,
                    has_credits(server, credits,
                            MAX_COMPOUND + 1),
                    t);
                cifs_num_waiters_dec(server);
                if (!rc) {
                    spin_lock(&server->req_lock);
                    scredits = *credits;
                    in_flight = server->in_flight;
                    spin_unlock(&server->req_lock);

                    trace_smb3_credit_timeout(
                            server->CurrentMid,
                            server->conn_id, server->hostname,
                            scredits, num_credits, in_flight);
                    cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                            timeout);
                    return -EBUSY;
                }
                if (rc == -ERESTARTSYS)
                    return -ERESTARTSYS;
                spin_lock(&server->req_lock);
                continue;
            }

            /*
             * Cannot count locking commands against the total,
             * as they are allowed to block on the server.
             */

            /* update # of requests on the wire to server */
            if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                *credits -= num_credits;
                server->in_flight += num_credits;
                if (server->in_flight > server->max_in_flight)
                    server->max_in_flight = server->in_flight;
                *instance = server->reconnect_instance;
            }
            scredits = *credits;
            in_flight = server->in_flight;
            spin_unlock(&server->req_lock);

            trace_smb3_waitff_credits(server->CurrentMid,
                    server->conn_id, server->hostname, scredits,
                    -(num_credits), in_flight);
            cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                    __func__, num_credits, scredits);
            break;
        }
    }
    return 0;
}
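
/*
 * Example of the MAX_COMPOUND reservation above (assuming MAX_COMPOUND is
 * 5, its value in cifsglob.h at the time of writing): a plain single-credit
 * request (optype 0, num_credits == 1) arriving while in_flight > 10 and
 * *credits <= 5 sleeps until at least 6 credits are available, leaving the
 * last few credits for compound chains that would otherwise never find
 * enough credits in one piece.
 */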

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
              unsigned int *instance)
{
    return wait_for_free_credits(server, 1, -1, flags,
                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
              const int flags, unsigned int *instance)
{
    int *credits;
    int scredits, in_flight;

    credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

    spin_lock(&server->req_lock);
    scredits = *credits;
    in_flight = server->in_flight;

    if (*credits < num) {
        /*
         * If the server is tight on resources or just gives us fewer
         * credits for other reasons (e.g. requests are coming out of
         * order and the server delays granting more credits until it
         * processes a missing mid) and we exhausted most available
         * credits, there may be situations when we try to send
         * a compound request but don't have enough credits. At this
         * point the client needs to decide if it should wait for
         * additional credits or fail the request. If at least one
         * request is in flight there is a high probability that the
         * server will return enough credits to satisfy this compound
         * request.
         *
         * Return immediately if no requests are in flight, since we
         * would otherwise be stuck waiting for credits.
         */
        if (server->in_flight == 0) {
            spin_unlock(&server->req_lock);
            trace_smb3_insufficient_credits(server->CurrentMid,
                    server->conn_id, server->hostname, scredits,
                    num, in_flight);
            cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
                    __func__, in_flight, num, scredits);
            return -EDEADLK;
        }
    }
    spin_unlock(&server->req_lock);

    return wait_for_free_credits(server, num, 60000, flags,
                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
              unsigned int *num, struct cifs_credits *credits)
{
    *num = size;
    credits->value = 0;
    credits->instance = server->reconnect_instance;
    return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
            struct mid_q_entry **ppmidQ)
{
    spin_lock(&ses->ses_lock);
    if (ses->ses_status == SES_NEW) {
        if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
            (in_buf->Command != SMB_COM_NEGOTIATE)) {
            spin_unlock(&ses->ses_lock);
            return -EAGAIN;
        }
        /* else ok - we are setting up session */
    }

    if (ses->ses_status == SES_EXITING) {
        /* check if SMB session is bad because we are setting it up */
        if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
            spin_unlock(&ses->ses_lock);
            return -EAGAIN;
        }
        /* else ok - we are shutting down session */
    }
    spin_unlock(&ses->ses_lock);

    *ppmidQ = alloc_mid(in_buf, ses->server);
    if (*ppmidQ == NULL)
        return -ENOMEM;
    spin_lock(&ses->server->mid_lock);
    list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
    spin_unlock(&ses->server->mid_lock);
    return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
    int error;

    error = wait_event_freezekillable_unsafe(server->response_q,
                    midQ->mid_state != MID_REQUEST_SUBMITTED);
    if (error < 0)
        return -ERESTARTSYS;

    return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
    int rc;
    struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
    struct mid_q_entry *mid;

    if (rqst->rq_iov[0].iov_len != 4 ||
        rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
        return ERR_PTR(-EIO);

    /* enable signing if server requires it */
    if (server->sign)
        hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

    mid = alloc_mid(hdr, server);
    if (mid == NULL)
        return ERR_PTR(-ENOMEM);

    rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
    if (rc) {
        release_mid(mid);
        return ERR_PTR(rc);
    }

    return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. The caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
        mid_receive_t *receive, mid_callback_t *callback,
        mid_handle_t *handle, void *cbdata, const int flags,
        const struct cifs_credits *exist_credits)
{
    int rc;
    struct mid_q_entry *mid;
    struct cifs_credits credits = { .value = 0, .instance = 0 };
    unsigned int instance;
    int optype;

    optype = flags & CIFS_OP_MASK;

    if ((flags & CIFS_HAS_CREDITS) == 0) {
        rc = wait_for_free_request(server, flags, &instance);
        if (rc)
            return rc;
        credits.value = 1;
        credits.instance = instance;
    } else
        instance = exist_credits->instance;

    cifs_server_lock(server);

    /*
     * We can't use credits obtained from the previous session to send this
     * request. Check if there were reconnects after we obtained credits and
     * return -EAGAIN in such cases to let callers handle it.
     */
    if (instance != server->reconnect_instance) {
        cifs_server_unlock(server);
        add_credits_and_wake_if(server, &credits, optype);
        return -EAGAIN;
    }

    mid = server->ops->setup_async_request(server, rqst);
    if (IS_ERR(mid)) {
        cifs_server_unlock(server);
        add_credits_and_wake_if(server, &credits, optype);
        return PTR_ERR(mid);
    }

    mid->receive = receive;
    mid->callback = callback;
    mid->callback_data = cbdata;
    mid->handle = handle;
    mid->mid_state = MID_REQUEST_SUBMITTED;

    /* put it on the pending_mid_q */
    spin_lock(&server->mid_lock);
    list_add_tail(&mid->qhead, &server->pending_mid_q);
    spin_unlock(&server->mid_lock);

    /*
     * Need to store the time in mid before calling I/O. For call_async,
     * the I/O response may come back and free the mid entry on another
     * thread.
     */
    cifs_save_when_sent(mid);
    cifs_in_send_inc(server);
    rc = smb_send_rqst(server, 1, rqst, flags);
    cifs_in_send_dec(server);

    if (rc < 0) {
        revert_current_mid(server, mid->credits);
        server->sequence_number -= 2;
        delete_mid(mid);
    }

    cifs_server_unlock(server);

    if (rc == 0)
        return 0;

    add_credits_and_wake_if(server, &credits, optype);
    return rc;
}
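
/*
 * Hypothetical usage sketch (the real callers live elsewhere, e.g. the
 * SMB2 echo and async read/write paths): a caller builds an smb_rqst and
 * hands completion off to a callback instead of sleeping:
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_echo_callback,
 *			     NULL, server, CIFS_ECHO_OP, NULL);
 *
 * my_echo_callback() (an invented name here) then runs in the demultiplex
 * thread once the response, or a reconnect, arrives, and is expected to
 * call release_mid() when it is done with the mid entry.
 */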

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and
 * whether to log the NT STATUS code (error) before mapping it to a POSIX
 * error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
         char *in_buf, int flags)
{
    int rc;
    struct kvec iov[1];
    struct kvec rsp_iov;
    int resp_buf_type;

    iov[0].iov_base = in_buf;
    iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
    flags |= CIFS_NO_RSP_BUF;
    rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
    cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

    return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
    int rc = 0;

    cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
         __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

    spin_lock(&server->mid_lock);
    switch (mid->mid_state) {
    case MID_RESPONSE_RECEIVED:
        spin_unlock(&server->mid_lock);
        return rc;
    case MID_RETRY_NEEDED:
        rc = -EAGAIN;
        break;
    case MID_RESPONSE_MALFORMED:
        rc = -EIO;
        break;
    case MID_SHUTDOWN:
        rc = -EHOSTDOWN;
        break;
    default:
        if (!(mid->mid_flags & MID_DELETED)) {
            list_del_init(&mid->qhead);
            mid->mid_flags |= MID_DELETED;
        }
        cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
             __func__, mid->mid, mid->mid_state);
        rc = -EIO;
    }
    spin_unlock(&server->mid_lock);

    release_mid(mid);
    return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
        struct mid_q_entry *mid)
{
    return server->ops->send_cancel ?
                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
           bool log_error)
{
    unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

    dump_smb(mid->resp_buf, min_t(u32, 92, len));

    /* verify the signature if this session is signed */
    if (server->sign) {
        struct kvec iov[2];
        int rc = 0;
        struct smb_rqst rqst = { .rq_iov = iov,
                     .rq_nvec = 2 };

        iov[0].iov_base = mid->resp_buf;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)mid->resp_buf + 4;
        iov[1].iov_len = len - 4;
        /* FIXME: add code to kill session */
        rc = cifs_verify_signature(&rqst, server,
                       mid->sequence_number);
        if (rc)
            cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                 rc);
    }

    /* BB special case reconnect tid and uid here? */
    return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
           struct smb_rqst *rqst)
{
    int rc;
    struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
    struct mid_q_entry *mid;

    if (rqst->rq_iov[0].iov_len != 4 ||
        rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
        return ERR_PTR(-EIO);

    rc = allocate_mid(ses, hdr, &mid);
    if (rc)
        return ERR_PTR(rc);
    rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
    if (rc) {
        delete_mid(mid);
        return ERR_PTR(rc);
    }
    return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
    struct TCP_Server_Info *server = mid->server;
    struct cifs_credits credits;

    credits.value = server->ops->get_credits(mid);
    credits.instance = server->reconnect_instance;

    add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
    cifs_compound_callback(mid);
    cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
    cifs_compound_callback(mid);
    release_mid(mid);
}

/*
 * Return a channel (the master if no other exists) of @ses that can be
 * used to send regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
    uint index = 0;

    if (!ses)
        return NULL;

    /* round robin */
    index = (uint)atomic_inc_return(&ses->chan_seq);

    spin_lock(&ses->chan_lock);
    index %= ses->chan_count;
    spin_unlock(&ses->chan_lock);

    return ses->chans[index].server;
}
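
/*
 * Worked example with hypothetical counts: with chan_count == 3 and
 * chan_seq advancing atomically on every call, successive calls return
 * chans[1], chans[2], chans[0], chans[1], ... so regular requests
 * round-robin across all established channels of the session.
 */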

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
           struct TCP_Server_Info *server,
           const int flags, const int num_rqst, struct smb_rqst *rqst,
           int *resp_buf_type, struct kvec *resp_iov)
{
    int i, j, optype, rc = 0;
    struct mid_q_entry *midQ[MAX_COMPOUND];
    bool cancelled_mid[MAX_COMPOUND] = {false};
    struct cifs_credits credits[MAX_COMPOUND] = {
        { .value = 0, .instance = 0 }
    };
    unsigned int instance;
    char *buf;

    optype = flags & CIFS_OP_MASK;

    for (i = 0; i < num_rqst; i++)
        resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

    if (!ses || !ses->server || !server) {
        cifs_dbg(VFS, "Null session\n");
        return -EIO;
    }

    spin_lock(&server->srv_lock);
    if (server->tcpStatus == CifsExiting) {
        spin_unlock(&server->srv_lock);
        return -ENOENT;
    }
    spin_unlock(&server->srv_lock);

    /*
     * Wait for all the requests to become available.
     * This approach still leaves the possibility of being stuck waiting
     * for credits if the server doesn't grant credits to the outstanding
     * requests and the client is completely idle, not generating any
     * other requests.
     * This can be handled by the eventual session reconnect.
     */
    rc = wait_for_compound_request(server, num_rqst, flags,
                       &instance);
    if (rc)
        return rc;

    for (i = 0; i < num_rqst; i++) {
        credits[i].value = 1;
        credits[i].instance = instance;
    }

    /*
     * Make sure that we sign in the same order that we send on this socket
     * and avoid races inside tcp sendmsg code that could cause corruption
     * of smb data.
     */

    cifs_server_lock(server);

    /*
     * All the parts of the compound chain must use credits obtained from
     * the same session. We can't use credits obtained from the previous
     * session to send this request. Check if there were reconnects after
     * we obtained credits and return -EAGAIN in such cases to let callers
     * handle it.
     */
    if (instance != server->reconnect_instance) {
        cifs_server_unlock(server);
        for (j = 0; j < num_rqst; j++)
            add_credits(server, &credits[j], optype);
        return -EAGAIN;
    }

    for (i = 0; i < num_rqst; i++) {
        midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
        if (IS_ERR(midQ[i])) {
            revert_current_mid(server, i);
            for (j = 0; j < i; j++)
                delete_mid(midQ[j]);
            cifs_server_unlock(server);

            /* Update # of requests on wire to server */
            for (j = 0; j < num_rqst; j++)
                add_credits(server, &credits[j], optype);
            return PTR_ERR(midQ[i]);
        }

        midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
        midQ[i]->optype = optype;
        /*
         * Invoke callback for every part of the compound chain
         * to calculate credits properly. Wake up this thread only when
         * the last element is received.
         */
        if (i < num_rqst - 1)
            midQ[i]->callback = cifs_compound_callback;
        else
            midQ[i]->callback = cifs_compound_last_callback;
    }
    cifs_in_send_inc(server);
    rc = smb_send_rqst(server, num_rqst, rqst, flags);
    cifs_in_send_dec(server);

    for (i = 0; i < num_rqst; i++)
        cifs_save_when_sent(midQ[i]);

    if (rc < 0) {
        revert_current_mid(server, num_rqst);
        server->sequence_number -= 2;
    }

    cifs_server_unlock(server);

    /*
     * If sending failed for some reason, or this is an oplock break that
     * we will not receive a response to, return the credits back.
     */
    if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
        for (i = 0; i < num_rqst; i++)
            add_credits(server, &credits[i], optype);
        goto out;
    }

    /*
     * At this point the request is passed to the network stack - we assume
     * that any credits taken from the server structure on the client have
     * been spent and we can't return them back. Once we receive responses
     * we will collect credits granted by the server in the mid callbacks
     * and add those credits to the server structure.
     */

    /*
     * Compounding is never used during session establish.
     */
    spin_lock(&ses->ses_lock);
    if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
        spin_unlock(&ses->ses_lock);

        cifs_server_lock(server);
        smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
        cifs_server_unlock(server);

        spin_lock(&ses->ses_lock);
    }
    spin_unlock(&ses->ses_lock);

    for (i = 0; i < num_rqst; i++) {
        rc = wait_for_response(server, midQ[i]);
        if (rc != 0)
            break;
    }
    if (rc != 0) {
        for (; i < num_rqst; i++) {
            cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
            send_cancel(server, &rqst[i], midQ[i]);
            spin_lock(&server->mid_lock);
            midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
            if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                midQ[i]->callback = cifs_cancelled_callback;
                cancelled_mid[i] = true;
                credits[i].value = 0;
            }
            spin_unlock(&server->mid_lock);
        }
    }

    for (i = 0; i < num_rqst; i++) {
        if (rc < 0)
            goto out;

        rc = cifs_sync_mid_result(midQ[i], server);
        if (rc != 0) {
            /* mark this mid as cancelled to not free it below */
            cancelled_mid[i] = true;
            goto out;
        }

        if (!midQ[i]->resp_buf ||
            midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
            rc = -EIO;
            cifs_dbg(FYI, "Bad MID state?\n");
            goto out;
        }

        buf = (char *)midQ[i]->resp_buf;
        resp_iov[i].iov_base = buf;
        resp_iov[i].iov_len = midQ[i]->resp_buf_size +
            HEADER_PREAMBLE_SIZE(server);

        if (midQ[i]->large_buf)
            resp_buf_type[i] = CIFS_LARGE_BUFFER;
        else
            resp_buf_type[i] = CIFS_SMALL_BUFFER;

        rc = server->ops->check_receive(midQ[i], server,
                             flags & CIFS_LOG_ERROR);

        /* mark it so buf will not be freed by delete_mid */
        if ((flags & CIFS_NO_RSP_BUF) == 0)
            midQ[i]->resp_buf = NULL;
    }

    /*
     * Compounding is never used during session establish.
     */
    spin_lock(&ses->ses_lock);
    if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
        struct kvec iov = {
            .iov_base = resp_iov[0].iov_base,
            .iov_len = resp_iov[0].iov_len
        };
        spin_unlock(&ses->ses_lock);
        cifs_server_lock(server);
        smb311_update_preauth_hash(ses, server, &iov, 1);
        cifs_server_unlock(server);
        spin_lock(&ses->ses_lock);
    }
    spin_unlock(&ses->ses_lock);

out:
    /*
     * This will dequeue all mids. After this it is important that the
     * demultiplex_thread will not process any of these mids any further.
     * This is prevented above by using a noop callback that will not
     * wake this thread except for the very last PDU.
     */
    for (i = 0; i < num_rqst; i++) {
        if (!cancelled_mid[i])
            delete_mid(midQ[i]);
    }

    return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
           struct TCP_Server_Info *server,
           struct smb_rqst *rqst, int *resp_buf_type, const int flags,
           struct kvec *resp_iov)
{
    return compound_send_recv(xid, ses, server, flags, 1,
                  rqst, resp_buf_type, resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
         struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
         const int flags, struct kvec *resp_iov)
{
    struct smb_rqst rqst;
    struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
    int rc;

    if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
        new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                    GFP_KERNEL);
        if (!new_iov) {
            /* otherwise cifs_send_recv below sets resp_buf_type */
            *resp_buf_type = CIFS_NO_BUFFER;
            return -ENOMEM;
        }
    } else
        new_iov = s_iov;

    /* 1st iov is an RFC1001 length followed by the rest of the packet */
    memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

    new_iov[0].iov_base = new_iov[1].iov_base;
    new_iov[0].iov_len = 4;
    new_iov[1].iov_base += 4;
    new_iov[1].iov_len -= 4;

    memset(&rqst, 0, sizeof(struct smb_rqst));
    rqst.rq_iov = new_iov;
    rqst.rq_nvec = n_vec + 1;

    rc = cifs_send_recv(xid, ses, ses->server,
                &rqst, resp_buf_type, flags, resp_iov);
    if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
        kfree(new_iov);
    return rc;
}
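
/*
 * Worked example of the iov fix-up above: a caller passes a kvec array
 * whose first buffer starts with the 4-byte RFC1001 length field.
 * SendReceive2 re-splits that into new_iov[0] = { buf, 4 } and
 * new_iov[1] = { buf + 4, len - 4 }, which is the layout that
 * cifs_setup_request() (and its iov_base + 4 sanity check) expects.
 */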

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
        struct smb_hdr *in_buf, struct smb_hdr *out_buf,
        int *pbytes_returned, const int flags)
{
    int rc = 0;
    struct mid_q_entry *midQ;
    unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
    struct kvec iov = { .iov_base = in_buf, .iov_len = len };
    struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
    struct cifs_credits credits = { .value = 1, .instance = 0 };
    struct TCP_Server_Info *server;

    if (ses == NULL) {
        cifs_dbg(VFS, "Null smb session\n");
        return -EIO;
    }
    server = ses->server;
    if (server == NULL) {
        cifs_dbg(VFS, "Null tcp session\n");
        return -EIO;
    }

    spin_lock(&server->srv_lock);
    if (server->tcpStatus == CifsExiting) {
        spin_unlock(&server->srv_lock);
        return -ENOENT;
    }
    spin_unlock(&server->srv_lock);

    /* Ensure that we do not send more than 50 overlapping requests
       to the same server. We may make this configurable later or
       use ses->maxReq */

    if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
        cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
                len);
        return -EIO;
    }

    rc = wait_for_free_request(server, flags, &credits.instance);
    if (rc)
        return rc;

    /* make sure that we sign in the same order that we send on this socket
       and avoid races inside tcp sendmsg code that could cause corruption
       of smb data */

    cifs_server_lock(server);

    rc = allocate_mid(ses, in_buf, &midQ);
    if (rc) {
        cifs_server_unlock(server);
        /* Update # of requests on wire to server */
        add_credits(server, &credits, 0);
        return rc;
    }

    rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
    if (rc) {
        cifs_server_unlock(server);
        goto out;
    }

    midQ->mid_state = MID_REQUEST_SUBMITTED;

    cifs_in_send_inc(server);
    rc = smb_send(server, in_buf, len);
    cifs_in_send_dec(server);
    cifs_save_when_sent(midQ);

    if (rc < 0)
        server->sequence_number -= 2;

    cifs_server_unlock(server);

    if (rc < 0)
        goto out;

    rc = wait_for_response(server, midQ);
    if (rc != 0) {
        send_cancel(server, &rqst, midQ);
        spin_lock(&server->mid_lock);
        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
            /* no longer considered to be "in-flight" */
            midQ->callback = release_mid;
            spin_unlock(&server->mid_lock);
            add_credits(server, &credits, 0);
            return rc;
        }
        spin_unlock(&server->mid_lock);
    }

    rc = cifs_sync_mid_result(midQ, server);
    if (rc != 0) {
        add_credits(server, &credits, 0);
        return rc;
    }

    if (!midQ->resp_buf || !out_buf ||
        midQ->mid_state != MID_RESPONSE_RECEIVED) {
        rc = -EIO;
        cifs_server_dbg(VFS, "Bad MID state?\n");
        goto out;
    }

    *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
    memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
    rc = cifs_check_receive(midQ, server, 0);
out:
    delete_mid(midQ);
    add_credits(server, &credits, 0);

    return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf,
            struct smb_hdr *out_buf)
{
    int bytes_returned;
    struct cifs_ses *ses = tcon->ses;
    LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

    /* We just modify the current in_buf to change
       the type of lock from LOCKING_ANDX_SHARED_LOCK
       or LOCKING_ANDX_EXCLUSIVE_LOCK to
       LOCKING_ANDX_CANCEL_LOCK. */

    pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
    pSMB->Timeout = 0;
    pSMB->hdr.Mid = get_next_mid(ses->server);

    return SendReceive(xid, ses, in_buf, out_buf,
            &bytes_returned, 0);
}
1474 
1475 int
1476 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1477         struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1478         int *pbytes_returned)
1479 {
1480     int rc = 0;
1481     int rstart = 0;
1482     struct mid_q_entry *midQ;
1483     struct cifs_ses *ses;
1484     unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1485     struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1486     struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1487     unsigned int instance;
1488     struct TCP_Server_Info *server;
1489 
1490     if (tcon == NULL || tcon->ses == NULL) {
1491         cifs_dbg(VFS, "Null smb session\n");
1492         return -EIO;
1493     }
1494     ses = tcon->ses;
1495     server = ses->server;
1496 
1497     if (server == NULL) {
1498         cifs_dbg(VFS, "Null tcp session\n");
1499         return -EIO;
1500     }
1501 
1502     spin_lock(&server->srv_lock);
1503     if (server->tcpStatus == CifsExiting) {
1504         spin_unlock(&server->srv_lock);
1505         return -ENOENT;
1506     }
1507     spin_unlock(&server->srv_lock);
1508 
1509     /* Ensure that we do not send more than 50 overlapping requests
1510        to the same server. We may make this configurable later or
1511        use ses->maxReq */
1512 
1513     if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1514         cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1515                   len);
1516         return -EIO;
1517     }
1518 
1519     rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1520     if (rc)
1521         return rc;
1522 
1523     /* make sure that we sign in the same order that we send on this socket
1524        and avoid races inside tcp sendmsg code that could cause corruption
1525        of smb data */
1526 
1527     cifs_server_lock(server);
1528 
1529     rc = allocate_mid(ses, in_buf, &midQ);
1530     if (rc) {
1531         cifs_server_unlock(server);
1532         return rc;
1533     }
1534 
1535     rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1536     if (rc) {
1537         delete_mid(midQ);
1538         cifs_server_unlock(server);
1539         return rc;
1540     }
1541 
1542     midQ->mid_state = MID_REQUEST_SUBMITTED;
1543     cifs_in_send_inc(server);
1544     rc = smb_send(server, in_buf, len);
1545     cifs_in_send_dec(server);
1546     cifs_save_when_sent(midQ);
1547 
1548     if (rc < 0)
1549         server->sequence_number -= 2;
1550 
1551     cifs_server_unlock(server);
1552 
1553     if (rc < 0) {
1554         delete_mid(midQ);
1555         return rc;
1556     }
1557 
1558     /* Wait for a reply - allow signals to interrupt. */
1559     rc = wait_event_interruptible(server->response_q,
1560         (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1561         ((server->tcpStatus != CifsGood) &&
1562          (server->tcpStatus != CifsNew)));
1563 
1564     /* Were we interrupted by a signal ? */
1565     spin_lock(&server->srv_lock);
1566     if ((rc == -ERESTARTSYS) &&
1567         (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1568         ((server->tcpStatus == CifsGood) ||
1569          (server->tcpStatus == CifsNew))) {
1570         spin_unlock(&server->srv_lock);
1571 
1572         if (in_buf->Command == SMB_COM_TRANSACTION2) {
1573             /* POSIX lock. We send a NT_CANCEL SMB to cause the
1574                blocking lock to return. */
1575             rc = send_cancel(server, &rqst, midQ);
1576             if (rc) {
1577                 delete_mid(midQ);
1578                 return rc;
1579             }
1580         } else {
1581             /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1582                to cause the blocking lock to return. */
1583 
1584             rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1585 
1586             /* If we get -ENOLCK back the lock may have
1587                already been removed. Don't exit in this case. */
1588             if (rc && rc != -ENOLCK) {
1589                 delete_mid(midQ);
1590                 return rc;
1591             }
1592         }
1593 
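             /*
              * Even after a cancel is sent, the server still owes us a
              * response to the original request, so wait for it before
              * tearing down the mid.
              */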
1594         rc = wait_for_response(server, midQ);
1595         if (rc) {
1596             send_cancel(server, &rqst, midQ);
1597             spin_lock(&server->mid_lock);
1598             if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1599                 /* no longer considered to be "in-flight" */
1600                 midQ->callback = release_mid;
1601                 spin_unlock(&server->mid_lock);
1602                 return rc;
1603             }
1604             spin_unlock(&server->mid_lock);
1605         }
1606 
1607         /* We got the response - restart system call. */
1608         rstart = 1;
1609         spin_lock(&server->srv_lock);
1610     }
1611     spin_unlock(&server->srv_lock);
1612 
1613     rc = cifs_sync_mid_result(midQ, server);
1614     if (rc != 0)
1615         return rc;
1616 
1617     /* received frame is OK */
1618     if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1619         rc = -EIO;
1620         cifs_tcon_dbg(VFS, "Bad MID state %d\n", midQ->mid_state);
1621         goto out;
1622     }
1623 
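         /* the extra 4 bytes are the RFC 1002 length field itself */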
1624     *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1625     memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1626     rc = cifs_check_receive(midQ, server, 0);
1627 out:
1628     delete_mid(midQ);
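         /*
          * If we cancelled the blocking lock and the server then failed
          * it with a lock conflict, restart the syscall so the lock
          * request is reissued.
          */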
1629     if (rstart && rc == -EACCES)
1630         return -ERESTARTSYS;
1631     return rc;
1632 }
1633 
1634 /*
1635  * Discard any remaining data in the current SMB by reading it off the
1636  * socket in bounded chunks and throwing it away.
1637  */
1638 int
1639 cifs_discard_remaining_data(struct TCP_Server_Info *server)
1640 {
1641     unsigned int rfclen = server->pdu_size;
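         /* bytes of this PDU (preamble included) not yet read off the wire */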
1642     int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
1643         server->total_read;
1644 
1645     while (remaining > 0) {
1646         int length;
1647 
1648         length = cifs_discard_from_socket(server,
1649                 min_t(size_t, remaining,
1650                       CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1651         if (length < 0)
1652             return length;
1653         server->total_read += length;
1654         remaining -= length;
1655     }
1656 
1657     return 0;
1658 }
1659 
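     /*
      * Drain whatever is left of the current PDU, then dequeue the mid and
      * hand it the small buffer so its callback can still inspect the
      * response header.
      */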
1660 static int
1661 __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1662              bool malformed)
1663 {
1664     int length;
1665 
1666     length = cifs_discard_remaining_data(server);
1667     dequeue_mid(mid, malformed);
1668     mid->resp_buf = server->smallbuf;
1669     server->smallbuf = NULL;
1670     return length;
1671 }
1672 
1673 static int
1674 cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1675 {
1676     struct cifs_readdata *rdata = mid->callback_data;
1677 
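         /* a nonzero rdata->result marks this response as malformed */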
1678     return __cifs_readv_discard(server, mid, rdata->result);
1679 }
1680 
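     /*
      * Receive handler for an async read: pull in the rest of the READ
      * response header, sanity check it, then read the payload into the
      * pages supplied with the request.
      */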
1681 int
1682 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1683 {
1684     int length, len;
1685     unsigned int data_offset, data_len;
1686     struct cifs_readdata *rdata = mid->callback_data;
1687     char *buf = server->smallbuf;
1688     unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
1689     bool use_rdma_mr = false;
1690 
1691     cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
1692          __func__, mid->mid, rdata->offset, rdata->bytes);
1693 
1694     /*
1695      * read the rest of READ_RSP header (sans Data array), or whatever we
1696      * can if there's not enough data. At this point, we've read down to
1697      * the Mid.
1698      */
1699     len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
1700                             HEADER_SIZE(server) + 1;
1701 
1702     length = cifs_read_from_socket(server,
1703                        buf + HEADER_SIZE(server) - 1, len);
1704     if (length < 0)
1705         return length;
1706     server->total_read += length;
1707 
1708     if (server->ops->is_session_expired &&
1709         server->ops->is_session_expired(buf)) {
1710         cifs_reconnect(server, true);
1711         return -1;
1712     }
1713 
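         /*
          * An interim STATUS_PENDING response just says the server is
          * still working on the request; throw this PDU away and keep
          * waiting for the real reply.
          */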
1714     if (server->ops->is_status_pending &&
1715         server->ops->is_status_pending(buf, server)) {
1716         cifs_discard_remaining_data(server);
1717         return -1;
1718     }
1719 
1720     /* set up first two iov for signature check and to get credits */
1721     rdata->iov[0].iov_base = buf;
1722     rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
1723     rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
1724     rdata->iov[1].iov_len =
1725         server->total_read - HEADER_PREAMBLE_SIZE(server);
1726     cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
1727          rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1728     cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
1729          rdata->iov[1].iov_base, rdata->iov[1].iov_len);
1730 
1731     /* Was the SMB read successful? */
1732     rdata->result = server->ops->map_error(buf, false);
1733     if (rdata->result != 0) {
1734         cifs_dbg(FYI, "%s: server returned error %d\n",
1735              __func__, rdata->result);
1736         /* normal error on read response */
1737         return __cifs_readv_discard(server, mid, false);
1738     }
1739 
1740     /* Is there enough to get to the rest of the READ_RSP header? */
1741     if (server->total_read < server->vals->read_rsp_size) {
1742         cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
1743              __func__, server->total_read,
1744              server->vals->read_rsp_size);
1745         rdata->result = -EIO;
1746         return cifs_readv_discard(server, mid);
1747     }
1748 
1749     data_offset = server->ops->read_data_offset(buf) +
1750         HEADER_PREAMBLE_SIZE(server);
1751     if (data_offset < server->total_read) {
1752         /*
1753          * win2k8 sometimes sends an offset of 0 when the read
1754          * is beyond the EOF. Treat it as if the data starts just after
1755          * the header.
1756          */
1757         cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
1758              __func__, data_offset);
1759         data_offset = server->total_read;
1760     } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
1761         /* data_offset is beyond the end of smallbuf */
1762         cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
1763              __func__, data_offset);
1764         rdata->result = -EIO;
1765         return cifs_readv_discard(server, mid);
1766     }
1767 
1768     cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
1769          __func__, server->total_read, data_offset);
1770 
1771     len = data_offset - server->total_read;
1772     if (len > 0) {
1773         /* read any junk before data into the rest of smallbuf */
1774         length = cifs_read_from_socket(server,
1775                            buf + server->total_read, len);
1776         if (length < 0)
1777             return length;
1778         server->total_read += length;
1779     }
1780 
1781     /* how much data is in the response? */
1782 #ifdef CONFIG_CIFS_SMB_DIRECT
1783     use_rdma_mr = rdata->mr;
1784 #endif
1785     data_len = server->ops->read_data_length(buf, use_rdma_mr);
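         /*
          * With an SMBDirect memory registration the payload is placed
          * directly by RDMA and never crosses the socket, so the framed
          * length sanity check below does not apply.
          */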
1786     if (!use_rdma_mr && (data_offset + data_len > buflen)) {
1787         /* data_len is corrupt -- discard frame */
1788         rdata->result = -EIO;
1789         return cifs_readv_discard(server, mid);
1790     }
1791 
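         /* pull the payload straight into the pages backing this read */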
1792     length = rdata->read_into_pages(server, rdata, data_len);
1793     if (length < 0)
1794         return length;
1795 
1796     server->total_read += length;
1797 
1798     cifs_dbg(FYI, "total_read=%u buflen=%u data_len=%u\n",
1799          server->total_read, buflen, data_len);
1800 
1801     /* discard anything left over */
1802     if (server->total_read < buflen)
1803         return cifs_readv_discard(server, mid);
1804 
1805     dequeue_mid(mid, false);
1806     mid->resp_buf = server->smallbuf;
1807     server->smallbuf = NULL;
1808     return length;
1809 }