0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/fs.h>
0011 #include <linux/list.h>
0012 #include <linux/gfp.h>
0013 #include <linux/wait.h>
0014 #include <linux/net.h>
0015 #include <linux/delay.h>
0016 #include <linux/freezer.h>
0017 #include <linux/tcp.h>
0018 #include <linux/bvec.h>
0019 #include <linux/highmem.h>
0020 #include <linux/uaccess.h>
0021 #include <asm/processor.h>
0022 #include <linux/mempool.h>
0023 #include <linux/sched/signal.h>
0024 #include <linux/task_io_accounting_ops.h>
0025 #include "cifspdu.h"
0026 #include "cifsglob.h"
0027 #include "cifsproto.h"
0028 #include "cifs_debug.h"
0029 #include "smb2proto.h"
0030 #include "smbdirect.h"
0031
0032
0033 #define CIFS_MAX_IOV_SIZE 8
0034
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	/*
	 * Default mid completion callback: alloc_mid() stores the issuing
	 * task in callback_data, so waking it resumes the synchronous
	 * sender that is waiting for this mid's response.
	 */
	wake_up_process(mid->callback_data);
}
0040
0041 static struct mid_q_entry *
0042 alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
0043 {
0044 struct mid_q_entry *temp;
0045
0046 if (server == NULL) {
0047 cifs_dbg(VFS, "%s: null TCP session\n", __func__);
0048 return NULL;
0049 }
0050
0051 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
0052 memset(temp, 0, sizeof(struct mid_q_entry));
0053 kref_init(&temp->refcount);
0054 temp->mid = get_mid(smb_buffer);
0055 temp->pid = current->pid;
0056 temp->command = cpu_to_le16(smb_buffer->Command);
0057 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
0058
0059
0060 temp->when_alloc = jiffies;
0061 temp->server = server;
0062
0063
0064
0065
0066
0067 get_task_struct(current);
0068 temp->creator = current;
0069 temp->callback = cifs_wake_up_task;
0070 temp->callback_data = current;
0071
0072 atomic_inc(&mid_count);
0073 temp->mid_state = MID_REQUEST_ALLOCATED;
0074 return temp;
0075 }
0076
/*
 * Final kref release for a mid: run server-specific handling for requests
 * that were cancelled after a response arrived, record round-trip
 * statistics, release the response buffer, unpin the creator task and
 * return the entry to the mempool.  Reached via release_mid(), which holds
 * server->mid_lock around the kref_put().
 */
static void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	/* lock_cmd is excluded from slow-response reporting below */
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/*
	 * A response arrived after the waiter gave up: let the server ops
	 * clean up whatever the (now unwanted) response created.
	 */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		/* track fastest/slowest observed round trip per command */
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}

	/*
	 * Report responses slower than slow_rsp_threshold seconds, except
	 * for the dialect's lock command (presumably because blocking locks
	 * can legitimately take arbitrarily long — see vals->lock_cmd).
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			/* A=alloc-to-now, S=sent-to-now, R=received-to-now */
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	/* matches the get_task_struct() in alloc_mid() */
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}
0155
void release_mid(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;

	/*
	 * Drop one reference.  mid_lock is held so that a final put (which
	 * frees the entry in __release_mid) cannot race with concurrent
	 * lookups/removals on the server's pending mid queue.
	 */
	spin_lock(&server->mid_lock);
	kref_put(&mid->refcount, __release_mid);
	spin_unlock(&server->mid_lock);
}
0164
void
delete_mid(struct mid_q_entry *mid)
{
	/*
	 * Unlink the mid from the server's pending queue — unless another
	 * path already marked it MID_DELETED — then drop the caller's
	 * reference.
	 */
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	release_mid(mid);
}
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187 static int
0188 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
0189 size_t *sent)
0190 {
0191 int rc = 0;
0192 int retries = 0;
0193 struct socket *ssocket = server->ssocket;
0194
0195 *sent = 0;
0196
0197 if (server->noblocksnd)
0198 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
0199 else
0200 smb_msg->msg_flags = MSG_NOSIGNAL;
0201
0202 while (msg_data_left(smb_msg)) {
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221 rc = sock_sendmsg(ssocket, smb_msg);
0222 if (rc == -EAGAIN) {
0223 retries++;
0224 if (retries >= 14 ||
0225 (!server->noblocksnd && (retries > 2))) {
0226 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
0227 ssocket);
0228 return -EAGAIN;
0229 }
0230 msleep(1 << retries);
0231 continue;
0232 }
0233
0234 if (rc < 0)
0235 return rc;
0236
0237 if (rc == 0) {
0238
0239
0240 cifs_server_dbg(VFS, "tcp sent no data\n");
0241 msleep(500);
0242 continue;
0243 }
0244
0245
0246 *sent += rc;
0247 retries = 0;
0248 }
0249 return 0;
0250 }
0251
0252 unsigned long
0253 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
0254 {
0255 unsigned int i;
0256 struct kvec *iov;
0257 int nvec;
0258 unsigned long buflen = 0;
0259
0260 if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
0261 rqst->rq_iov[0].iov_len == 4) {
0262 iov = &rqst->rq_iov[1];
0263 nvec = rqst->rq_nvec - 1;
0264 } else {
0265 iov = rqst->rq_iov;
0266 nvec = rqst->rq_nvec;
0267 }
0268
0269
0270 for (i = 0; i < nvec; i++)
0271 buflen += iov[i].iov_len;
0272
0273
0274
0275
0276
0277
0278
0279 if (rqst->rq_npages) {
0280 if (rqst->rq_npages == 1)
0281 buflen += rqst->rq_tailsz;
0282 else {
0283
0284
0285
0286
0287 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
0288 rqst->rq_offset;
0289 buflen += rqst->rq_tailsz;
0290 }
0291 }
0292
0293 return buflen;
0294 }
0295
/*
 * Low-level send of @num_rqst requests: emits the RFC1002 length marker
 * (SMB2+), each request's kvec array, and its page array, with the socket
 * corked so the frame is not split.  All signals are blocked during the
 * send so a partial frame cannot be left on the wire; a partial send that
 * does occur forces a session reconnect.  Takes the SMB Direct path when
 * RDMA is enabled.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* SMB Direct: hand the whole chain to the RDMA layer */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket so the pieces below go out as one frame */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * Block all signals while sending: an interrupted partial send
	 * would desynchronize the stream and force a reconnect, so finish
	 * the frame first and check for pending signals after "unmask".
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* SMB2+: send the 4-byte RFC1002 length marker first */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now send this request's page array, one page at a time */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * A signal arrived while we had them blocked and the frame did not
	 * go out completely: report -ERESTARTSYS; the partial-send handling
	 * below will trigger a reconnect.
	 */
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork the socket now that the frame is done */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * A partial frame leaves the TCP stream corrupted from the
		 * server's point of view; ask the demultiplex thread to
		 * reconnect.
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
				rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
0440
0441 static int
0442 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
0443 struct smb_rqst *rqst, int flags)
0444 {
0445 struct kvec iov;
0446 struct smb2_transform_hdr *tr_hdr;
0447 struct smb_rqst cur_rqst[MAX_COMPOUND];
0448 int rc;
0449
0450 if (!(flags & CIFS_TRANSFORM_REQ))
0451 return __smb_send_rqst(server, num_rqst, rqst);
0452
0453 if (num_rqst > MAX_COMPOUND - 1)
0454 return -ENOMEM;
0455
0456 if (!server->ops->init_transform_rq) {
0457 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
0458 return -EIO;
0459 }
0460
0461 tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
0462 if (!tr_hdr)
0463 return -ENOMEM;
0464
0465 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
0466 memset(&iov, 0, sizeof(iov));
0467
0468 iov.iov_base = tr_hdr;
0469 iov.iov_len = sizeof(*tr_hdr);
0470 cur_rqst[0].rq_iov = &iov;
0471 cur_rqst[0].rq_nvec = 1;
0472
0473 rc = server->ops->init_transform_rq(server, num_rqst + 1,
0474 &cur_rqst[0], rqst);
0475 if (rc)
0476 goto out;
0477
0478 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
0479 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
0480 out:
0481 kfree(tr_hdr);
0482 return rc;
0483 }
0484
0485 int
0486 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
0487 unsigned int smb_buf_length)
0488 {
0489 struct kvec iov[2];
0490 struct smb_rqst rqst = { .rq_iov = iov,
0491 .rq_nvec = 2 };
0492
0493 iov[0].iov_base = smb_buffer;
0494 iov[0].iov_len = 4;
0495 iov[1].iov_base = (char *)smb_buffer + 4;
0496 iov[1].iov_len = smb_buf_length;
0497
0498 return __smb_send_rqst(server, 1, &rqst);
0499 }
0500
/*
 * Reserve @num_credits credits from the server's pool, sleeping up to
 * @timeout ms (forever when negative) until enough are available.
 * Non-blocking callers (CIFS_NON_BLOCKING) take a credit immediately even
 * if the pool is empty.  On success *@instance records the reconnect
 * instance the credits belong to, so callers can detect a reconnect that
 * happened after the reservation.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	/* negative timeout means wait indefinitely */
	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* an echo is already in flight: no point queueing another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* non-blocking senders must not wait: take a credit now */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/*
			 * Not enough credits: drop the lock and sleep until
			 * the pool is refilled (responses grant credits) or
			 * the timeout expires.
			 */
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				/* timed out waiting for credits */
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&server->srv_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->srv_lock);
				return -ENOENT;
			}
			spin_unlock(&server->srv_lock);

			/*
			 * Keep a reserve for compound requests: when many
			 * single-credit requests are in flight and the pool
			 * has drained to MAX_COMPOUND or fewer, make
			 * single-credit callers wait until more than
			 * MAX_COMPOUND credits are available again.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				/* re-check the pool from the top */
				continue;
			}

			/*
			 * Take the credits.  Blocking ops
			 * (CIFS_BLOCKING_OP) are not counted against
			 * in_flight or the credit pool.
			 */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}
0654
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	/* single credit, no timeout (negative => wait indefinitely) */
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}
0662
/*
 * Reserve @num credits for a compound request, with a 60 second timeout.
 * If the pool is short AND nothing is in flight, fail immediately with
 * -EDEADLK: with no outstanding responses there is nothing that could
 * replenish the pool, so waiting would hang forever.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/* no in-flight request can refill the pool: bail out */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	/* otherwise wait (up to 60s) for in-flight responses to grant more */
	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
0708
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	/*
	 * No MTU-based crediting on this transport: grant the full
	 * requested size and consume nothing from the credit pool
	 * (credits->value = 0 means nothing to return later).
	 */
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}
0718
/*
 * Allocate a mid for @in_buf and queue it on the server's pending list.
 * While the session is still being established only negotiate and
 * session-setup commands are allowed; while it is being torn down only
 * logoff is allowed — anything else gets -EAGAIN.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* only LOGOFF may still be sent on a dying session */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	/* make the mid visible to the demultiplex thread */
	spin_lock(&ses->server->mid_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_lock);
	return 0;
}
0750
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	/*
	 * Sleep (freezable, killable) until the mid leaves
	 * MID_REQUEST_SUBMITTED — i.e. a response arrived or the mid was
	 * failed by the demultiplex thread.  A fatal signal aborts the
	 * wait with -ERESTARTSYS.
	 */
	error = wait_event_freezekillable_unsafe(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
0763
/*
 * Build a mid for an async (SMB1) request and sign the request.  Expects
 * iov[0] to be exactly the 4-byte RFC1002 header, contiguous with the SMB
 * header in iov[1].  Returns the mid or an ERR_PTR.
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		/* mid was never queued, so a plain put is enough */
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
0791
0792
0793
0794
0795
/*
 * Send a request without waiting for the response: when it arrives, the
 * demultiplex thread invokes @callback (plus optional @receive/@handle
 * hooks) with @cbdata.  Unless the caller already holds credits
 * (CIFS_HAS_CREDITS), one credit is reserved here and returned on any
 * failure.  Returns 0 once the request is on the wire.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	cifs_server_lock(server);

	/*
	 * Credits obtained before a reconnect belong to the old connection
	 * instance and cannot be spent on the new one: give them back and
	 * let the caller retry.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&server->mid_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_lock);

	/*
	 * Timestamp the mid before sending so a very fast response cannot
	 * observe an unset when_sent.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* undo mid and sequence-number bookkeeping on send failure */
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		delete_mid(mid);
	}

	cifs_server_unlock(server);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883 int
0884 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
0885 char *in_buf, int flags)
0886 {
0887 int rc;
0888 struct kvec iov[1];
0889 struct kvec rsp_iov;
0890 int resp_buf_type;
0891
0892 iov[0].iov_base = in_buf;
0893 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
0894 flags |= CIFS_NO_RSP_BUF;
0895 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
0896 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
0897
0898 return rc;
0899 }
0900
/*
 * Translate a completed mid's state into an errno and drop the caller's
 * reference.  On MID_RESPONSE_RECEIVED the mid is NOT released — the
 * caller still owns it (and its resp_buf).  Unexpected states also force
 * the mid off the pending queue.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: keep the reference for the caller */
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unexpected state: make sure it is off the pending queue */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&server->mid_lock);

	release_mid(mid);
	return rc;
}
0937
0938 static inline int
0939 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
0940 struct mid_q_entry *mid)
0941 {
0942 return server->ops->send_cancel ?
0943 server->ops->send_cancel(server, rqst, mid) : 0;
0944 }
0945
0946 int
0947 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
0948 bool log_error)
0949 {
0950 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
0951
0952 dump_smb(mid->resp_buf, min_t(u32, 92, len));
0953
0954
0955 if (server->sign) {
0956 struct kvec iov[2];
0957 int rc = 0;
0958 struct smb_rqst rqst = { .rq_iov = iov,
0959 .rq_nvec = 2 };
0960
0961 iov[0].iov_base = mid->resp_buf;
0962 iov[0].iov_len = 4;
0963 iov[1].iov_base = (char *)mid->resp_buf + 4;
0964 iov[1].iov_len = len - 4;
0965
0966 rc = cifs_verify_signature(&rqst, server,
0967 mid->sequence_number);
0968 if (rc)
0969 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
0970 rc);
0971 }
0972
0973
0974 return map_and_check_smb_error(mid, log_error);
0975 }
0976
/*
 * Build and queue a mid for a synchronous SMB1 request and sign it.
 * Expects iov[0] to be exactly the 4-byte RFC1002 header, contiguous
 * with the SMB header in iov[1].  @ignored exists only to match the
 * setup_request op signature shared with SMB2.
 */
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		/* allocate_mid queued the mid, so dequeue it as well */
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
0999
1000 static void
1001 cifs_compound_callback(struct mid_q_entry *mid)
1002 {
1003 struct TCP_Server_Info *server = mid->server;
1004 struct cifs_credits credits;
1005
1006 credits.value = server->ops->get_credits(mid);
1007 credits.instance = server->reconnect_instance;
1008
1009 add_credits(server, &credits, mid->optype);
1010 }
1011
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	/* last mid in the chain: return credits AND wake the waiter */
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1018
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	/*
	 * The waiter gave up on this mid: return its credits and drop the
	 * reference the waiter abandoned.
	 */
	cifs_compound_callback(mid);
	release_mid(mid);
}
1025
1026
1027
1028
1029
1030
1031
1032
1033 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1034 {
1035 uint index = 0;
1036
1037 if (!ses)
1038 return NULL;
1039
1040
1041 index = (uint)atomic_inc_return(&ses->chan_seq);
1042
1043 spin_lock(&ses->chan_lock);
1044 index %= ses->chan_count;
1045 spin_unlock(&ses->chan_lock);
1046
1047 return ses->chans[index].server;
1048 }
1049
/*
 * Send @num_rqst requests as one compound chain on @server and wait for
 * all responses.  On success each resp_iov[i]/resp_buf_type[i] describes
 * response i (caller frees the buffers unless CIFS_NO_RSP_BUF).  One
 * credit per request is reserved up front and returned on any failure
 * path; mids whose wait was cancelled hand their credits back themselves
 * via cifs_cancelled_callback when the late response arrives.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	/* default until an actual response buffer is handed over */
	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Reserve credits before taking any locks so we never sleep for
	 * credits while holding the server mutex.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Serialize on the server mutex so requests are signed in the same
	 * order they hit the socket.
	 */
	cifs_server_lock(server);

	/*
	 * A reconnect happened between reserving credits and taking the
	 * lock: the credits belong to the old instance, so return them and
	 * let the caller retry against the new connection.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo the mids created so far, return all credits */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Only the last mid wakes this task; the earlier ones just
		 * hand their credits back as their responses arrive.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* back out mid and SMB1 signing sequence bookkeeping */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * Send failed, or the caller expects no response: return the
	 * reserved credits and clean up the mids.
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * Fold the outgoing request into the SMB 3.1.1 preauth hash during
	 * negotiate / session setup.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	/* wait for every response (or a fatal signal) */
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/*
		 * Wait was interrupted: cancel the remaining mids.  A mid
		 * still in SUBMITTED state returns its credit itself (via
		 * cifs_cancelled_callback) when the late response arrives,
		 * so zero our local record of it and keep it queued.
		 */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		/* hand the response buffer over to the caller */
		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* clear resp_buf so delete_mid below won't free it */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Fold the first response into the preauth hash during session
	 * establishment.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * Dequeue all mids we still own.  Cancelled mids are left alone —
	 * their cancelled callback owns the final put.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}
1284
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	/* single-request convenience wrapper around compound_send_recv() */
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1294
1295 int
1296 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1297 struct kvec *iov, int n_vec, int *resp_buf_type ,
1298 const int flags, struct kvec *resp_iov)
1299 {
1300 struct smb_rqst rqst;
1301 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1302 int rc;
1303
1304 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1305 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1306 GFP_KERNEL);
1307 if (!new_iov) {
1308
1309 *resp_buf_type = CIFS_NO_BUFFER;
1310 return -ENOMEM;
1311 }
1312 } else
1313 new_iov = s_iov;
1314
1315
1316 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1317
1318 new_iov[0].iov_base = new_iov[1].iov_base;
1319 new_iov[0].iov_len = 4;
1320 new_iov[1].iov_base += 4;
1321 new_iov[1].iov_len -= 4;
1322
1323 memset(&rqst, 0, sizeof(struct smb_rqst));
1324 rqst.rq_iov = new_iov;
1325 rqst.rq_nvec = n_vec + 1;
1326
1327 rc = cifs_send_recv(xid, ses, ses->server,
1328 &rqst, resp_buf_type, flags, resp_iov);
1329 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1330 kfree(new_iov);
1331 return rc;
1332 }
1333
/*
 * SendReceive - synchronously send one marshalled SMB1 request and copy
 * the raw response into a caller-supplied buffer.
 *
 * @xid:             transaction id (debug/tracing context)
 * @ses:             SMB session to send on; its server is used
 * @in_buf:          complete request frame, starting with the 4-byte
 *                   RFC1002 length header
 * @out_buf:         destination for the raw response frame
 * @pbytes_returned: out: response length excluding the 4-byte header
 * @flags:           CIFS_*_OP flags forwarded to wait_for_free_request()
 *
 * Returns 0 on success or a negative errno.  One credit is taken via
 * wait_for_free_request() and handed back with add_credits() on every
 * exit path after that point.  If the request is interrupted while the
 * response is still outstanding, the mid's callback is repointed at
 * release_mid so the demultiplex thread can free it when the (late)
 * response or a reconnect finally arrives.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Reject frames larger than the negotiated maximum up front so a
	 * bogus length cannot overrun buffers further down the stack.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	/* blocks until a send credit is available for this request type */
	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/*
	 * Hold the server mutex across signing and sending so requests hit
	 * the socket in the same order they were signed - the signature
	 * sequence number must match the on-the-wire order.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* give back the credit taken above */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/*
	 * Nothing went on the wire: back out the two signing sequence
	 * numbers (request + response) the send would have consumed.
	 */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/*
			 * Response still in flight: let the demultiplex
			 * thread free the mid when it eventually arrives.
			 */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	/* copy the whole frame, including the 4-byte RFC1002 header */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1449
1450
1451
1452
1453 static int
1454 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1455 struct smb_hdr *in_buf,
1456 struct smb_hdr *out_buf)
1457 {
1458 int bytes_returned;
1459 struct cifs_ses *ses = tcon->ses;
1460 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1461
1462
1463
1464
1465
1466
1467 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1468 pSMB->Timeout = 0;
1469 pSMB->hdr.Mid = get_next_mid(ses->server);
1470
1471 return SendReceive(xid, ses, in_buf, out_buf,
1472 &bytes_returned, 0);
1473 }
1474
/*
 * SendReceiveBlockingLock - send a blocking lock request and wait for it,
 * allowing the wait to be interrupted by signals.
 *
 * Unlike SendReceive(), the wait here is interruptible.  If a signal
 * arrives while the request is still pending on a healthy connection, we
 * cancel it: transact2 requests go through the generic cancel path
 * (send_cancel()), LOCKING_ANDX requests are cancelled by re-sending them
 * with the CANCEL_LOCK flag (send_lock_cancel()).  We then wait for the
 * server's final answer so the mid queue stays consistent.
 *
 * Returns 0 on success or a negative errno.  When the request was
 * interrupted and the eventual result is -EACCES, -ERESTARTSYS is
 * returned instead so the system call restarts transparently.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;		/* set once we know the lock wait was interrupted */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Reject frames larger than the negotiated maximum up front so a
	 * bogus length cannot overrun buffers further down the stack.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/*
	 * Hold the server mutex across signing and sending so requests hit
	 * the socket in the same order they were signed.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/*
	 * Nothing went on the wire: back out the two signing sequence
	 * numbers (request + response) the send would have consumed.
	 */
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* wait for the reply, but let pending signals wake us up */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* were we interrupted while the request was still outstanding? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((server->tcpStatus == CifsGood) ||
	     (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * A transact2 request (e.g. a POSIX-style lock)
			 * cannot be cancelled with a LOCKING_ANDX cancel;
			 * use the generic cancel path instead.
			 */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * LOCKING_ANDX: ask the server to abort the
			 * blocking lock by re-sending it with the
			 * CANCEL_LOCK flag set.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * -ENOLCK means the lock was already granted or
			 * released before the cancel arrived; not fatal.
			 */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* let the demultiplex thread free the mid */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* got a response after the interrupt - restart the syscall */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* received frame looks sane - copy it out */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
1633
1634
1635
1636
1637
1638 int
1639 cifs_discard_remaining_data(struct TCP_Server_Info *server)
1640 {
1641 unsigned int rfclen = server->pdu_size;
1642 int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
1643 server->total_read;
1644
1645 while (remaining > 0) {
1646 int length;
1647
1648 length = cifs_discard_from_socket(server,
1649 min_t(size_t, remaining,
1650 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1651 if (length < 0)
1652 return length;
1653 server->total_read += length;
1654 remaining -= length;
1655 }
1656
1657 return 0;
1658 }
1659
1660 static int
1661 __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1662 bool malformed)
1663 {
1664 int length;
1665
1666 length = cifs_discard_remaining_data(server);
1667 dequeue_mid(mid, malformed);
1668 mid->resp_buf = server->smallbuf;
1669 server->smallbuf = NULL;
1670 return length;
1671 }
1672
1673 static int
1674 cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1675 {
1676 struct cifs_readdata *rdata = mid->callback_data;
1677
1678 return __cifs_readv_discard(server, mid, rdata->result);
1679 }
1680
/*
 * cifs_readv_receive - receive handler for async read responses.
 *
 * Invoked by the demultiplex thread after it has read the initial header
 * bytes of a read response into server->smallbuf.  Pulls in the rest of
 * the response header, validates it, then streams the data payload
 * directly into the pages attached to the cifs_readdata via
 * read_into_pages() instead of bouncing through a large buffer.
 *
 * Returns the number of bytes read on success or a negative error.  On
 * every bail-out path any unread remainder of the PDU is discarded (via
 * the *_discard helpers) so the socket stream stays frame-aligned.
 */
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	/* whole frame = pdu_size plus the transport preamble */
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * Read the rest of the read-response header (everything up to the
	 * data array), or as much of it as the frame actually contains if
	 * it is shorter than a full read response.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
		HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		/* interim response - the real one will follow later */
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up the first two iovs: preamble, then the header read so far */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* did the server report an error for the read itself? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* an ordinary server error does not make the mid malformed */
		return __cifs_readv_discard(server, mid, false);
	}

	/* is there enough for a complete read-response header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * The advertised data offset falls inside bytes we have
		 * already consumed; clamp forward to total_read rather
		 * than trying to rewind the socket.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* offset past the small buffer cannot be legitimate */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	/* consume any padding between the header and the data array */
	len = data_offset - server->total_read;
	if (len > 0) {
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much payload does the server claim to be sending? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* claimed length overruns the frame - treat as corrupt */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	length = rdata->read_into_pages(server, rdata, data_len);
	if (length < 0)
		return length;

	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* short read from the pages path: drop the rest of the frame */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	/* hand the small buffer over to the mid */
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}