0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/slab.h>
0010 #include <linux/ctype.h>
0011 #include <linux/mempool.h>
0012 #include <linux/vmalloc.h>
0013 #include "cifspdu.h"
0014 #include "cifsglob.h"
0015 #include "cifsproto.h"
0016 #include "cifs_debug.h"
0017 #include "smberr.h"
0018 #include "nterr.h"
0019 #include "cifs_unicode.h"
0020 #include "smb2pdu.h"
0021 #include "cifsfs.h"
0022 #ifdef CONFIG_CIFS_DFS_UPCALL
0023 #include "dns_resolve.h"
0024 #endif
0025 #include "fs_context.h"
0026 #include "cached_dir.h"
0027
0028 extern mempool_t *cifs_sm_req_poolp;
0029 extern mempool_t *cifs_req_poolp;
0030
0031
0032
0033
0034
0035
0036
0037 unsigned int
0038 _get_xid(void)
0039 {
0040 unsigned int xid;
0041
0042 spin_lock(&GlobalMid_Lock);
0043 GlobalTotalActiveXid++;
0044
0045
0046 if (GlobalTotalActiveXid > GlobalMaxActiveXid)
0047 GlobalMaxActiveXid = GlobalTotalActiveXid;
0048 if (GlobalTotalActiveXid > 65000)
0049 cifs_dbg(FYI, "warning: more than 65000 requests active\n");
0050 xid = GlobalCurrentXid++;
0051 spin_unlock(&GlobalMid_Lock);
0052 return xid;
0053 }
0054
/*
 * Release an xid obtained from _get_xid() and decrement the count of
 * requests in flight. The xid value itself is not consulted here.
 */
void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);

	/* NOTE(review): no underflow check — assumes callers pair this
	 * 1:1 with _get_xid() */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
0064
0065 struct cifs_ses *
0066 sesInfoAlloc(void)
0067 {
0068 struct cifs_ses *ret_buf;
0069
0070 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
0071 if (ret_buf) {
0072 atomic_inc(&sesInfoAllocCount);
0073 spin_lock_init(&ret_buf->ses_lock);
0074 ret_buf->ses_status = SES_NEW;
0075 ++ret_buf->ses_count;
0076 INIT_LIST_HEAD(&ret_buf->smb_ses_list);
0077 INIT_LIST_HEAD(&ret_buf->tcon_list);
0078 mutex_init(&ret_buf->session_mutex);
0079 spin_lock_init(&ret_buf->iface_lock);
0080 INIT_LIST_HEAD(&ret_buf->iface_list);
0081 spin_lock_init(&ret_buf->chan_lock);
0082 }
0083 return ret_buf;
0084 }
0085
/*
 * Tear down a session structure and release everything it owns:
 * server-identity strings, credentials, the negotiated auth key, and
 * the cached server interface list.
 */
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	struct cifs_server_iface *iface = NULL, *niface = NULL;

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	/* kfree_sensitive() zeroises before freeing — these hold secrets */
	kfree_sensitive(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree_sensitive(buf_to_free->auth_key.response);
	/* drop our reference on each cached server interface */
	spin_lock(&buf_to_free->iface_lock);
	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
				 iface_head)
		kref_put(&iface->refcount, release_iface);
	spin_unlock(&buf_to_free->iface_lock);
	/* zeroise the whole struct too — it may embed key material */
	kfree_sensitive(buf_to_free);
}
0111
0112 struct cifs_tcon *
0113 tconInfoAlloc(void)
0114 {
0115 struct cifs_tcon *ret_buf;
0116
0117 ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
0118 if (!ret_buf)
0119 return NULL;
0120 ret_buf->cfid = init_cached_dir();
0121 if (!ret_buf->cfid) {
0122 kfree(ret_buf);
0123 return NULL;
0124 }
0125
0126 atomic_inc(&tconInfoAllocCount);
0127 ret_buf->status = TID_NEW;
0128 ++ret_buf->tc_count;
0129 spin_lock_init(&ret_buf->tc_lock);
0130 INIT_LIST_HEAD(&ret_buf->openFileList);
0131 INIT_LIST_HEAD(&ret_buf->tcon_list);
0132 spin_lock_init(&ret_buf->open_file_lock);
0133 spin_lock_init(&ret_buf->stat_lock);
0134 atomic_set(&ret_buf->num_local_opens, 0);
0135 atomic_set(&ret_buf->num_remote_opens, 0);
0136
0137 return ret_buf;
0138 }
0139
0140 void
0141 tconInfoFree(struct cifs_tcon *tcon)
0142 {
0143 if (tcon == NULL) {
0144 cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
0145 return;
0146 }
0147 free_cached_dir(tcon);
0148 atomic_dec(&tconInfoAllocCount);
0149 kfree(tcon->nativeFileSystem);
0150 kfree_sensitive(tcon->password);
0151 kfree(tcon);
0152 }
0153
/*
 * Get a large request buffer from the request mempool. Never returns
 * NULL: mempool allocation with GFP_NOFS blocks until a buffer is
 * available rather than failing. Release with cifs_buf_release().
 */
struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 headers are larger than SMB1 ones, so clear the bigger of
	 * the two header sizes; the pool buffers themselves are large
	 * enough for a full request.
	 */
	size_t buf_size = sizeof(struct smb2_hdr);

	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the header area plus a few bytes of slack; callers fill
	 * in the rest.
	 * NOTE(review): the "+ 3" looks historical — confirm against the
	 * pool buffer size before changing it */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_buf_alloc_count);
#endif

	return ret_buf;
}
0182
0183 void
0184 cifs_buf_release(void *buf_to_free)
0185 {
0186 if (buf_to_free == NULL) {
0187
0188 return;
0189 }
0190 mempool_free(buf_to_free, cifs_req_poolp);
0191
0192 atomic_dec(&buf_alloc_count);
0193 return;
0194 }
0195
/*
 * Get a small request buffer from the small-request mempool. Never
 * returns NULL (GFP_NOFS mempool allocations wait instead of failing).
 * Note: the buffer is NOT zeroed — callers must fill in the header.
 * Release with cifs_small_buf_release().
 */
struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);

	atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_small_buf_alloc_count);
#endif

	return ret_buf;
}
0215
0216 void
0217 cifs_small_buf_release(void *buf_to_free)
0218 {
0219
0220 if (buf_to_free == NULL) {
0221 cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
0222 return;
0223 }
0224 mempool_free(buf_to_free, cifs_sm_req_poolp);
0225
0226 atomic_dec(&small_buf_alloc_count);
0227 return;
0228 }
0229
0230 void
0231 free_rsp_buf(int resp_buftype, void *rsp)
0232 {
0233 if (resp_buftype == CIFS_SMALL_BUFFER)
0234 cifs_small_buf_release(rsp);
0235 else if (resp_buftype == CIFS_LARGE_BUFFER)
0236 cifs_buf_release(rsp);
0237 }
0238
0239
0240
/*
 * Fill in an SMB1 request header (legacy dialects only). @word_count is
 * the number of 2-byte parameter words that will follow the header.
 */
void
header_assemble(struct smb_hdr *buffer, char smb_command ,
		const struct cifs_tcon *treeCon, int word_count
		)
{
	char *temp = (char *) buffer;

	/* clear the header and parameter area before filling it in.
	 * NOTE(review): 256 is assumed to cover header + parameters for
	 * all callers — confirm against the request buffer sizes */
	memset(temp, 0, 256);

	/*
	 * RFC1001 length: parameter words (2 bytes each) plus the header,
	 * minus the 4-byte RFC1001 field itself, plus the 2-byte
	 * byte-count (bcc) field.
	 */
	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 +
	    2 ) ;

	/* protocol signature: 0xFF 'S' 'M' 'B' */
	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	/* the 32-bit tgid is split across the Pid and PidHigh fields */
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			/* advertise unicode / 32-bit status if negotiated */
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* copy the session id assigned at session setup */
			buffer->Uid = treeCon->ses->Suid;
			if (treeCon->ses->server)
				buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		/* request signing when the server negotiated it */
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

	/* callers append word_count words plus the bcc after the header */
	buffer->WordCount = (char) word_count;
	return;
}
0290
/*
 * Sanity-check a received SMB1 frame header. Returns 0 if the frame is
 * acceptable (a response, or a server-initiated LOCKING_ANDX, which is
 * how oplock breaks arrive), non-zero otherwise.
 */
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature": 0xFF 'S' 'M' 'B' ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept it */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where the server sends us a request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}
0313
/*
 * Validate a complete SMB1 frame read off the wire: header signature,
 * direction, and consistency between the RFC1001 length, the number of
 * bytes actually read, and the length implied by the SMB contents.
 * Returns 0 if usable, -EIO otherwise.
 */
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* length calculated from the SMB contents */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even contain wct + bcc? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* short error frame: some servers omit wct/bcc on
			 * errors — normalise wct and accept it */
			smb->WordCount = 0;
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;

			/* server quirk: a lone first bcc byte of zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/*
				 * zero the (missing) second bcc byte as
				 * well so later length math reads a sane
				 * 16-bit value
				 */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* frame is big enough: check direction/signature then lengths */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);

		/* large responses can legitimately differ if the 16-bit
		 * bcc wrapped; accept when the lengths agree mod 64K */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {

			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0;
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			/* frame shorter than its own contents claim */
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers pad responses slightly; tolerate up
			 * to 512 bytes of slack but reject anything larger
			 * as likely corrupt (or malicious).
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}
0397
/*
 * Inspect an incoming SMB1 frame that matched no waiting request and
 * handle it if it is an oplock break or a change-notify (dnotify)
 * response. Returns true when the frame was consumed here, false when
 * it should still go through normal dispatch.
 */
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	    (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		/* change-notify response */
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		/* bytes of the frame actually read, minus the RFC1001 field */
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			/* validate the server-supplied offset before use */
			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "Invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/* notification only logged, not acted on here */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/*
		 * A response to one of our own lock requests that no thread
		 * was waiting for; this can happen legitimately when the
		 * handle was already closed.
		 */
		if ((NT_STATUS_INVALID_HANDLE) ==
		    le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "Invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			/* real oplock breaks arrive as requests, not responses */
			return false;
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up the tcon by tid, then the open file by fid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &srv->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each_entry(netfile, &tcon->openFileList, tlist) {
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				/* block new writers until the break is done */
				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}
0505
0506 void
0507 dump_smb(void *buf, int smb_buf_length)
0508 {
0509 if (traceSMB == 0)
0510 return;
0511
0512 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
0513 smb_buf_length, true);
0514 }
0515
0516 void
0517 cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
0518 {
0519 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
0520 struct cifs_tcon *tcon = NULL;
0521
0522 if (cifs_sb->master_tlink)
0523 tcon = cifs_sb_master_tcon(cifs_sb);
0524
0525 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
0526 cifs_sb->mnt_cifs_serverino_autodisabled = true;
0527 cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
0528 tcon ? tcon->treeName : "new server");
0529 cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
0530 cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
0531
0532 }
0533 }
0534
0535 void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
0536 {
0537 oplock &= 0xF;
0538
0539 if (oplock == OPLOCK_EXCLUSIVE) {
0540 cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
0541 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
0542 &cinode->netfs.inode);
0543 } else if (oplock == OPLOCK_READ) {
0544 cinode->oplock = CIFS_CACHE_READ_FLG;
0545 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
0546 &cinode->netfs.inode);
0547 } else
0548 cinode->oplock = 0;
0549 }
0550
0551
0552
0553
0554
/*
 * Register the current task as a writer on @cinode, first waiting
 * (killably) for any in-progress oplock break to complete. Returns 0
 * once registered, or the error from the interrupted wait. Pair with
 * cifs_put_writer().
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	/* wait until no oplock break is being processed on this inode */
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;

	/*
	 * An oplock break may have started between the unlocked wait
	 * above and taking writers_lock; if so, undo our registration
	 * (waking waiters if we were the only writer) and retry.
	 */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}
0582
/*
 * Drop a writer registration taken by cifs_get_writer(); the last
 * writer clears PENDING_WRITERS and wakes anyone waiting on that bit.
 */
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603
/*
 * Hand an oplock break off to the oplock workqueue. An extra file
 * reference is taken first so the cifsFileInfo cannot go away while
 * the work is pending. NOTE(review): the matching put is presumably
 * done by the oplock-break work handler — confirm before changing.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/* keep the file alive across the asynchronous break processing */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}
0616
/*
 * Mark oplock-break processing for this inode complete and wake any
 * tasks blocked on that bit in cifs_get_writer().
 */
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}
0622
0623 bool
0624 backup_cred(struct cifs_sb_info *cifs_sb)
0625 {
0626 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
0627 if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
0628 return true;
0629 }
0630 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
0631 if (in_group_p(cifs_sb->ctx->backupgid))
0632 return true;
0633 }
0634
0635 return false;
0636 }
0637
0638 void
0639 cifs_del_pending_open(struct cifs_pending_open *open)
0640 {
0641 spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
0642 list_del(&open->olist);
0643 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
0644 }
0645
/*
 * Initialise a pending-open record from @fid and link it onto the
 * tcon's pending_opens list. The caller must hold the tcon's
 * open_file_lock (see cifs_add_pending_open()).
 */
void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}
0656
0657 void
0658 cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
0659 struct cifs_pending_open *open)
0660 {
0661 spin_lock(&tlink_tcon(tlink)->open_file_lock);
0662 cifs_add_pending_open_locked(fid, tlink, open);
0663 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
0664 }
0665
0666
0667
0668
0669
0670
0671 bool
0672 cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
0673 {
0674 struct cifs_deferred_close *dclose;
0675
0676 list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
0677 if ((dclose->netfid == cfile->fid.netfid) &&
0678 (dclose->persistent_fid == cfile->fid.persistent_fid) &&
0679 (dclose->volatile_fid == cfile->fid.volatile_fid)) {
0680 *pdclose = dclose;
0681 return true;
0682 }
0683 }
0684 return false;
0685 }
0686
0687
0688
0689
0690 void
0691 cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
0692 {
0693 bool is_deferred = false;
0694 struct cifs_deferred_close *pdclose;
0695
0696 is_deferred = cifs_is_deferred_close(cfile, &pdclose);
0697 if (is_deferred) {
0698 kfree(dclose);
0699 return;
0700 }
0701
0702 dclose->tlink = cfile->tlink;
0703 dclose->netfid = cfile->fid.netfid;
0704 dclose->persistent_fid = cfile->fid.persistent_fid;
0705 dclose->volatile_fid = cfile->fid.volatile_fid;
0706 list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
0707 }
0708
0709
0710
0711
0712 void
0713 cifs_del_deferred_close(struct cifsFileInfo *cfile)
0714 {
0715 bool is_deferred = false;
0716 struct cifs_deferred_close *dclose;
0717
0718 is_deferred = cifs_is_deferred_close(cfile, &dclose);
0719 if (!is_deferred)
0720 return;
0721 list_del(&dclose->dlist);
0722 kfree(dclose);
0723 }
0724
/*
 * Immediately close any handles on @cifs_inode whose close was
 * deferred. Candidates are collected under open_file_lock into a
 * private list; the actual puts (which may sleep) happen after the
 * lock is dropped.
 */
void
cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *cfile = NULL;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	if (cifs_inode == NULL)
		return;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
		if (delayed_work_pending(&cfile->deferred)) {
			/* only act if we beat the delayed work to firing */
			if (cancel_delayed_work(&cfile->deferred)) {
				cifs_del_deferred_close(cfile);

				/* GFP_ATOMIC: we hold a spinlock here */
				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);

	/*
	 * NOTE(review): the collected cfile pointers are used after the
	 * lock is dropped without an extra reference being taken above —
	 * this relies on the deferred work's reference still being held;
	 * confirm that invariant before modifying.
	 */
	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}
0758
/*
 * Immediately close every deferred-closed handle on @tcon. Same
 * collect-under-lock / put-after-unlock pattern as
 * cifs_close_deferred_file(), but across the whole tree connection.
 */
void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		if (delayed_work_pending(&cfile->deferred)) {
			/* only act if we beat the delayed work to firing */
			if (cancel_delayed_work(&cfile->deferred)) {
				cifs_del_deferred_close(cfile);

				/* GFP_ATOMIC: we hold a spinlock here */
				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	/* the puts may sleep, hence done outside the spinlock */
	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}
0789 void
0790 cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
0791 {
0792 struct cifsFileInfo *cfile;
0793 struct file_list *tmp_list, *tmp_next_list;
0794 struct list_head file_head;
0795 void *page;
0796 const char *full_path;
0797
0798 INIT_LIST_HEAD(&file_head);
0799 page = alloc_dentry_path();
0800 spin_lock(&tcon->open_file_lock);
0801 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
0802 full_path = build_path_from_dentry(cfile->dentry, page);
0803 if (strstr(full_path, path)) {
0804 if (delayed_work_pending(&cfile->deferred)) {
0805 if (cancel_delayed_work(&cfile->deferred)) {
0806 cifs_del_deferred_close(cfile);
0807
0808 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
0809 if (tmp_list == NULL)
0810 break;
0811 tmp_list->cfile = cfile;
0812 list_add_tail(&tmp_list->list, &file_head);
0813 }
0814 }
0815 }
0816 }
0817 spin_unlock(&tcon->open_file_lock);
0818
0819 list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
0820 _cifsFileInfo_put(tmp_list->cfile, true, false);
0821 list_del(&tmp_list->list);
0822 kfree(tmp_list);
0823 }
0824 free_dentry_path(page);
0825 }
0826
0827
0828
0829
0830
0831
0832
/*
 * Parse a DFS referral response into an array of dfs_info3_param
 * nodes. Only version-3 referrals are supported. On success the caller
 * owns *target_nodes (free with free_dfs_info_array()); on any error
 * the array is freed here and *num_of_nodes is zeroed.
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* upper boundary of the response buffer, for offset validation */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect the necessary data from each referral entry */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			/*
			 * PathConsumed counts bytes of the UTF-16 form of
			 * the request path; convert the search name to
			 * UTF-16 so it can be translated back into a
			 * codepage character count.
			 */
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath, bounded by the end of the response */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy the link target's UNC, likewise bounded */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		/* free any partially-filled entries as well */
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}
0933
0934 struct cifs_aio_ctx *
0935 cifs_aio_ctx_alloc(void)
0936 {
0937 struct cifs_aio_ctx *ctx;
0938
0939
0940
0941
0942
0943
0944 ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
0945 if (!ctx)
0946 return NULL;
0947
0948 INIT_LIST_HEAD(&ctx->list);
0949 mutex_init(&ctx->aio_mutex);
0950 init_completion(&ctx->done);
0951 kref_init(&ctx->refcount);
0952 return ctx;
0953 }
0954
/*
 * kref release callback for a cifs_aio_ctx: drops the file reference
 * and releases any pages pinned by setup_aio_ctx_iter() along with the
 * bio_vec array itself.
 */
void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	/* NOTE(review): assumes ctx->cfile is always valid here (or that
	 * cifsFileInfo_put tolerates NULL) — confirm against callers */
	cifsFileInfo_put(ctx->cfile);

	/*
	 * ctx->bv is only set for user-backed iterators; the pages it
	 * references were pinned in setup_aio_ctx_iter() and are released
	 * here, dirtying them first when data was read into user memory.
	 */
	if (ctx->bv) {
		unsigned i;

		for (i = 0; i < ctx->npages; i++) {
			if (ctx->should_dirty)
				set_page_dirty(ctx->bv[i].bv_page);
			put_page(ctx->bv[i].bv_page);
		}
		kvfree(ctx->bv);
	}

	kfree(ctx);
}
0981
0982 #define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
0983
/*
 * Capture @iter into @ctx so the I/O can complete after the caller
 * returns: kvec iterators are simply copied, while user-backed
 * iterators have their pages pinned into a bio_vec array (released
 * later by cifs_aio_ctx_release()). Returns 0 or -ENOMEM.
 */
int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iov_iter_is_kvec(iter)) {
		/* kernel memory won't go away — just copy the iterator */
		memcpy(&ctx->iter, iter, sizeof(*iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	/* prefer kmalloc for modest arrays, fall back to vmalloc */
	if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(*bv)));
		if (!bv)
			return -ENOMEM;
	}

	if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(*pages)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	/* pin user pages in batches and convert each batch to bio_vecs */
	while (count && npages < max_pages) {
		rc = iov_iter_get_pages2(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		count -= rc;
		rc += start;	/* total bytes spanned, incl. first-page offset */
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;	/* only the first page has an offset */
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;	/* bytes actually captured */
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
	return 0;
}
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
/*
 * Allocate a crypto shash transform named @name together with a
 * descriptor sized for it. Idempotent: if *sdesc is already set,
 * nothing is done. On failure both *shash and *sdesc end up NULL and
 * a negative errno is returned.
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	/* already allocated on a previous call */
	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "Could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	/* descriptor size depends on the chosen algorithm */
	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		/* roll back the transform so both pointers stay NULL */
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	return 0;
}
1111
1112
1113
1114
1115
1116
1117
1118
1119 void
1120 cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
1121 {
1122 kfree(*sdesc);
1123 *sdesc = NULL;
1124 if (*shash)
1125 crypto_free_shash(*shash);
1126 *shash = NULL;
1127 }
1128
1129
1130
1131
1132
1133
1134
1135
1136 void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
1137 unsigned int *len, unsigned int *offset)
1138 {
1139 *len = rqst->rq_pagesz;
1140 *offset = (page == 0) ? rqst->rq_offset : 0;
1141
1142 if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
1143 *len = rqst->rq_tailsz;
1144 else if (page == 0)
1145 *len = rqst->rq_pagesz - rqst->rq_offset;
1146 }
1147
/*
 * Point *h at the hostname portion of a UNC path (the text between the
 * leading separators and the next separator) and store its length in
 * *len. The input string is neither modified nor copied.
 */
void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *start = unc;
	const char *p;

	/* skip any leading "\\" or "//" */
	while (*start == '\\' || *start == '/')
		start++;

	/* scan to the end of the hostname component */
	for (p = start; *p && *p != '\\' && *p != '/'; p++)
		;

	*h = start;
	*len = p - start;
}
1164
1165
1166
1167
1168
1169
1170
1171
/*
 * Copy a path into a PATH_MAX-sized destination buffer, truncating if
 * necessary, and return the number of bytes stored including the NUL
 * terminator (the count SMB path-name lengths use).
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * strscpy() returns the copied length excluding the NUL, or a
	 * negative error on truncation — in which case dst still holds a
	 * NUL-terminated PATH_MAX-1 byte prefix, so clamp to that.
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* include the trailing NUL in the reported length */
	name_len++;
	return name_len;
}
1188
/* Search key and result slot for the superblock-iteration callbacks. */
struct super_cb_data {
	void *data;		/* what to match on (e.g. a TCP_Server_Info) */
	struct super_block *sb;	/* first matching superblock, or NULL */
};
1193
1194 static void tcp_super_cb(struct super_block *sb, void *arg)
1195 {
1196 struct super_cb_data *sd = arg;
1197 struct TCP_Server_Info *server = sd->data;
1198 struct cifs_sb_info *cifs_sb;
1199 struct cifs_tcon *tcon;
1200
1201 if (sd->sb)
1202 return;
1203
1204 cifs_sb = CIFS_SB(sb);
1205 tcon = cifs_sb_master_tcon(cifs_sb);
1206 if (tcon->ses->server == server)
1207 sd->sb = sb;
1208 }
1209
/*
 * Walk all mounted cifs/smb3 superblocks and return the first one that
 * callback @f matches (recorded in the shared super_cb_data), holding
 * an active reference the caller must drop via __cifs_put_super().
 * Returns ERR_PTR(-EINVAL) when nothing matches.
 */
static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
					    void *data)
{
	struct super_cb_data sd = {
		.data = data,
		.sb = NULL,
	};
	/* both filesystem type registrations share the same superblocks */
	struct file_system_type **fs_type = (struct file_system_type *[]) {
		&cifs_fs_type, &smb3_fs_type, NULL,
	};

	for (; *fs_type; fs_type++) {
		iterate_supers_type(*fs_type, f, &sd);
		if (sd.sb) {
			/*
			 * Grab an active reference so the superblock
			 * cannot be unmounted under the caller.
			 */
			cifs_sb_active(sd.sb);
			return sd.sb;
		}
	}
	return ERR_PTR(-EINVAL);
}
1235
/* Drop the active reference taken by __cifs_get_super(), if any. */
static void __cifs_put_super(struct super_block *sb)
{
	if (IS_ERR_OR_NULL(sb))
		return;
	cifs_sb_deactive(sb);
}
1241
/*
 * Find a superblock mounted through @server, with an active reference
 * held; release it with cifs_put_tcp_super().
 */
struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
{
	return __cifs_get_super(tcp_super_cb, server);
}
1246
/* Release a superblock obtained from cifs_get_tcp_super(). */
void cifs_put_tcp_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}
1251
1252 #ifdef CONFIG_CIFS_DFS_UPCALL
1253 int match_target_ip(struct TCP_Server_Info *server,
1254 const char *share, size_t share_len,
1255 bool *result)
1256 {
1257 int rc;
1258 char *target, *tip = NULL;
1259 struct sockaddr tipaddr;
1260
1261 *result = false;
1262
1263 target = kzalloc(share_len + 3, GFP_KERNEL);
1264 if (!target) {
1265 rc = -ENOMEM;
1266 goto out;
1267 }
1268
1269 scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
1270
1271 cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
1272
1273 rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
1274 if (rc < 0)
1275 goto out;
1276
1277 cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);
1278
1279 if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
1280 cifs_dbg(VFS, "%s: failed to convert target ip address\n",
1281 __func__);
1282 rc = -EINVAL;
1283 goto out;
1284 }
1285
1286 *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
1287 &tipaddr);
1288 cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
1289 rc = 0;
1290
1291 out:
1292 kfree(target);
1293 kfree(tip);
1294
1295 return rc;
1296 }
1297
1298 int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
1299 {
1300 kfree(cifs_sb->prepath);
1301
1302 if (prefix && *prefix) {
1303 cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
1304 if (!cifs_sb->prepath)
1305 return -ENOMEM;
1306
1307 convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
1308 } else
1309 cifs_sb->prepath = NULL;
1310
1311 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
1312 return 0;
1313 }
1314
1315
1316
1317
1318
1319
1320
1321
1322
/*
 * Quirk helper for servers that mishandle queries on non-ASCII DFS
 * link names: look the combined "<tree>\<link>" path up in our DFS
 * referral cache and report -EREMOTE when it is a known DFS link, so
 * the caller follows the referral instead of trusting the server.
 * Returns -EREMOTE on a cache hit, -ENOMEM on allocation failure, or
 * the dfs_cache_find() error otherwise.
 */
int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
				       struct cifs_tcon *tcon,
				       struct cifs_sb_info *cifs_sb,
				       const char *linkpath)
{
	char *treename, *dfspath, sep;
	int treenamelen, linkpathlen, rc;

	treename = tcon->treeName;

	/*
	 * Drop one of the two leading separators from the tree name so
	 * the concatenated path uses the mount's single-separator form.
	 */
	sep = CIFS_DIR_SEP(cifs_sb);
	if (treename[0] == sep && treename[1] == sep)
		treename++;
	linkpathlen = strlen(linkpath);
	treenamelen = strnlen(treename, MAX_TREE_SIZE + 1);
	dfspath = kzalloc(treenamelen + linkpathlen + 1, GFP_KERNEL);
	if (!dfspath)
		return -ENOMEM;
	if (treenamelen)
		memcpy(dfspath, treename, treenamelen);
	memcpy(dfspath + treenamelen, linkpath, linkpathlen);
	rc = dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls,
			    cifs_remap(cifs_sb), dfspath, NULL, NULL);
	if (rc == 0) {
		cifs_dbg(FYI, "DFS ref '%s' is found, emulate -EREMOTE\n",
			 dfspath);
		rc = -EREMOTE;
	} else {
		cifs_dbg(FYI, "%s: dfs_cache_find returned %d\n", __func__, rc);
	}
	kfree(dfspath);
	return rc;
}
1359 #endif