misc.c (36336B)
1// SPDX-License-Identifier: LGPL-2.1 2/* 3 * 4 * Copyright (C) International Business Machines Corp., 2002,2008 5 * Author(s): Steve French (sfrench@us.ibm.com) 6 * 7 */ 8 9#include <linux/slab.h> 10#include <linux/ctype.h> 11#include <linux/mempool.h> 12#include <linux/vmalloc.h> 13#include "cifspdu.h" 14#include "cifsglob.h" 15#include "cifsproto.h" 16#include "cifs_debug.h" 17#include "smberr.h" 18#include "nterr.h" 19#include "cifs_unicode.h" 20#include "smb2pdu.h" 21#include "cifsfs.h" 22#ifdef CONFIG_CIFS_DFS_UPCALL 23#include "dns_resolve.h" 24#endif 25#include "fs_context.h" 26 27extern mempool_t *cifs_sm_req_poolp; 28extern mempool_t *cifs_req_poolp; 29 30/* The xid serves as a useful identifier for each incoming vfs request, 31 in a similar way to the mid which is useful to track each sent smb, 32 and CurrentXid can also provide a running counter (although it 33 will eventually wrap past zero) of the total vfs operations handled 34 since the cifs fs was mounted */ 35 36unsigned int 37_get_xid(void) 38{ 39 unsigned int xid; 40 41 spin_lock(&GlobalMid_Lock); 42 GlobalTotalActiveXid++; 43 44 /* keep high water mark for number of simultaneous ops in filesystem */ 45 if (GlobalTotalActiveXid > GlobalMaxActiveXid) 46 GlobalMaxActiveXid = GlobalTotalActiveXid; 47 if (GlobalTotalActiveXid > 65000) 48 cifs_dbg(FYI, "warning: more than 65000 requests active\n"); 49 xid = GlobalCurrentXid++; 50 spin_unlock(&GlobalMid_Lock); 51 return xid; 52} 53 54void 55_free_xid(unsigned int xid) 56{ 57 spin_lock(&GlobalMid_Lock); 58 /* if (GlobalTotalActiveXid == 0) 59 BUG(); */ 60 GlobalTotalActiveXid--; 61 spin_unlock(&GlobalMid_Lock); 62} 63 64struct cifs_ses * 65sesInfoAlloc(void) 66{ 67 struct cifs_ses *ret_buf; 68 69 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL); 70 if (ret_buf) { 71 atomic_inc(&sesInfoAllocCount); 72 ret_buf->ses_status = SES_NEW; 73 ++ret_buf->ses_count; 74 INIT_LIST_HEAD(&ret_buf->smb_ses_list); 75 INIT_LIST_HEAD(&ret_buf->tcon_list); 76 mutex_init(&ret_buf->session_mutex); 77 spin_lock_init(&ret_buf->iface_lock); 78 INIT_LIST_HEAD(&ret_buf->iface_list); 79 spin_lock_init(&ret_buf->chan_lock); 80 } 81 return ret_buf; 82} 83 84void 85sesInfoFree(struct cifs_ses *buf_to_free) 86{ 87 struct cifs_server_iface *iface = NULL, *niface = NULL; 88 89 if (buf_to_free == NULL) { 90 cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n"); 91 return; 92 } 93 94 atomic_dec(&sesInfoAllocCount); 95 kfree(buf_to_free->serverOS); 96 kfree(buf_to_free->serverDomain); 97 kfree(buf_to_free->serverNOS); 98 kfree_sensitive(buf_to_free->password); 99 kfree(buf_to_free->user_name); 100 kfree(buf_to_free->domainName); 101 kfree_sensitive(buf_to_free->auth_key.response); 102 spin_lock(&buf_to_free->iface_lock); 103 list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list, 104 iface_head) 105 kref_put(&iface->refcount, release_iface); 106 spin_unlock(&buf_to_free->iface_lock); 107 kfree_sensitive(buf_to_free); 108} 109 110struct cifs_tcon * 111tconInfoAlloc(void) 112{ 113 struct cifs_tcon *ret_buf; 114 115 ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL); 116 if (!ret_buf) 117 return NULL; 118 ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL); 119 if (!ret_buf->crfid.fid) { 120 kfree(ret_buf); 121 return NULL; 122 } 123 INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries); 124 mutex_init(&ret_buf->crfid.dirents.de_mutex); 125 126 atomic_inc(&tconInfoAllocCount); 127 ret_buf->status = TID_NEW; 128 ++ret_buf->tc_count; 129 INIT_LIST_HEAD(&ret_buf->openFileList); 130 INIT_LIST_HEAD(&ret_buf->tcon_list); 131 spin_lock_init(&ret_buf->open_file_lock); 132 mutex_init(&ret_buf->crfid.fid_mutex); 133 spin_lock_init(&ret_buf->stat_lock); 134 atomic_set(&ret_buf->num_local_opens, 0); 135 atomic_set(&ret_buf->num_remote_opens, 0); 136 137 return ret_buf; 138} 139 140void 141tconInfoFree(struct cifs_tcon *buf_to_free) 142{ 143 if (buf_to_free == NULL) { 144 cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n"); 145 return; 146 } 147 atomic_dec(&tconInfoAllocCount); 148 kfree(buf_to_free->nativeFileSystem); 149 kfree_sensitive(buf_to_free->password); 150 kfree(buf_to_free->crfid.fid); 151 kfree(buf_to_free); 152} 153 154struct smb_hdr * 155cifs_buf_get(void) 156{ 157 struct smb_hdr *ret_buf = NULL; 158 /* 159 * SMB2 header is bigger than CIFS one - no problems to clean some 160 * more bytes for CIFS. 161 */ 162 size_t buf_size = sizeof(struct smb2_hdr); 163 164 /* 165 * We could use negotiated size instead of max_msgsize - 166 * but it may be more efficient to always alloc same size 167 * albeit slightly larger than necessary and maxbuffersize 168 * defaults to this and can not be bigger. 169 */ 170 ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS); 171 172 /* clear the first few header bytes */ 173 /* for most paths, more is cleared in header_assemble */ 174 memset(ret_buf, 0, buf_size + 3); 175 atomic_inc(&bufAllocCount); 176#ifdef CONFIG_CIFS_STATS2 177 atomic_inc(&totBufAllocCount); 178#endif /* CONFIG_CIFS_STATS2 */ 179 180 return ret_buf; 181} 182 183void 184cifs_buf_release(void *buf_to_free) 185{ 186 if (buf_to_free == NULL) { 187 /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/ 188 return; 189 } 190 mempool_free(buf_to_free, cifs_req_poolp); 191 192 atomic_dec(&bufAllocCount); 193 return; 194} 195 196struct smb_hdr * 197cifs_small_buf_get(void) 198{ 199 struct smb_hdr *ret_buf = NULL; 200 201/* We could use negotiated size instead of max_msgsize - 202 but it may be more efficient to always alloc same size 203 albeit slightly larger than necessary and maxbuffersize 204 defaults to this and can not be bigger */ 205 ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS); 206 /* No need to clear memory here, cleared in header assemble */ 207 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ 208 atomic_inc(&smBufAllocCount); 209#ifdef CONFIG_CIFS_STATS2 210 atomic_inc(&totSmBufAllocCount); 211#endif /* CONFIG_CIFS_STATS2 */ 212 213 return ret_buf; 214} 215 216void 217cifs_small_buf_release(void *buf_to_free) 218{ 219 220 if (buf_to_free == NULL) { 221 cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n"); 222 return; 223 } 224 mempool_free(buf_to_free, cifs_sm_req_poolp); 225 226 atomic_dec(&smBufAllocCount); 227 return; 228} 229 230void 231free_rsp_buf(int resp_buftype, void *rsp) 232{ 233 if (resp_buftype == CIFS_SMALL_BUFFER) 234 cifs_small_buf_release(rsp); 235 else if (resp_buftype == CIFS_LARGE_BUFFER) 236 cifs_buf_release(rsp); 237} 238 239/* NB: MID can not be set if treeCon not passed in, in that 240 case it is responsbility of caller to set the mid */ 241void 242header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , 243 const struct cifs_tcon *treeCon, int word_count 244 /* length of fixed section (word count) in two byte units */) 245{ 246 char *temp = (char *) buffer; 247 248 memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */ 249 250 buffer->smb_buf_length = cpu_to_be32( 251 (2 * word_count) + sizeof(struct smb_hdr) - 252 4 /* RFC 1001 length field does not count */ + 253 2 /* for bcc field itself */) ; 254 255 buffer->Protocol[0] = 0xFF; 256 buffer->Protocol[1] = 'S'; 257 buffer->Protocol[2] = 'M'; 258 buffer->Protocol[3] = 'B'; 259 buffer->Command = smb_command; 260 buffer->Flags = 0x00; /* case sensitive */ 261 buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES; 262 buffer->Pid = cpu_to_le16((__u16)current->tgid); 263 buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16)); 264 if (treeCon) { 265 buffer->Tid = treeCon->tid; 266 if (treeCon->ses) { 267 if (treeCon->ses->capabilities & CAP_UNICODE) 268 buffer->Flags2 |= SMBFLG2_UNICODE; 269 if (treeCon->ses->capabilities & CAP_STATUS32) 270 buffer->Flags2 |= SMBFLG2_ERR_STATUS; 271 272 /* Uid is not converted */ 273 buffer->Uid = treeCon->ses->Suid; 274 if (treeCon->ses->server) 275 buffer->Mid = get_next_mid(treeCon->ses->server); 276 } 277 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) 278 buffer->Flags2 |= SMBFLG2_DFS; 279 if (treeCon->nocase) 280 buffer->Flags |= SMBFLG_CASELESS; 281 if ((treeCon->ses) && (treeCon->ses->server)) 282 if (treeCon->ses->server->sign) 283 buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 284 } 285 286/* endian conversion of flags is now done just before sending */ 287 buffer->WordCount = (char) word_count; 288 return; 289} 290 291static int 292check_smb_hdr(struct smb_hdr *smb) 293{ 294 /* does it have the right SMB "signature" ? */ 295 if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) { 296 cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n", 297 *(unsigned int *)smb->Protocol); 298 return 1; 299 } 300 301 /* if it's a response then accept */ 302 if (smb->Flags & SMBFLG_RESPONSE) 303 return 0; 304 305 /* only one valid case where server sends us request */ 306 if (smb->Command == SMB_COM_LOCKING_ANDX) 307 return 0; 308 309 cifs_dbg(VFS, "Server sent request, not response. mid=%u\n", 310 get_mid(smb)); 311 return 1; 312} 313 314int 315checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server) 316{ 317 struct smb_hdr *smb = (struct smb_hdr *)buf; 318 __u32 rfclen = be32_to_cpu(smb->smb_buf_length); 319 __u32 clc_len; /* calculated length */ 320 cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n", 321 total_read, rfclen); 322 323 /* is this frame too small to even get to a BCC? */ 324 if (total_read < 2 + sizeof(struct smb_hdr)) { 325 if ((total_read >= sizeof(struct smb_hdr) - 1) 326 && (smb->Status.CifsError != 0)) { 327 /* it's an error return */ 328 smb->WordCount = 0; 329 /* some error cases do not return wct and bcc */ 330 return 0; 331 } else if ((total_read == sizeof(struct smb_hdr) + 1) && 332 (smb->WordCount == 0)) { 333 char *tmp = (char *)smb; 334 /* Need to work around a bug in two servers here */ 335 /* First, check if the part of bcc they sent was zero */ 336 if (tmp[sizeof(struct smb_hdr)] == 0) { 337 /* some servers return only half of bcc 338 * on simple responses (wct, bcc both zero) 339 * in particular have seen this on 340 * ulogoffX and FindClose. This leaves 341 * one byte of bcc potentially unitialized 342 */ 343 /* zero rest of bcc */ 344 tmp[sizeof(struct smb_hdr)+1] = 0; 345 return 0; 346 } 347 cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n"); 348 } else { 349 cifs_dbg(VFS, "Length less than smb header size\n"); 350 } 351 return -EIO; 352 } 353 354 /* otherwise, there is enough to get to the BCC */ 355 if (check_smb_hdr(smb)) 356 return -EIO; 357 clc_len = smbCalcSize(smb, server); 358 359 if (4 + rfclen != total_read) { 360 cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n", 361 rfclen); 362 return -EIO; 363 } 364 365 if (4 + rfclen != clc_len) { 366 __u16 mid = get_mid(smb); 367 /* check if bcc wrapped around for large read responses */ 368 if ((rfclen > 64 * 1024) && (rfclen > clc_len)) { 369 /* check if lengths match mod 64K */ 370 if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF)) 371 return 0; /* bcc wrapped */ 372 } 373 cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n", 374 clc_len, 4 + rfclen, mid); 375 376 if (4 + rfclen < clc_len) { 377 cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n", 378 rfclen, mid); 379 return -EIO; 380 } else if (rfclen > clc_len + 512) { 381 /* 382 * Some servers (Windows XP in particular) send more 383 * data than the lengths in the SMB packet would 384 * indicate on certain calls (byte range locks and 385 * trans2 find first calls in particular). While the 386 * client can handle such a frame by ignoring the 387 * trailing data, we choose limit the amount of extra 388 * data to 512 bytes. 389 */ 390 cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n", 391 rfclen, mid); 392 return -EIO; 393 } 394 } 395 return 0; 396} 397 398bool 399is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) 400{ 401 struct smb_hdr *buf = (struct smb_hdr *)buffer; 402 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; 403 struct list_head *tmp, *tmp1, *tmp2; 404 struct cifs_ses *ses; 405 struct cifs_tcon *tcon; 406 struct cifsInodeInfo *pCifsInode; 407 struct cifsFileInfo *netfile; 408 409 cifs_dbg(FYI, "Checking for oplock break or dnotify response\n"); 410 if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) && 411 (pSMB->hdr.Flags & SMBFLG_RESPONSE)) { 412 struct smb_com_transaction_change_notify_rsp *pSMBr = 413 (struct smb_com_transaction_change_notify_rsp *)buf; 414 struct file_notify_information *pnotify; 415 __u32 data_offset = 0; 416 size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length); 417 418 if (get_bcc(buf) > sizeof(struct file_notify_information)) { 419 data_offset = le32_to_cpu(pSMBr->DataOffset); 420 421 if (data_offset > 422 len - sizeof(struct file_notify_information)) { 423 cifs_dbg(FYI, "Invalid data_offset %u\n", 424 data_offset); 425 return true; 426 } 427 pnotify = (struct file_notify_information *) 428 ((char *)&pSMBr->hdr.Protocol + data_offset); 429 cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n", 430 pnotify->FileName, pnotify->Action); 431 /* cifs_dump_mem("Rcvd notify Data: ",buf, 432 sizeof(struct smb_hdr)+60); */ 433 return true; 434 } 435 if (pSMBr->hdr.Status.CifsError) { 436 cifs_dbg(FYI, "notify err 0x%x\n", 437 pSMBr->hdr.Status.CifsError); 438 return true; 439 } 440 return false; 441 } 442 if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX) 443 return false; 444 if (pSMB->hdr.Flags & SMBFLG_RESPONSE) { 445 /* no sense logging error on invalid handle on oplock 446 break - harmless race between close request and oplock 447 break response is expected from time to time writing out 448 large dirty files cached on the client */ 449 if ((NT_STATUS_INVALID_HANDLE) == 450 le32_to_cpu(pSMB->hdr.Status.CifsError)) { 451 cifs_dbg(FYI, "Invalid handle on oplock break\n"); 452 return true; 453 } else if (ERRbadfid == 454 le16_to_cpu(pSMB->hdr.Status.DosError.Error)) { 455 return true; 456 } else { 457 return false; /* on valid oplock brk we get "request" */ 458 } 459 } 460 if (pSMB->hdr.WordCount != 8) 461 return false; 462 463 cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n", 464 pSMB->LockType, pSMB->OplockLevel); 465 if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) 466 return false; 467 468 /* look up tcon based on tid & uid */ 469 spin_lock(&cifs_tcp_ses_lock); 470 list_for_each(tmp, &srv->smb_ses_list) { 471 ses = list_entry(tmp, struct cifs_ses, smb_ses_list); 472 list_for_each(tmp1, &ses->tcon_list) { 473 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); 474 if (tcon->tid != buf->Tid) 475 continue; 476 477 cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); 478 spin_lock(&tcon->open_file_lock); 479 list_for_each(tmp2, &tcon->openFileList) { 480 netfile = list_entry(tmp2, struct cifsFileInfo, 481 tlist); 482 if (pSMB->Fid != netfile->fid.netfid) 483 continue; 484 485 cifs_dbg(FYI, "file id match, oplock break\n"); 486 pCifsInode = CIFS_I(d_inode(netfile->dentry)); 487 488 set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, 489 &pCifsInode->flags); 490 491 netfile->oplock_epoch = 0; 492 netfile->oplock_level = pSMB->OplockLevel; 493 netfile->oplock_break_cancelled = false; 494 cifs_queue_oplock_break(netfile); 495 496 spin_unlock(&tcon->open_file_lock); 497 spin_unlock(&cifs_tcp_ses_lock); 498 return true; 499 } 500 spin_unlock(&tcon->open_file_lock); 501 spin_unlock(&cifs_tcp_ses_lock); 502 cifs_dbg(FYI, "No matching file for oplock break\n"); 503 return true; 504 } 505 } 506 spin_unlock(&cifs_tcp_ses_lock); 507 cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); 508 return true; 509} 510 511void 512dump_smb(void *buf, int smb_buf_length) 513{ 514 if (traceSMB == 0) 515 return; 516 517 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf, 518 smb_buf_length, true); 519} 520 521void 522cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb) 523{ 524 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 525 struct cifs_tcon *tcon = NULL; 526 527 if (cifs_sb->master_tlink) 528 tcon = cifs_sb_master_tcon(cifs_sb); 529 530 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; 531 cifs_sb->mnt_cifs_serverino_autodisabled = true; 532 cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n", 533 tcon ? tcon->treeName : "new server"); 534 cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n"); 535 cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n"); 536 537 } 538} 539 540void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock) 541{ 542 oplock &= 0xF; 543 544 if (oplock == OPLOCK_EXCLUSIVE) { 545 cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG; 546 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n", 547 &cinode->netfs.inode); 548 } else if (oplock == OPLOCK_READ) { 549 cinode->oplock = CIFS_CACHE_READ_FLG; 550 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n", 551 &cinode->netfs.inode); 552 } else 553 cinode->oplock = 0; 554} 555 556/* 557 * We wait for oplock breaks to be processed before we attempt to perform 558 * writes. 559 */ 560int cifs_get_writer(struct cifsInodeInfo *cinode) 561{ 562 int rc; 563 564start: 565 rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK, 566 TASK_KILLABLE); 567 if (rc) 568 return rc; 569 570 spin_lock(&cinode->writers_lock); 571 if (!cinode->writers) 572 set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); 573 cinode->writers++; 574 /* Check to see if we have started servicing an oplock break */ 575 if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) { 576 cinode->writers--; 577 if (cinode->writers == 0) { 578 clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); 579 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS); 580 } 581 spin_unlock(&cinode->writers_lock); 582 goto start; 583 } 584 spin_unlock(&cinode->writers_lock); 585 return 0; 586} 587 588void cifs_put_writer(struct cifsInodeInfo *cinode) 589{ 590 spin_lock(&cinode->writers_lock); 591 cinode->writers--; 592 if (cinode->writers == 0) { 593 clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); 594 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS); 595 } 596 spin_unlock(&cinode->writers_lock); 597} 598 599/** 600 * cifs_queue_oplock_break - queue the oplock break handler for cfile 601 * @cfile: The file to break the oplock on 602 * 603 * This function is called from the demultiplex thread when it 604 * receives an oplock break for @cfile. 605 * 606 * Assumes the tcon->open_file_lock is held. 607 * Assumes cfile->file_info_lock is NOT held. 608 */ 609void cifs_queue_oplock_break(struct cifsFileInfo *cfile) 610{ 611 /* 612 * Bump the handle refcount now while we hold the 613 * open_file_lock to enforce the validity of it for the oplock 614 * break handler. The matching put is done at the end of the 615 * handler. 616 */ 617 cifsFileInfo_get(cfile); 618 619 queue_work(cifsoplockd_wq, &cfile->oplock_break); 620} 621 622void cifs_done_oplock_break(struct cifsInodeInfo *cinode) 623{ 624 clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); 625 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK); 626} 627 628bool 629backup_cred(struct cifs_sb_info *cifs_sb) 630{ 631 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) { 632 if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid())) 633 return true; 634 } 635 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) { 636 if (in_group_p(cifs_sb->ctx->backupgid)) 637 return true; 638 } 639 640 return false; 641} 642 643void 644cifs_del_pending_open(struct cifs_pending_open *open) 645{ 646 spin_lock(&tlink_tcon(open->tlink)->open_file_lock); 647 list_del(&open->olist); 648 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); 649} 650 651void 652cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink, 653 struct cifs_pending_open *open) 654{ 655 memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE); 656 open->oplock = CIFS_OPLOCK_NO_CHANGE; 657 open->tlink = tlink; 658 fid->pending_open = open; 659 list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens); 660} 661 662void 663cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink, 664 struct cifs_pending_open *open) 665{ 666 spin_lock(&tlink_tcon(tlink)->open_file_lock); 667 cifs_add_pending_open_locked(fid, tlink, open); 668 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); 669} 670 671/* 672 * Critical section which runs after acquiring deferred_lock. 673 * As there is no reference count on cifs_deferred_close, pdclose 674 * should not be used outside deferred_lock. 675 */ 676bool 677cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose) 678{ 679 struct cifs_deferred_close *dclose; 680 681 list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) { 682 if ((dclose->netfid == cfile->fid.netfid) && 683 (dclose->persistent_fid == cfile->fid.persistent_fid) && 684 (dclose->volatile_fid == cfile->fid.volatile_fid)) { 685 *pdclose = dclose; 686 return true; 687 } 688 } 689 return false; 690} 691 692/* 693 * Critical section which runs after acquiring deferred_lock. 694 */ 695void 696cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose) 697{ 698 bool is_deferred = false; 699 struct cifs_deferred_close *pdclose; 700 701 is_deferred = cifs_is_deferred_close(cfile, &pdclose); 702 if (is_deferred) { 703 kfree(dclose); 704 return; 705 } 706 707 dclose->tlink = cfile->tlink; 708 dclose->netfid = cfile->fid.netfid; 709 dclose->persistent_fid = cfile->fid.persistent_fid; 710 dclose->volatile_fid = cfile->fid.volatile_fid; 711 list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes); 712} 713 714/* 715 * Critical section which runs after acquiring deferred_lock. 716 */ 717void 718cifs_del_deferred_close(struct cifsFileInfo *cfile) 719{ 720 bool is_deferred = false; 721 struct cifs_deferred_close *dclose; 722 723 is_deferred = cifs_is_deferred_close(cfile, &dclose); 724 if (!is_deferred) 725 return; 726 list_del(&dclose->dlist); 727 kfree(dclose); 728} 729 730void 731cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode) 732{ 733 struct cifsFileInfo *cfile = NULL; 734 struct file_list *tmp_list, *tmp_next_list; 735 struct list_head file_head; 736 737 if (cifs_inode == NULL) 738 return; 739 740 INIT_LIST_HEAD(&file_head); 741 spin_lock(&cifs_inode->open_file_lock); 742 list_for_each_entry(cfile, &cifs_inode->openFileList, flist) { 743 if (delayed_work_pending(&cfile->deferred)) { 744 if (cancel_delayed_work(&cfile->deferred)) { 745 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); 746 if (tmp_list == NULL) 747 break; 748 tmp_list->cfile = cfile; 749 list_add_tail(&tmp_list->list, &file_head); 750 } 751 } 752 } 753 spin_unlock(&cifs_inode->open_file_lock); 754 755 list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { 756 _cifsFileInfo_put(tmp_list->cfile, true, false); 757 list_del(&tmp_list->list); 758 kfree(tmp_list); 759 } 760} 761 762void 763cifs_close_all_deferred_files(struct cifs_tcon *tcon) 764{ 765 struct cifsFileInfo *cfile; 766 struct list_head *tmp; 767 struct file_list *tmp_list, *tmp_next_list; 768 struct list_head file_head; 769 770 INIT_LIST_HEAD(&file_head); 771 spin_lock(&tcon->open_file_lock); 772 list_for_each(tmp, &tcon->openFileList) { 773 cfile = list_entry(tmp, struct cifsFileInfo, tlist); 774 if (delayed_work_pending(&cfile->deferred)) { 775 if (cancel_delayed_work(&cfile->deferred)) { 776 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); 777 if (tmp_list == NULL) 778 break; 779 tmp_list->cfile = cfile; 780 list_add_tail(&tmp_list->list, &file_head); 781 } 782 } 783 } 784 spin_unlock(&tcon->open_file_lock); 785 786 list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { 787 _cifsFileInfo_put(tmp_list->cfile, true, false); 788 list_del(&tmp_list->list); 789 kfree(tmp_list); 790 } 791} 792void 793cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) 794{ 795 struct cifsFileInfo *cfile; 796 struct list_head *tmp; 797 struct file_list *tmp_list, *tmp_next_list; 798 struct list_head file_head; 799 void *page; 800 const char *full_path; 801 802 INIT_LIST_HEAD(&file_head); 803 page = alloc_dentry_path(); 804 spin_lock(&tcon->open_file_lock); 805 list_for_each(tmp, &tcon->openFileList) { 806 cfile = list_entry(tmp, struct cifsFileInfo, tlist); 807 full_path = build_path_from_dentry(cfile->dentry, page); 808 if (strstr(full_path, path)) { 809 if (delayed_work_pending(&cfile->deferred)) { 810 if (cancel_delayed_work(&cfile->deferred)) { 811 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); 812 if (tmp_list == NULL) 813 break; 814 tmp_list->cfile = cfile; 815 list_add_tail(&tmp_list->list, &file_head); 816 } 817 } 818 } 819 } 820 spin_unlock(&tcon->open_file_lock); 821 822 list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { 823 _cifsFileInfo_put(tmp_list->cfile, true, false); 824 list_del(&tmp_list->list); 825 kfree(tmp_list); 826 } 827 free_dentry_path(page); 828} 829 830/* parses DFS refferal V3 structure 831 * caller is responsible for freeing target_nodes 832 * returns: 833 * - on success - 0 834 * - on failure - errno 835 */ 836int 837parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, 838 unsigned int *num_of_nodes, 839 struct dfs_info3_param **target_nodes, 840 const struct nls_table *nls_codepage, int remap, 841 const char *searchName, bool is_unicode) 842{ 843 int i, rc = 0; 844 char *data_end; 845 struct dfs_referral_level_3 *ref; 846 847 *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals); 848 849 if (*num_of_nodes < 1) { 850 cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n", 851 *num_of_nodes); 852 rc = -EINVAL; 853 goto parse_DFS_referrals_exit; 854 } 855 856 ref = (struct dfs_referral_level_3 *) &(rsp->referrals); 857 if (ref->VersionNumber != cpu_to_le16(3)) { 858 cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n", 859 le16_to_cpu(ref->VersionNumber)); 860 rc = -EINVAL; 861 goto parse_DFS_referrals_exit; 862 } 863 864 /* get the upper boundary of the resp buffer */ 865 data_end = (char *)rsp + rsp_size; 866 867 cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n", 868 *num_of_nodes, le32_to_cpu(rsp->DFSFlags)); 869 870 *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param), 871 GFP_KERNEL); 872 if (*target_nodes == NULL) { 873 rc = -ENOMEM; 874 goto parse_DFS_referrals_exit; 875 } 876 877 /* collect necessary data from referrals */ 878 for (i = 0; i < *num_of_nodes; i++) { 879 char *temp; 880 int max_len; 881 struct dfs_info3_param *node = (*target_nodes)+i; 882 883 node->flags = le32_to_cpu(rsp->DFSFlags); 884 if (is_unicode) { 885 __le16 *tmp = kmalloc(strlen(searchName)*2 + 2, 886 GFP_KERNEL); 887 if (tmp == NULL) { 888 rc = -ENOMEM; 889 goto parse_DFS_referrals_exit; 890 } 891 cifsConvertToUTF16((__le16 *) tmp, searchName, 892 PATH_MAX, nls_codepage, remap); 893 node->path_consumed = cifs_utf16_bytes(tmp, 894 le16_to_cpu(rsp->PathConsumed), 895 nls_codepage); 896 kfree(tmp); 897 } else 898 node->path_consumed = le16_to_cpu(rsp->PathConsumed); 899 900 node->server_type = le16_to_cpu(ref->ServerType); 901 node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags); 902 903 /* copy DfsPath */ 904 temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); 905 max_len = data_end - temp; 906 node->path_name = cifs_strndup_from_utf16(temp, max_len, 907 is_unicode, nls_codepage); 908 if (!node->path_name) { 909 rc = -ENOMEM; 910 goto parse_DFS_referrals_exit; 911 } 912 913 /* copy link target UNC */ 914 temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); 915 max_len = data_end - temp; 916 node->node_name = cifs_strndup_from_utf16(temp, max_len, 917 is_unicode, nls_codepage); 918 if (!node->node_name) { 919 rc = -ENOMEM; 920 goto parse_DFS_referrals_exit; 921 } 922 923 node->ttl = le32_to_cpu(ref->TimeToLive); 924 925 ref++; 926 } 927 928parse_DFS_referrals_exit: 929 if (rc) { 930 free_dfs_info_array(*target_nodes, *num_of_nodes); 931 *target_nodes = NULL; 932 *num_of_nodes = 0; 933 } 934 return rc; 935} 936 937struct cifs_aio_ctx * 938cifs_aio_ctx_alloc(void) 939{ 940 struct cifs_aio_ctx *ctx; 941 942 /* 943 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io 944 * to false so that we know when we have to unreference pages within 945 * cifs_aio_ctx_release() 946 */ 947 ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL); 948 if (!ctx) 949 return NULL; 950 951 INIT_LIST_HEAD(&ctx->list); 952 mutex_init(&ctx->aio_mutex); 953 init_completion(&ctx->done); 954 kref_init(&ctx->refcount); 955 return ctx; 956} 957 958void 959cifs_aio_ctx_release(struct kref *refcount) 960{ 961 struct cifs_aio_ctx *ctx = container_of(refcount, 962 struct cifs_aio_ctx, refcount); 963 964 cifsFileInfo_put(ctx->cfile); 965 966 /* 967 * ctx->bv is only set if setup_aio_ctx_iter() was call successfuly 968 * which means that iov_iter_get_pages() was a success and thus that 969 * we have taken reference on pages. 970 */ 971 if (ctx->bv) { 972 unsigned i; 973 974 for (i = 0; i < ctx->npages; i++) { 975 if (ctx->should_dirty) 976 set_page_dirty(ctx->bv[i].bv_page); 977 put_page(ctx->bv[i].bv_page); 978 } 979 kvfree(ctx->bv); 980 } 981 982 kfree(ctx); 983} 984 985#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024) 986 987int 988setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) 989{ 990 ssize_t rc; 991 unsigned int cur_npages; 992 unsigned int npages = 0; 993 unsigned int i; 994 size_t len; 995 size_t count = iov_iter_count(iter); 996 unsigned int saved_len; 997 size_t start; 998 unsigned int max_pages = iov_iter_npages(iter, INT_MAX); 999 struct page **pages = NULL; 1000 struct bio_vec *bv = NULL; 1001 1002 if (iov_iter_is_kvec(iter)) { 1003 memcpy(&ctx->iter, iter, sizeof(*iter)); 1004 ctx->len = count; 1005 iov_iter_advance(iter, count); 1006 return 0; 1007 } 1008 1009 if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT) 1010 bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL); 1011 1012 if (!bv) { 1013 bv = vmalloc(array_size(max_pages, sizeof(*bv))); 1014 if (!bv) 1015 return -ENOMEM; 1016 } 1017 1018 if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT) 1019 pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL); 1020 1021 if (!pages) { 1022 pages = vmalloc(array_size(max_pages, sizeof(*pages))); 1023 if (!pages) { 1024 kvfree(bv); 1025 return -ENOMEM; 1026 } 1027 } 1028 1029 saved_len = count; 1030 1031 while (count && npages < max_pages) { 1032 rc = iov_iter_get_pages(iter, pages, count, max_pages, &start); 1033 if (rc < 0) { 1034 cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc); 1035 break; 1036 } 1037 1038 if (rc > count) { 1039 cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc, 1040 count); 1041 break; 1042 } 1043 1044 iov_iter_advance(iter, rc); 1045 count -= rc; 1046 rc += start; 1047 cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE); 1048 1049 if (npages + cur_npages > max_pages) { 1050 cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n", 1051 npages + cur_npages, max_pages); 1052 break; 1053 } 1054 1055 for (i = 0; i < cur_npages; i++) { 1056 len = rc > PAGE_SIZE ? PAGE_SIZE : rc; 1057 bv[npages + i].bv_page = pages[i]; 1058 bv[npages + i].bv_offset = start; 1059 bv[npages + i].bv_len = len - start; 1060 rc -= len; 1061 start = 0; 1062 } 1063 1064 npages += cur_npages; 1065 } 1066 1067 kvfree(pages); 1068 ctx->bv = bv; 1069 ctx->len = saved_len - count; 1070 ctx->npages = npages; 1071 iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len); 1072 return 0; 1073} 1074 1075/** 1076 * cifs_alloc_hash - allocate hash and hash context together 1077 * @name: The name of the crypto hash algo 1078 * @shash: Where to put the pointer to the hash algo 1079 * @sdesc: Where to put the pointer to the hash descriptor 1080 * 1081 * The caller has to make sure @sdesc is initialized to either NULL or 1082 * a valid context. Both can be freed via cifs_free_hash(). 1083 */ 1084int 1085cifs_alloc_hash(const char *name, 1086 struct crypto_shash **shash, struct sdesc **sdesc) 1087{ 1088 int rc = 0; 1089 size_t size; 1090 1091 if (*sdesc != NULL) 1092 return 0; 1093 1094 *shash = crypto_alloc_shash(name, 0, 0); 1095 if (IS_ERR(*shash)) { 1096 cifs_dbg(VFS, "Could not allocate crypto %s\n", name); 1097 rc = PTR_ERR(*shash); 1098 *shash = NULL; 1099 *sdesc = NULL; 1100 return rc; 1101 } 1102 1103 size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash); 1104 *sdesc = kmalloc(size, GFP_KERNEL); 1105 if (*sdesc == NULL) { 1106 cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name); 1107 crypto_free_shash(*shash); 1108 *shash = NULL; 1109 return -ENOMEM; 1110 } 1111 1112 (*sdesc)->shash.tfm = *shash; 1113 return 0; 1114} 1115 1116/** 1117 * cifs_free_hash - free hash and hash context together 1118 * @shash: Where to find the pointer to the hash algo 1119 * @sdesc: Where to find the pointer to the hash descriptor 1120 * 1121 * Freeing a NULL hash or context is safe. 1122 */ 1123void 1124cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc) 1125{ 1126 kfree(*sdesc); 1127 *sdesc = NULL; 1128 if (*shash) 1129 crypto_free_shash(*shash); 1130 *shash = NULL; 1131} 1132 1133/** 1134 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst 1135 * @rqst: The request descriptor 1136 * @page: The index of the page to query 1137 * @len: Where to store the length for this page: 1138 * @offset: Where to store the offset for this page 1139 */ 1140void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page, 1141 unsigned int *len, unsigned int *offset) 1142{ 1143 *len = rqst->rq_pagesz; 1144 *offset = (page == 0) ? rqst->rq_offset : 0; 1145 1146 if (rqst->rq_npages == 1 || page == rqst->rq_npages-1) 1147 *len = rqst->rq_tailsz; 1148 else if (page == 0) 1149 *len = rqst->rq_pagesz - rqst->rq_offset; 1150} 1151 1152void extract_unc_hostname(const char *unc, const char **h, size_t *len) 1153{ 1154 const char *end; 1155 1156 /* skip initial slashes */ 1157 while (*unc && (*unc == '\\' || *unc == '/')) 1158 unc++; 1159 1160 end = unc; 1161 1162 while (*end && !(*end == '\\' || *end == '/')) 1163 end++; 1164 1165 *h = unc; 1166 *len = end - unc; 1167} 1168 1169/** 1170 * copy_path_name - copy src path to dst, possibly truncating 1171 * @dst: The destination buffer 1172 * @src: The source name 1173 * 1174 * returns number of bytes written (including trailing nul) 1175 */ 1176int copy_path_name(char *dst, const char *src) 1177{ 1178 int name_len; 1179 1180 /* 1181 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it 1182 * will truncate and strlen(dst) will be PATH_MAX-1 1183 */ 1184 name_len = strscpy(dst, src, PATH_MAX); 1185 if (WARN_ON_ONCE(name_len < 0)) 1186 name_len = PATH_MAX-1; 1187 1188 /* we count the trailing nul */ 1189 name_len++; 1190 return name_len; 1191} 1192 1193struct super_cb_data { 1194 void *data; 1195 struct super_block *sb; 1196}; 1197 1198static void tcp_super_cb(struct super_block *sb, void *arg) 1199{ 1200 struct super_cb_data *sd = arg; 1201 struct TCP_Server_Info *server = sd->data; 1202 struct cifs_sb_info *cifs_sb; 1203 struct cifs_tcon *tcon; 1204 1205 if (sd->sb) 1206 return; 1207 1208 cifs_sb = CIFS_SB(sb); 1209 tcon = cifs_sb_master_tcon(cifs_sb); 1210 if (tcon->ses->server == server) 1211 sd->sb = sb; 1212} 1213 1214static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *), 1215 void *data) 1216{ 1217 struct super_cb_data sd = { 1218 .data = data, 1219 .sb = NULL, 1220 }; 1221 struct file_system_type **fs_type = (struct file_system_type *[]) { 1222 &cifs_fs_type, &smb3_fs_type, NULL, 1223 }; 1224 1225 for (; *fs_type; fs_type++) { 1226 iterate_supers_type(*fs_type, f, &sd); 1227 if (sd.sb) { 1228 /* 1229 * Grab an active reference in order to prevent automounts (DFS links) 1230 * of expiring and then freeing up our cifs superblock pointer while 1231 * we're doing failover. 1232 */ 1233 cifs_sb_active(sd.sb); 1234 return sd.sb; 1235 } 1236 } 1237 return ERR_PTR(-EINVAL); 1238} 1239 1240static void __cifs_put_super(struct super_block *sb) 1241{ 1242 if (!IS_ERR_OR_NULL(sb)) 1243 cifs_sb_deactive(sb); 1244} 1245 1246struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server) 1247{ 1248 return __cifs_get_super(tcp_super_cb, server); 1249} 1250 1251void cifs_put_tcp_super(struct super_block *sb) 1252{ 1253 __cifs_put_super(sb); 1254} 1255 1256#ifdef CONFIG_CIFS_DFS_UPCALL 1257int match_target_ip(struct TCP_Server_Info *server, 1258 const char *share, size_t share_len, 1259 bool *result) 1260{ 1261 int rc; 1262 char *target, *tip = NULL; 1263 struct sockaddr tipaddr; 1264 1265 *result = false; 1266 1267 target = kzalloc(share_len + 3, GFP_KERNEL); 1268 if (!target) { 1269 rc = -ENOMEM; 1270 goto out; 1271 } 1272 1273 scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share); 1274 1275 cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2); 1276 1277 rc = dns_resolve_server_name_to_ip(target, &tip, NULL); 1278 if (rc < 0) 1279 goto out; 1280 1281 cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip); 1282 1283 if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) { 1284 cifs_dbg(VFS, "%s: failed to convert target ip address\n", 1285 __func__); 1286 rc = -EINVAL; 1287 goto out; 1288 } 1289 1290 *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, 1291 &tipaddr); 1292 cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result); 1293 rc = 0; 1294 1295out: 1296 kfree(target); 1297 kfree(tip); 1298 1299 return rc; 1300} 1301 1302int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix) 1303{ 1304 kfree(cifs_sb->prepath); 1305 1306 if (prefix && *prefix) { 1307 cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC); 1308 if (!cifs_sb->prepath) 1309 return -ENOMEM; 1310 1311 convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); 1312 } else 1313 cifs_sb->prepath = NULL; 1314 1315 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 1316 return 0; 1317} 1318 1319/** cifs_dfs_query_info_nonascii_quirk 1320 * Handle weird Windows SMB server behaviour. It responds with 1321 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request 1322 * for "\<server>\<dfsname>\<linkpath>" DFS reference, 1323 * where <dfsname> contains non-ASCII unicode symbols. 1324 * 1325 * Check such DFS reference. 1326 */ 1327int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid, 1328 struct cifs_tcon *tcon, 1329 struct cifs_sb_info *cifs_sb, 1330 const char *linkpath) 1331{ 1332 char *treename, *dfspath, sep; 1333 int treenamelen, linkpathlen, rc; 1334 1335 treename = tcon->treeName; 1336 /* MS-DFSC: All paths in REQ_GET_DFS_REFERRAL and RESP_GET_DFS_REFERRAL 1337 * messages MUST be encoded with exactly one leading backslash, not two 1338 * leading backslashes. 1339 */ 1340 sep = CIFS_DIR_SEP(cifs_sb); 1341 if (treename[0] == sep && treename[1] == sep) 1342 treename++; 1343 linkpathlen = strlen(linkpath); 1344 treenamelen = strnlen(treename, MAX_TREE_SIZE + 1); 1345 dfspath = kzalloc(treenamelen + linkpathlen + 1, GFP_KERNEL); 1346 if (!dfspath) 1347 return -ENOMEM; 1348 if (treenamelen) 1349 memcpy(dfspath, treename, treenamelen); 1350 memcpy(dfspath + treenamelen, linkpath, linkpathlen); 1351 rc = dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls, 1352 cifs_remap(cifs_sb), dfspath, NULL, NULL); 1353 if (rc == 0) { 1354 cifs_dbg(FYI, "DFS ref '%s' is found, emulate -EREMOTE\n", 1355 dfspath); 1356 rc = -EREMOTE; 1357 } else { 1358 cifs_dbg(FYI, "%s: dfs_cache_find returned %d\n", __func__, rc); 1359 } 1360 kfree(dfspath); 1361 return rc; 1362} 1363#endif