/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

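/* Allocate a zeroed SMB session structure; the new session starts in
   CifsNew state with a reference count of one and empty session and
   tcon lists. */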
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kzfree(buf_to_free);
}

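/* Allocate a zeroed tree connection structure, initialize its lists and
   locks, and pre-allocate its prfid. */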
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;
	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		spin_lock_init(&ret_buf->open_file_lock);
		mutex_init(&ret_buf->prfid_mutex);
		ret_buf->prfid = kzalloc(sizeof(struct cifs_fid), GFP_KERNEL);
#ifdef CONFIG_CIFS_STATS
		spin_lock_init(&ret_buf->stat_lock);
#endif
	}
	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->prfid);
	kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_sync_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/* We could use negotiated size instead of max_msgsize -
	   but it may be more efficient to always alloc same size
	   albeit slightly larger than necessary and maxbuffersize
	   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

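/* Return a response buffer to the small or large request mempool,
   depending on how it was allocated. */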
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsibility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

	/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

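/* Sanity check a received SMB frame: verify the protocol signature and
   that the RFC1001 length, the number of bytes actually read, and the
   length calculated from wct/bcc agree (allowing for known server quirks
   such as byte counts that wrap on large reads). */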
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

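/* Check whether an incoming frame is an oplock break or dnotify
   response and, for an oplock break, queue the break work for the
   matching open file.  Returns true if the frame was recognized and
   handled here. */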
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	    (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/* cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		    le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		    le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				/*
				 * Set flag if the server downgrades the oplock
				 * to L2 else clear.
				 */
				if (pSMB->OplockLevel)
					set_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);
				else
					clear_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);

				queue_work(cifsoplockd_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

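/* Hex dump an SMB buffer to the kernel log when traceSMB is enabled. */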
void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
			 cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}

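/* Translate an SMB1 oplock level into the inode's caching flags:
   exclusive oplocks allow cached reads and writes, level II oplocks
   allow cached reads only, anything else disables caching. */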
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

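/* Drop the writer count taken by cifs_get_writer(); when it reaches
   zero, clear the pending-writers flag and wake up any waiters. */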
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

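/* Return true if the current task may use backup intent on this mount,
   i.e. it matches the configured backupuid or is in the backupgid
   group. */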
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}

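/* Helpers to track opens that are still in flight on the owning tcon's
   pending_opens list, protected by the tcon's open_file_lock. */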
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

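/* Allocate a reference-counted context used to track an async I/O
   request; it is released via cifs_aio_ctx_release() when the last
   kref is dropped. */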
struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}

void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);
	kvfree(ctx->bv);
	kfree(ctx);
}

#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

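/*
 * Build a bio_vec based iov_iter in @ctx covering the pages that back
 * @iter.  Kernel (KVEC) iterators are copied as-is; for user buffers the
 * pages are pinned with iov_iter_get_pages() and recorded in ctx->bv.
 * Vector allocations larger than CIFS_AIO_KMALLOC_LIMIT fall back to
 * vmalloc().
 */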
int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iter->type & ITER_KVEC) {
		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
		if (!bv)
			return -ENOMEM;
	}

	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
	return 0;
}

/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	(*sdesc)->shash.flags = 0x0;
	return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
	kfree(*sdesc);
	*sdesc = NULL;
	if (*shash)
		crypto_free_shash(*shash);
	*shash = NULL;
}

/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * Input: rqst - a smb_rqst, page - a page index for rqst
 * Output: *len - the length for this page, *offset - the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
			  unsigned int *len, unsigned int *offset)
{
	*len = rqst->rq_pagesz;
	*offset = (page == 0) ? rqst->rq_offset : 0;

	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
		*len = rqst->rq_tailsz;
	else if (page == 0)
		*len = rqst->rq_pagesz - rqst->rq_offset;
}