/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */
unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}
void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
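
/*
 * Illustrative sketch (not part of the original file): an xid brackets a
 * single vfs operation. Callers normally use the get_xid()/free_xid()
 * wrappers around _get_xid()/_free_xid(); the do_op() helper below is
 * hypothetical and only stands in for the real operation body:
 *
 *	unsigned int xid = get_xid();
 *	int rc = do_op(xid, ...);	// hypothetical per-operation work
 *	free_xid(xid);
 *	return rc;
 */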
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
	}
	return ret_buf;
}
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kfree(buf_to_free->iface_list);
	kzfree(buf_to_free);
}
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		spin_lock_init(&ret_buf->open_file_lock);
		mutex_init(&ret_buf->crfid.fid_mutex);
		ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
					     GFP_KERNEL);
		spin_lock_init(&ret_buf->stat_lock);
		atomic_set(&ret_buf->num_local_opens, 0);
		atomic_set(&ret_buf->num_remote_opens, 0);
	}
	return ret_buf;
}
void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->crfid.fid);
	kfree(buf_to_free);
}
struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_sync_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}
void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}
struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}
void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}
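
/*
 * Illustrative sketch (not part of the original file): responses carry a
 * buffer-type tag so free_rsp_buf() can return them to the matching mempool.
 * A simplified caller, with a hypothetical send_recv_op() standing in for the
 * real transport routine, might look like:
 *
 *	int resp_buftype = CIFS_NO_BUFFER;
 *	void *rsp = NULL;
 *
 *	rc = send_recv_op(..., &rsp, &resp_buftype);	// hypothetical
 *	// ... consume the response ...
 *	free_rsp_buf(resp_buftype, rsp);	// small vs large handled here
 */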
/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsibility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units  */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /*  RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}
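
/*
 * Illustrative sketch (not part of the original file): a legacy SMB1 request
 * is typically built by grabbing a small buffer and letting header_assemble()
 * fill in the fixed header; the caller then appends the word-count area and
 * byte count before sending. This is a simplified, assumed caller rather than
 * the exact request-init path:
 *
 *	struct smb_hdr *buf = cifs_small_buf_get();
 *
 *	header_assemble(buf, SMB_COM_TREE_DISCONNECT, tcon, 0);
 *	// word_count of 0 here; non-trivial commands fill their wct fields
 *	// and bcc after this call and before the frame is transmitted
 */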
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}
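
/*
 * Worked example (illustrative, not part of the original file): for a frame
 * where total_read = 90 bytes, the RFC1001 length field covers everything
 * after the 4-byte length prefix, so the checks above require rfclen = 86 and
 * a computed size of clc_len == 4 + rfclen == 90; for very large reads the
 * comparison is instead done modulo 64K because the 16-bit bcc can wrap.
 */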
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	    (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		    le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		    le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				/*
				 * Set flag if the server downgrades the oplock
				 * to L2 else clear.
				 */
				if (pSMB->OplockLevel)
					set_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);
				else
					clear_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);

				queue_work(cifsoplockd_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}
void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
			 cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}
/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}
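
/*
 * Illustrative sketch (not part of the original file): writers bracket their
 * work with cifs_get_writer()/cifs_put_writer() so an in-progress oplock
 * break can wait for them to drain. A simplified write path might look like:
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	// ... dirty pages / issue the write (hypothetical work) ...
 *	cifs_put_writer(cinode);
 */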
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}
void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}
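
/*
 * Illustrative sketch (not part of the original file): a caller holding a
 * referral response buffer hands it to parse_dfs_referrals() and, on success,
 * owns the returned array and must release it with free_dfs_info_array():
 *
 *	struct dfs_info3_param *refs = NULL;
 *	unsigned int num_refs = 0;
 *
 *	rc = parse_dfs_referrals(rsp, rsp_size, &num_refs, &refs,
 *				 nls_codepage, remap, search_name, is_unicode);
 *	if (!rc) {
 *		// ... use refs[0..num_refs-1].node_name / path_name ...
 *		free_dfs_info_array(refs, num_refs);
 *	}
 */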
struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}
void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);
	kvfree(ctx->bv);
	kfree(ctx);
}
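
/*
 * Illustrative sketch (not part of the original file): the aio context is
 * reference counted, so cifs_aio_ctx_release() is only ever invoked through
 * kref_put() once the last user drops its reference:
 *
 *	struct cifs_aio_ctx *ctx = cifs_aio_ctx_alloc();
 *	...
 *	kref_get(&ctx->refcount);	// e.g. one ref per outstanding request
 *	...
 *	kref_put(&ctx->refcount, cifs_aio_ctx_release);
 */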
#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iov_iter_is_kvec(iter)) {
		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
		if (!bv)
			return -ENOMEM;
	}

	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
	return 0;
}
/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	(*sdesc)->shash.flags = 0x0;
	return 0;
}
/**
 * cifs_free_hash - free hash and hash context together
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
	kfree(*sdesc);
	*sdesc = NULL;
	if (*shash)
		crypto_free_shash(*shash);
	*shash = NULL;
}
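
/*
 * Illustrative sketch (not part of the original file): a typical user pairs
 * cifs_alloc_hash() with the generic shash API and then cifs_free_hash().
 * The "md5" algorithm name below is only an example:
 *
 *	struct crypto_shash *md5 = NULL;
 *	struct sdesc *sdesc_md5 = NULL;
 *	u8 digest[16];
 *
 *	rc = cifs_alloc_hash("md5", &md5, &sdesc_md5);
 *	if (!rc) {
 *		rc = crypto_shash_init(&sdesc_md5->shash);
 *		rc = rc ?: crypto_shash_update(&sdesc_md5->shash, data, len);
 *		rc = rc ?: crypto_shash_final(&sdesc_md5->shash, digest);
 *		cifs_free_hash(&md5, &sdesc_md5);
 *	}
 */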
/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * Input: rqst - a smb_rqst, page - a page index for rqst
 * Output: *len - the length for this page, *offset - the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
			  unsigned int *len, unsigned int *offset)
{
	*len = rqst->rq_pagesz;
	*offset = (page == 0) ? rqst->rq_offset : 0;

	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
		*len = rqst->rq_tailsz;
	else if (page == 0)
		*len = rqst->rq_pagesz - rqst->rq_offset;
}
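
/*
 * Illustrative sketch (not part of the original file): callers that walk the
 * page array of a request (for example to build an iovec or to sign the
 * payload) query each page's slice with rqst_page_get_length():
 *
 *	unsigned int i, len, offset;
 *
 *	for (i = 0; i < rqst->rq_npages; i++) {
 *		rqst_page_get_length(rqst, i, &len, &offset);
 *		// ... use rqst->rq_pages[i] starting at offset for len bytes ...
 *	}
 */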