/*
 *   fs/cifs/cifsfs.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>

#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */

#ifdef CONFIG_CIFS_EXPERIMENTAL
extern struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
		 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
		 "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
		 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
		 "Default: 50 Range: 2 to 256");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;

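/* Fill in the superblock at mount time: allocate the per-superblock
   cifs_sb_info, call cifs_mount() to connect to the server, then set up
   the super_operations and the root dentry.  On failure, any partially
   initialized cifs_sb_info is torn down before the error is returned. */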
static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	/* BB should we make this contingent on mount parm? */
	sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
		sb->s_blocksize =
		    cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);

	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_EXPERIMENTAL
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cFYI(1, ("export ops supported"));
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* EXPERIMENTAL */

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if (cifs_sb) {
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}

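/* Unmount helper: disconnect from the server via cifs_umount() and free
   the cifs_sb_info and its NLS table. */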
static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc) {
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	}
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}

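/* statfs(2) for a cifs mount.  Tries the POSIX QFS info call first (when
   the server advertises the Unix extensions), then falls back to the NT
   level, and finally to the old LANMAN level for older servers. */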
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	int xid;
	int rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
				      presumably be total path, but note
				      that some servers (including Samba 3)
				      have a shorter maximum path */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
	    le64_to_cpu(pTcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed
	   on newer one */
	if (rc)
		if (pTcon->ses->capabilities & CAP_NT_SMBS)
			rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */

	/* Some old Windows servers also do not support level 103, retry with
	   older level one if old server failed the previous call or we
	   bypassed it because we detected that this was an older LANMAN sess */
	if (rc)
		rc = SMBOldQFSInfo(xid, pTcon, buf);
	/* int f_type;
	   __fsid_t f_fsid;
	   int f_namelen; */
	/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
	FreeXid(xid);
	return 0;		/* always return success? what if volume is no
				   longer available? */
}

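/* Permission check: skipped entirely when the "noperm" mount flag is set,
   otherwise fall through to generic_permission() so that client-side mode
   bits (which may have been restricted at mount time) are honored. */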
static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

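/* Allocate a cifsInodeInfo from the inode slab cache and initialize it to
   a safe "no oplock, nothing cached" state; the embedded vfs_inode is what
   the VFS actually sees. */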
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */

	/* Can not set i_flags here - they get immediately overwritten
	   to zero by the VFS */
/*	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
			/* BB add prepath to mount options displayed */
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
						   cifs_sb->tcon->ses->userName);
				if (cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
						   cifs_sb->tcon->ses->domainName);
			}
		}
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
			seq_printf(s, ",posixpaths");
		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
		    !(cifs_sb->tcon->unix_ext))
			seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
		    !(cifs_sb->tcon->unix_ext))
			seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}

#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else {
		return -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb) {
		pTcon = cifs_sb->tcon;
	} else {
		return -EIO;
	}

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota = cifs_xquota_set,
	.get_xquota = cifs_xquota_get,
	.set_xstate = cifs_xstate_set,
	.get_xstate = cifs_xstate_get,
};
#endif

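/* Forced-unmount (MNT_FORCE) helper: mark the tree connection as exiting
   when we hold the only reference and wake any waiters blocked on the
   server request/response queues so they can error out. */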
static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
{
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;

	if (!(flags & MNT_FORCE))
		return;
	cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if (tcon == NULL)
		return;
	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;
	up(&tcon->tconSem);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
/* BB FIXME - finish add checks for tidStatus BB */

	return;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

static const struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode = generic_delete_inode,
	.delete_inode = cifs_delete_inode, */ /* Do not need above two
	functions unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};

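/* get_sb entry point for the cifs filesystem type: always allocates a new
   anonymous superblock and fills it in via cifs_read_super(). */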
static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
}

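/* Write path wrapper: after the generic aio write, start writeback
   immediately unless the server granted an oplock that allows us to
   cache all file data. */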
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		int retval;

		/* some applications poll for the file length in this strange
		   way so we must seek to end on non-oplocked files by
		   setting the revalidate time to zero */
		CIFS_I(file->f_path.dentry->d_inode)->time = 0;

		retval = cifs_revalidate(file->f_path.dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return remote_llseek(file, offset, origin);
}

static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/*  .fs_flags */
};

const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate:cifs_revalidate,   */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_file_inode_ops = {
/*	revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.ioctl  = cifs_ioctl,
};

static void
cifs_init_once(struct kmem_cache *cachep, void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	INIT_LIST_HEAD(&cifsi->lockList);
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	kmem_cache_destroy(cifs_inode_cachep);
}

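/* Create the slab caches and mempools used for SMB request buffers: one
   cache/pool pair for full-size requests (CIFSMaxBufSize, clamped to the
   8192..127K range and rounded to a 512-byte multiple) and one for small
   requests of MAX_CIFS_SMALL_BUFFER_SIZE bytes. */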
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof(struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_oplock_cachep == NULL) {
		mempool_destroy(cifs_mid_poolp);
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
}

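/* cifsoplockd: kernel thread that services GlobalOplock_Q.  For each queued
   entry it flushes (and if needed invalidates) the inode's page cache and
   then sends a LOCKING_ANDX oplock release back to the server, unless the
   session needs to be reconnected anyway. */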
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16  netfid;
	int rc;

	set_freezable();
	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			if (oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_mutex across
				   the call */
				/* mutex_lock(&inode->i_mutex); */
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if (CIFS_I(inode)->clientCanCacheRead
									 == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* mutex_unlock(&inode->i_mutex); */
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d",
					 inode, rc));

				/* releasing stale oplock after recent reconnect
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if (pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1,
					     ("Oplock release rc = %d ", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}

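/* cifsdnotifyd: kernel thread that periodically wakes up waiters on each
   session's response queue so that stuck directory-notify requests can
   error out. */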
static int cifs_dnotify_thread(void *dummyarg)
{
	struct list_head *tmp;
	struct cifsSesInfo *ses;

	do {
		if (try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check if any stuck requests that need
		   to be woken up and wakeq so the
		   thread can wake up and error out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
					 cifsSessionList);
			if (ses && ses->server &&
			    atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		}
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());

	return 0;
}

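/* Module init: set up /proc entries, global lists, counters and locks,
   create the inode/mid/request caches, register the filesystem and start
   the oplock and dnotify kernel threads, unwinding in reverse order on
   failure. */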
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
/*	INIT_LIST_HEAD(&GlobalServerList); */	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;

	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_filesystem;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_filesystem:
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}

static void __exit
exit_cifs(void)
{
	cFYI(0, ("exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)