/*
 *   Copyright (C) International Business Machines  Corp., 2002,2004
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"

#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
extern struct task_struct *dnotifyThread; /* remove sparse warning */
struct task_struct *dnotifyThread = NULL;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern kmem_cache_t *cifs_oplock_cachep;
static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

	rc = cifs_mount(sb, cifs_sb, data, devname);
	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	    sb->s_blocksize =
		cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);
	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if (cifs_sb) {
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}
static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc)
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	int xid;
	int rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
				      presumably be total path, but note
				      that some servers (including Samba 3)
				      have a shorter maximum path */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	/* BB we could add a second check for a QFS Unix capability bit */
	/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
	    le64_to_cpu(pTcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed
	   on the newer one */
	if (rc)
#endif /* CIFS_EXPERIMENTAL */
	rc = CIFSSMBQFSInfo(xid, pTcon, buf);

	/* Old Windows servers do not support level 103, retry with level
	   one if old server failed the previous call */
	if (rc)
		rc = SMBOldQFSInfo(xid, pTcon, buf);

	/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
	FreeXid(xid);
	return 0;		/* always return success? what if volume is no
				   longer available? */
}
static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		/* the server does permission checks, we do not need to
		   repeat them here */
		return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}
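
/* Example of the effect of the check above: when CIFS_MOUNT_NO_PERM is set
   (typically via the "noperm" mount option) the client skips the local mode
   bit check and defers entirely to the server's own access checks, while on
   a default mount generic_permission() enforces the cached mode bits, so a
   file reported as mode 0444 fails a local write-permission check before any
   request is ever sent to the server. */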
static kmem_cache_t *cifs_inode_cachep;
static kmem_cache_t *cifs_req_cachep;
static kmem_cache_t *cifs_mid_cachep;
kmem_cache_t *cifs_oplock_cachep;
static kmem_cache_t *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;

	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blksize = CIFS_MAX_MSGSIZE;
	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}
static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
					   cifs_sb->tcon->ses->userName);
				if (cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
					   cifs_sb->tcon->ses->domainName);
			}
		}
		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}
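
/* Illustrative /proc/mounts output produced by the function above (the field
   values are examples only and depend on the negotiated session):

	//server/share /mnt/cifs cifs rw,unc=\\server\share,username=guest,rsize=16384,wsize=16384 0 0

   Only unc, username, domain, rsize and wsize are echoed back here; other
   settable mount options are not displayed. */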
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota	= cifs_xquota_set,
	.get_xquota	= cifs_xquota_get,
	.set_xstate	= cifs_xstate_set,
	.get_xstate	= cifs_xstate_get,
};
#endif /* CONFIG_CIFS_QUOTA */
#ifdef CONFIG_CIFS_EXPERIMENTAL
static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
{
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;

	if (!(flags & MNT_FORCE))
		return;
	cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if (tcon == NULL)
		return;
	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;
	up(&tcon->tconSem);

	/* cancel_brl_requests(tcon); */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
/* BB FIXME - finish add checks for tidStatus BB */

	return;
}
#endif
static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}
struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode	= generic_delete_inode,
	.delete_inode	= cifs_delete_inode, */ /* Do not need the above two
	functions unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.umount_begin = cifs_umount_begin,
#endif
	.remount_fs = cifs_remount,
};
static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
}
static ssize_t cifs_file_writev(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	ssize_t written;

	written = generic_file_writev(file, iov, nr_segs, ppos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const char __user *buf,
				   size_t count, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, buf, count, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}
static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		int retval = cifs_revalidate(file->f_dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return remote_llseek(file, offset, origin);
}
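
/* Example of why SEEK_END is singled out above: if another client has just
   extended the file, the locally cached length may be stale, so an
   lseek(fd, 0, SEEK_END) must revalidate metadata with the server first or
   the returned offset could be wrong; SEEK_SET and SEEK_CUR need no server
   round trip and fall straight through to remote_llseek(). */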
static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
};
struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate:cifs_revalidate,   */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
struct inode_operations cifs_file_inode_ops = {
/*	revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, */ /* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.readv = generic_file_readv,
	.writev = cifs_file_writev,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.readv = generic_file_readv,
	.writev = cifs_file_writev,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
static void
cifs_init_once(void *inode, kmem_cache_t *cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&cifsi->vfs_inode);
		INIT_LIST_HEAD(&cifsi->lockList);
	}
}
static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof (struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT |
						  SLAB_MEM_SPREAD),
					      cifs_init_once, NULL);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	if (kmem_cache_destroy(cifs_inode_cachep))
		printk(KERN_WARNING "cifs_inode_cache: error freeing\n");
}
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
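	/* Worked example of the clamping and rounding above: PATH_MAX is 4096
	   on Linux, so the 8192 floor is exactly 2 * PATH_MAX; a load with
	   CIFSMaxBufSize=17000 (0x4268) takes the else branch and is masked
	   with 0x1FE00 down to 16896 (0x4200), the next lower multiple of 512,
	   while the default of 16384 (0x4000) passes through unchanged. */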
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
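	/* Rough arithmetic behind the comment above, assuming
	   MAX_CIFS_SMALL_BUFFER_SIZE is on the order of a few hundred bytes:
	   several small buffers then fit on one 4K slab page (and at least
	   one per page even with slab debugging enabled), whereas a full
	   CIFSMaxBufSize + MAX_CIFS_HDR_SIZE buffer is roughly 17K, i.e. a
	   five page allocation, so letting the common short SMBs use the
	   small pool saves both memory and allocation cost. */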
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
					       MAX_CIFS_SMALL_BUFFER_SIZE,
					       0, SLAB_HWCACHE_ALIGN,
					       NULL, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	if (kmem_cache_destroy(cifs_req_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_request_cache: error not all structures were freed\n");
	mempool_destroy(cifs_sm_req_poolp);
	if (kmem_cache_destroy(cifs_sm_req_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_request_cache: cifs_small_rq free error\n");
}
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof (struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof (struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_oplock_cachep == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		mempool_destroy(cifs_mid_poolp);
		return -ENOMEM;
	}

	return 0;
}
static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	if (kmem_cache_destroy(cifs_mid_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_mids: error not all structures were freed\n");
	if (kmem_cache_destroy(cifs_oplock_cachep))
		printk(KERN_WARNING
		       "error not all oplock structures were freed\n");
}
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16 netfid;
	int rc;

	do {
		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			if (oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_mutex across
				   the call */
				/* mutex_lock(&inode->i_mutex);*/
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if (CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* mutex_unlock(&inode->i_mutex);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

				/* releasing a stale oplock after recent reconnection
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if (pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
							 0 /* len */ , 0 /* offset */, 0,
							 0, LOCKING_ANDX_OPLOCK_RELEASE,
							 0 /* wait flag */);
					cFYI(1, ("Oplock release rc = %d ", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}
static int cifs_dnotify_thread(void *dummyarg)
{
	struct list_head *tmp;
	struct cifsSesInfo *ses;

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check if any stuck requests that need
		   to be woken up and wakeq so the
		   thread can wake up and error out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
					 cifsSessionList);
			if (ses && ses->server &&
			    atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		}
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());

	return 0;
}
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
	INIT_LIST_HEAD(&GlobalServerList); /* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
	/*
	 *  Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;

	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_filesystem;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_filesystem:
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}
static void __exit
exit_cifs(void)
{
	cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)