 * Copyright (C) International Business Machines  Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * Common Internet FileSystem (CIFS) client
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/xattr.h>
#include <net/ipv6.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/key-type.h>
#include "cifs_spnego.h"
#include "fscache.h"
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
				   "Default: 32767 Range: 2 to 32767.");
module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*cifsoplockd_wq;
__u32 cifs_lock_secret;
/*
 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}
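/*
 * Mirror of cifs_sb_active(): drops the cifs_sb reference and, once the last
 * one is gone, releases the extra s_active reference taken above.
 */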
void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/* BB FIXME fix time_gran to be larger for LANMAN sessions */
	sb->s_time_gran = 100;

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize */
	sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = -EOPNOTSUPP;

	xid = get_xid();

	/*
	 * PATH_MAX may be too long - it would presumably be total path,
	 * but note that some servers (including Samba 3) have a shorter
	 * maximum path.
	 *
	 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
	 */
	buf->f_namelen = PATH_MAX;
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, buf);

	free_xid(xid);
	return 0;
}
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);

	return -EOPNOTSUPP;
}
static int cifs_permission(struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		on the client (above and beyond ACL on servers) for
		servers which do not support setting and viewing mode bits,
		so allowing client to check permissions is useful */
		return generic_permission(inode, mask);
}
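/*
 * Slab caches and mempools backing inode, request and mid allocations.
 * They are created in cifs_init_inodecache(), cifs_init_request_bufs()
 * and cifs_init_mids() further down in this file.
 */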
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;

	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (cifs_inode == NULL)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	generate_random_uuid(cifs_inode->lease_key);

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	return &cifs_inode->vfs_inode;
}
static void cifs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
static void
cifs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, cifs_i_callback);
}
static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	cifs_fscache_release_inode_cookie(inode);
}
static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
}
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case LANMAN:
		seq_puts(s, "lanman");
		break;
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}
}
static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else
		seq_puts(s, "loose");
}
static void
cifs_show_nls(struct seq_file *s, struct nls_table *cur)
{
	struct nls_table *def;

	/* Display iocharset= option if it's not default charset */
	def = load_nls_default();
	if (def != cur)
		seq_printf(s, ",iocharset=%s", cur->charset);
	unload_nls(def);
}
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName)
		seq_show_option(s, "domain", tcon->ses->domainName);

	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;

		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
			   cifs_sb->mnt_file_mode,
			   cifs_sb->mnt_dir_mode);

	cifs_show_nls(s, cifs_sb->local_nls);

	if (tcon->seal)
		seq_puts(s, ",seal");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->mnt_backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->mnt_backupgid));

	seq_printf(s, ",rsize=%u", cifs_sb->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->wsize);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);
	/* convert actimeo and display it in seconds */
	seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);

	return 0;
}
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
		/* we have other mounts to same share or we have
		   already tried to force umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	} else if (tcon->tc_count == 1)
		tcon->tidStatus = CifsExiting;
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= SB_NODIRATIME;
	return 0;
}
static int cifs_drop_inode(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);
}
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/* .delete_inode	= cifs_delete_inode, */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb_vol *vol, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(vol, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	p = s = full_path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!dir) {
			dput(dentry);
			dentry = ERR_PTR(-ENOENT);
			break;
		}
		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_one_len_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;

	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}
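/*
 * cifs_set_super() is the setup callback handed to sget() in cifs_do_mount()
 * below; it attaches the preallocated cifs_sb to the new superblock.
 */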
static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	struct smb_vol *volume_info;
	struct cifs_mnt_data mnt_data;
	struct dentry *root;

	cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);

	volume_info = cifs_get_volume_info((char *)data, dev_name);
	if (IS_ERR(volume_info))
		return ERR_CAST(volume_info);

	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	if (cifs_sb == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_nls;
	}

	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
	if (cifs_sb->mountdata == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_free;
	}

	rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out_free;
	}

	rc = cifs_mount(cifs_sb, volume_info);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out_free;
	}

	mnt_data.vol = volume_info;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		root = ERR_CAST(sb);
		cifs_umount(cifs_sb);
		goto out;
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	root = cifs_get_root(volume_info, sb);
	if (IS_ERR(root))
		goto out_super;

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	goto out;

out_super:
	deactivate_locked_super(sb);
out:
	cifs_cleanup_volume_info(volume_info);
	return root;

out_free:
	kfree(cifs_sb->prepath);
	kfree(cifs_sb->mountdata);
	kfree(cifs_sb);
out_nls:
	unload_nls(volume_info->local_nls);
	goto out;
}
static ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_filp->f_flags & O_DIRECT)
		return cifs_user_readv(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return generic_file_read_iter(iocb, iter);
}
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	return generic_file_llseek(file, offset, whence);
}
static int
cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.mount = cifs_do_mount,
	.kill_sb = cifs_kill_sb,
};
MODULE_ALIAS_FS("cifs");
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
};
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
static int cifs_clone_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, u64 len)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
	unsigned int xid;
	int rc;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush first and last page first */
	truncate_inode_pages_range(&target_inode->i_data, destoff,
				   PAGE_ALIGN(destoff + len)-1);

	if (target_tcon->ses->server->ops->duplicate_extents)
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	else
		rc = -EOPNOTSUPP;

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc;
}
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (src_inode == target_inode) {
		rc = -EINVAL;
		goto out;
	}

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush first and last page first */
	truncate_inode_pages(&target_inode->i_data, 0);

	if (target_tcon->ses->server->ops->copychunk_range)
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	else
		rc = -EOPNOTSUPP;

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	unsigned int xid = get_xid();
	ssize_t rc;

	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
				       len, flags);
	free_xid(xid);
	return rc;
}
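/*
 * cifs_copy_file_range() above is just a thin wrapper: it allocates an xid
 * and defers to cifs_file_copychunk_range(), which performs the server-side
 * copy; cifs_clone_file_range() instead goes through duplicate_extents.
 */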
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_ops = {
	/* BB reevaluate whether they can be done with directio, no cache */
	.read_iter = cifs_user_readv,
	.write_iter = cifs_user_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* BB reevaluate whether they can be done with directio, no cache */
	.read_iter = cifs_user_readv,
	.write_iter = cifs_user_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.llseek = generic_file_llseek,
};
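/*
 * Six variants of the regular-file operations are defined above: the
 * loose/strict/direct flavors differ in how reads and writes go through the
 * page cache, and the _nobrl variants simply omit .lock so no byte-range
 * lock requests are sent to the server (presumably selected at open time
 * based on the cache= and nobrl mount options).
 */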
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	init_rwsem(&cifsi->lock_sem);
}
static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);

	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests). A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
static int __init
init_cifs(void)
{
	int rc = 0;

	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	rc = cifs_fscache_register();
	if (rc)
		goto out_destroy_cifsoplockd_wq;

	rc = cifs_init_inodecache();
	if (rc)
		goto out_unreg_fscache;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_UPCALL */

#ifdef CONFIG_CIFS_ACL
	rc = init_cifs_idmap();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_ACL */

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	return 0;

out_init_cifs_idmap:
#ifdef CONFIG_CIFS_ACL
	exit_cifs_idmap();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	cifs_destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_unreg_fscache:
	cifs_fscache_unregister();
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_cifs\n");
	unregister_filesystem(&cifs_fs_type);
	cifs_dfs_release_automount_timer();
#ifdef CONFIG_CIFS_ACL
	exit_cifs_idmap();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
	cifs_proc_clean();
	cifs_destroy_request_bufs();
	cifs_destroy_mids();
	cifs_destroy_inodecache();
	cifs_fscache_unregister();
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(cifsiod_wq);
}
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access servers complying with the SNIA CIFS Specification "
	 "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("pre: arc4");
MODULE_SOFTDEP("pre: des");
MODULE_SOFTDEP("pre: ecb");
MODULE_SOFTDEP("pre: hmac");
MODULE_SOFTDEP("pre: md4");
MODULE_SOFTDEP("pre: md5");
MODULE_SOFTDEP("pre: nls");
MODULE_SOFTDEP("pre: aes");
MODULE_SOFTDEP("pre: cmac");
MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: aead2");
MODULE_SOFTDEP("pre: ccm");
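/*
 * The MODULE_SOFTDEP("pre: ...") lines above ask the module loader to bring
 * in the listed crypto and nls modules before cifs, so the algorithms needed
 * for signing and encryption are available at mount time.
 */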
module_init(init_cifs)
module_exit(exit_cifs)