/*
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * Common Internet FileSystem (CIFS) client
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/xattr.h>

#define DECLARE_GLOBALS_HERE

#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/key-type.h>
#include "cifs_spnego.h"
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
		 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
		 "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
		 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
		 "Default: 32767 Range: 2 to 32767.");
module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *cifsoplockd_wq;
__u32 cifs_lock_secret;
/*
 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}

void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}
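/*
 * Editorial note (not from the original source): these two helpers are meant
 * to be used as a pair. A path that already pins the VFS superblock (for
 * example an open file) calls cifs_sb_active(inode->i_sb) and later drops
 * that reference with cifs_sb_deactive(inode->i_sb); the final deactivation
 * is what may call deactivate_super() and tear the mount down.
 */
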
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/* BB FIXME fix time_gran to be larger for LANMAN sessions */
	sb->s_time_gran = 100;

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize */
	sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	/*
	 * PATH_MAX may be too long - it would presumably be total path,
	 * but note that some servers (including Samba 3) have a shorter
	 * maximum path.
	 *
	 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
	 */
	buf->f_namelen = PATH_MAX;
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, buf);

	free_xid(xid);
	return 0;
}
static long cifs_fallocate(struct file *file, int mode, loff_t off,
			   loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);

	return -EOPNOTSUPP;
}
static int cifs_permission(struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask);
}
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;

	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	generate_random_uuid(cifs_inode->lease_key);

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	return &cifs_inode->vfs_inode;
}
static void cifs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

static void
cifs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, cifs_i_callback);
}

static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	cifs_fscache_release_inode_cookie(inode);
}
static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case LANMAN:
		seq_puts(s, "lanman");
		break;
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}
}
static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else
		seq_puts(s, "loose");
}
static void
cifs_show_nls(struct seq_file *s, struct nls_table *cur)
{
	struct nls_table *def;

	/* Display iocharset= option if it's not default charset */
	def = load_nls_default();
	if (def != cur)
		seq_printf(s, ",iocharset=%s", cur->charset);
	unload_nls(def);
}
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName)
		seq_show_option(s, "domain", tcon->ses->domainName);

	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
		   cifs_sb->mnt_file_mode,
		   cifs_sb->mnt_dir_mode);

	cifs_show_nls(s, cifs_sb->local_nls);

	if (tcon->seal)
		seq_puts(s, ",seal");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->mnt_backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->mnt_backupgid));

	seq_printf(s, ",rsize=%u", cifs_sb->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->wsize);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);
	/* convert actimeo and display it in seconds */
	seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);

	return 0;
}
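/*
 * Illustrative (hypothetical) fragment of the option string built above, as
 * it might appear in /proc/mounts for an SMB3 mount; this example is an
 * editorial addition and the exact contents depend on the negotiated dialect
 * and the options given at mount time:
 *
 *   ,vers=3.0,sec=ntlmssp,cache=strict,username=someuser,uid=0,forceuid,
 *   gid=0,forcegid,addr=192.0.2.10,file_mode=0755,dir_mode=0755,soft,
 *   serverino,rsize=1048576,wsize=1048576,echo_interval=60,actimeo=1
 */
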
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
		/* we have other mounts to same share or we have
		   already tried to force umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	} else if (tcon->tc_count == 1)
		tcon->tidStatus = CifsExiting;
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
	}
}
#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= SB_NODIRATIME;
	return 0;
}

static int cifs_drop_inode(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);
}

static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.delete_inode	= cifs_delete_inode, */ /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and an ERR_PTR otherwise.
 */
static struct dentry *
cifs_get_root(struct smb_vol *vol, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(vol, cifs_sb,
					    cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	p = s = full_path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!dir) {
			dput(dentry);
			dentry = ERR_PTR(-ENOENT);
			break;
		}
		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_one_len_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}
static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	struct smb_vol *volume_info;
	struct cifs_mnt_data mnt_data;
	struct dentry *root;

	cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);

	volume_info = cifs_get_volume_info((char *)data, dev_name);
	if (IS_ERR(volume_info))
		return ERR_CAST(volume_info);

	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	if (cifs_sb == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_nls;
	}

	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
	if (cifs_sb->mountdata == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_free;
	}

	rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out_free;
	}

	rc = cifs_mount(cifs_sb, volume_info);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out_free;
	}

	mnt_data.vol = volume_info;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		root = ERR_CAST(sb);
		cifs_umount(cifs_sb);
		goto out;
	}

	if (sb->s_fs_info) {
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			deactivate_locked_super(sb);
			goto out;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	root = cifs_get_root(volume_info, sb);
	if (IS_ERR(root))
		goto out_super;

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	goto out;

out_super:
	deactivate_locked_super(sb);
out:
	cifs_cleanup_volume_info(volume_info);
	return root;

out_free:
	kfree(cifs_sb->prepath);
	kfree(cifs_sb->mountdata);
	kfree(cifs_sb);
out_nls:
	unload_nls(volume_info->local_nls);
	goto out;
}
static ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_filp->f_flags & O_DIRECT)
		return cifs_user_readv(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return generic_file_read_iter(iocb, iter);
}
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	return generic_file_llseek(file, offset, whence);
}
static int
cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
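/*
 * Editorial summary (not from the original source) of the checks above:
 * F_UNLCK is always passed through; F_RDLCK requires a read cache
 * (oplock/lease) on the inode and F_WRLCK a write cache. Failing that, the
 * lease is still granted locally when the tcon was mounted with local_lease
 * and no read oplock is held; anything else fails with -EAGAIN.
 */
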
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.mount = cifs_do_mount,
	.kill_sb = cifs_kill_sb,
};
MODULE_ALIAS_FS("cifs");

const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.listxattr = cifs_listxattr,
};

const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};

const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
static int cifs_clone_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, u64 len)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
	unsigned int xid;
	int rc;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush first and last page first */
	truncate_inode_pages_range(&target_inode->i_data, destoff,
				   PAGE_ALIGN(destoff + len)-1);

	if (target_tcon->ses->server->ops->duplicate_extents)
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	else
		rc = -EOPNOTSUPP;

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc;
}
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (src_inode == target_inode) {
		rc = -EINVAL;
		goto out;
	}

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush first and last page first */
	truncate_inode_pages(&target_inode->i_data, 0);

	if (target_tcon->ses->server->ops->copychunk_range)
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	else
		rc = -EOPNOTSUPP;

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	unsigned int xid = get_xid();
	ssize_t rc;

	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
				       len, flags);

	free_xid(xid);
	return rc;
}
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_ops = {
	/* BB reevaluate whether they can be done with directio, no cache */
	.read_iter = cifs_user_readv,
	.write_iter = cifs_user_writev,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* BB reevaluate whether they can be done with directio, no cache */
	.read_iter = cifs_user_readv,
	.write_iter = cifs_user_writev,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.llseek = generic_file_llseek,
};
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	init_rwsem(&cifsi->lock_sem);
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	}
	CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult */

	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
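	/*
	 * Worked example (editorial note, not in the original source): the
	 * mask 0x1FE00 applied above keeps bits 9..16, i.e. it rounds the
	 * clamped value down to a multiple of 512. A hypothetical
	 * CIFSMaxBufSize=130000 becomes 129536 (253 * 512); the default
	 * 16384 is already a multiple of 512 and is left unchanged.
	 */
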
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests). A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
static int __init
init_cifs(void)
{
	int rc = 0;

	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
	/*
	 * Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	rc = cifs_fscache_register();
	if (rc)
		goto out_destroy_cifsoplockd_wq;

	rc = cifs_init_inodecache();
	if (rc)
		goto out_unreg_fscache;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_UPCALL */

#ifdef CONFIG_CIFS_ACL
	rc = init_cifs_idmap();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_ACL */

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	return 0;

out_init_cifs_idmap:
#ifdef CONFIG_CIFS_ACL
	exit_cifs_idmap();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	cifs_destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_unreg_fscache:
	cifs_fscache_unregister();
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_cifs\n");
	unregister_filesystem(&cifs_fs_type);
	cifs_dfs_release_automount_timer();
#ifdef CONFIG_CIFS_ACL
	exit_cifs_idmap();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
	cifs_destroy_request_bufs();
	cifs_destroy_mids();
	cifs_destroy_inodecache();
	cifs_fscache_unregister();
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access servers complying with the SNIA CIFS Specification "
	 "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("pre: arc4");
MODULE_SOFTDEP("pre: des");
MODULE_SOFTDEP("pre: ecb");
MODULE_SOFTDEP("pre: hmac");
MODULE_SOFTDEP("pre: md4");
MODULE_SOFTDEP("pre: md5");
MODULE_SOFTDEP("pre: nls");
MODULE_SOFTDEP("pre: aes");
MODULE_SOFTDEP("pre: cmac");
MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: aead2");
MODULE_SOFTDEP("pre: ccm");
module_init(init_cifs)
module_exit(exit_cifs)