/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
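/*
 * Open a file using the SMB POSIX extensions (CIFSPOSIXCreate) and, when the
 * caller passes a pinode, fill in or refresh the inode from the returned
 * FILE_UNIX_BASIC_INFO.
 */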
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
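/*
 * Open a file over the wire via the NT CreateFile path (server->ops->open)
 * when the POSIX open path is unavailable, then refresh the inode from the
 * metadata the open returned.
 */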
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc)
		server->ops->close(xid, tcon, fid);

out:
	kfree(buf);
	return rc;
}
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}
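/*
 * Take lock_sem for writing without blocking in down_write() itself; polling
 * with down_write_trylock() likely avoids a queueing deadlock on lock_sem
 * when a reader is waiting on this thread (e.g. during reconnect).
 */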
static void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
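/*
 * Add a reference to an open file instance under file_info_lock; the
 * counterpart of cifsFileInfo_put() below.
 */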
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause an error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
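/*
 * Reopen a file handle that was invalidated (e.g. by a reconnect): retry the
 * POSIX open first when available, otherwise redo the NT open with
 * O_CREAT/O_EXCL/O_TRUNC masked off, then reacquire any byte-range locks.
 */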
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
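/*
 * Walk every fd's lock list on the inode and report the first conflicting
 * lock found. Caller must hold lock_sem.
 */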
static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};
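/*
 * Push cached POSIX byte-range locks to the server in three steps: count the
 * FL_POSIX locks under flc_lock, preallocate one lock_to_push per lock (no
 * new locks can appear while lock_sem is held), then fill the list and send
 * each lock with CIFSSMBPosixLock().
 */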
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
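/*
 * Helpers for cifs_unlock_range(): move saved lock records between lists and
 * free a list of cifsLockInfo entries, waking any waiters first.
 */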
static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}
static void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_FILE_SB(file);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
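/*
 * Write data to the server synchronously, retrying after -EAGAIN and
 * reopening an invalidated handle as needed; on success advance *offset and
 * the cached server EOF.
 */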
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written,
						     iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
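/*
 * Find a writable handle for the inode, preferring one opened by the calling
 * process; invalid handles are reopened as a last resort, bounded by
 * MAX_REOPEN_ATT attempts.
 */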
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	unsigned int nr_pages;
	struct page **pages;
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	/*
	 * find_get_pages_tag seems to return a max of 256 on each
	 * iteration, so we must call it several times in order to
	 * fill the array or the wsize is effectively limited to
	 * 256 * PAGE_CACHE_SIZE.
	 */
	*found_pages = 0;
	pages = wdata->pages;
	do {
		nr_pages = find_get_pages_tag(mapping, index,
					      PAGECACHE_TAG_DIRTY, tofind,
					      pages);
		*found_pages += nr_pages;
		tofind -= nr_pages;
		pages += nr_pages;
	} while (nr_pages && tofind && *index <= end);

	return wdata;
}
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		page_cache_release(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_CACHE_SIZE;
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_CACHE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz;

	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
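/*
 * Write out dirty pages in wsize-sized batches: take write credits, gather a
 * run of consecutive dirty pages, and hand them to the server's async write
 * op, redirtying the pages on -EAGAIN.
 */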
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
	/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
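
/*
 * Strict cache mode fsync: flush dirty pages first, invalidate the
 * cache if we no longer hold a read lease/oplock, and finally ask the
 * server to flush the file handle (unless mounted "nostrictsync").
 */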
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);

	return rc;
}
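
/*
 * Allocate the scratch pages used to stage an uncached write. On
 * failure, every page allocated so far is released and -ENOMEM is
 * returned.
 */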
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}
static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
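
/*
 * Copy user data into the staged write pages. *len is updated to the
 * number of bytes actually copied and *num_pages to the pages used, so
 * the caller can trim the request before sending it.
 */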
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
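
/*
 * Split an uncached write into wsize-sized requests, taking MTU credits
 * for each one, and issue them asynchronously. Successfully sent
 * requests are queued on wdata_list for the caller to wait on.
 */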
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;
	memcpy(&saved_from, from, sizeof(struct iov_iter));

	do {
		unsigned int wsize, credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		if (!wdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				memcpy(from, &saved_from,
				       sizeof(struct iov_iter));
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
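
/*
 * Entry point for uncached writes: send all chunks, then collect the
 * replies in order of increasing offset, resending any chunk that
 * failed with a retryable error.
 */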
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	struct iov_iter saved_from;
	int rc;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */
	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	memcpy(&saved_from, from, sizeof(struct iov_iter));

	rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
				  open_file, cifs_sb, &wdata_list);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				memcpy(&tmp_from, &saved_from,
				       sizeof(struct iov_iter));
				iov_iter_advance(&tmp_from,
						 wdata->offset - iocb->ki_pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						open_file, cifs_sb, &tmp_list);

				list_splice(&tmp_list, &wdata_list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
	cifs_stats_bytes_written(tcon, total_written);
	return total_written;
}
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	mutex_lock(&inode->i_mutex);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	mutex_unlock(&inode->i_mutex);

	if (rc > 0) {
		ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc);
		if (err < 0)
			rc = err;
	}
	up_read(&cinode->lock_sem);
	return rc;
}
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause an error with mandatory locks on
	 * these pages but not on the region from pos to pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
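
/*
 * cifs_readdata describes one async read request. It is refcounted;
 * the final kref_put lands in cifs_readdata_release (or the uncached
 * variant), which drops the file reference and frees the structure.
 */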
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}
static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		unsigned int nr_page_failed = i;

		for (i = 0; i < nr_page_failed; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
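
/*
 * Split an uncached read into rsize-sized requests, taking MTU credits
 * for each one, and issue them asynchronously. Each request that is
 * sent successfully is queued on rdata_list for the caller to wait on.
 */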
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->credits = credits;

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
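
/*
 * Entry point for uncached reads: issue all requests, then wait for the
 * replies in increasing-offset order, copying data to the destination
 * iterator and resending the remainder of any request interrupted by a
 * reconnect.
 */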
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	len = iov_iter_count(to);
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list);

				list_splice(&tmp_list, &rdata_list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	total_read = len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, total_read);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
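
/*
 * Legacy synchronous read path: loop issuing sync_read calls of at most
 * rsize bytes, reopening the file handle and retrying on -EAGAIN, until
 * the request is satisfied or the server returns no more data.
 */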
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if (!(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid,
						    &io_parms, &bytes_read,
						    &cur_offset, &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file_inode(file);

	xid = get_xid();

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc)
			return rc;
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
			 rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i, got_bytes;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	got_bytes = rdata->got_bytes;
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes)) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes))
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
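
/*
 * Pull a run of contiguous pages off the readahead list and add them to
 * the page cache, stopping at an index discontinuity or when the batch
 * would exceed rsize. Returns the batch start offset and byte count.
 */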
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

	INIT_LIST_HEAD(tmplist);

	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__set_page_locked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__clear_page_locked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	*bytes = PAGE_CACHE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_CACHE_SIZE > rsize)
			break;

		__set_page_locked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__clear_page_locked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_CACHE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
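
/*
 * readpages: service readahead. Pages are first offered to fscache;
 * whatever remains is batched by readpages_get_pages and sent to the
 * server as credit-limited async reads completed by cifs_readv_complete.
 */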
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_CACHE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->credits = credits;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}
/*
 * cifs_readpage_worker must be called with the page pinned
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_fs_time(file_inode(file)->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	free_xid(xid);
	return rc;
}
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon =
		cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&tcon->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&tcon->open_file_lock);
	return 0;
}
/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		page_cache_release(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
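
/*
 * Refuse to release a page that still carries private data; otherwise
 * let fscache decide whether its reference can be dropped.
 */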
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}
static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_CACHE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	cifs_done_oplock_break(cinode);
}
/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,