Avoid beyond bounds copy while caching ACL
[zen-stable.git] / fs / cifs / file.c
/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *
 *	O_SYNC is a reasonable match to CIFS writethrough flag
 *	and the read write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
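
/*
 * Allocate a cifsFileInfo for the newly opened handle, take references on
 * the dentry and tcon link, add it to the tcon and inode open-file lists
 * (readable instances go first), and stash it in file->private_data.
 */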
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list */
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}
static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need to invalidate the mapping on
		   the last close because it may cause an error when we open
		   this file again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		if (li->netfid != cifs_file->netfid)
			continue;
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
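
/*
 * Reopen a file handle that was invalidated, e.g. by a session reconnect.
 * Tries a posix open first when the server supports it, then falls back
 * to a regular SMB open. When can_flush is true, also flushes dirty pages
 * and refreshes the inode info before relocking the file.
 */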
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

	/* can not grab rename sem here because various ops, including
	   those that already have the rename sem can end up causing writepage
	   to get called and if the server was down that means we end up here,
	   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
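
/*
 * Allocate and initialize a cifsLockInfo for the given byte range; the
 * caller is responsible for linking it into the inode's lock list.
 */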
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->netfid = netfid;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}
static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
static bool
__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
			  __u64 length, __u8 type, __u16 netfid,
			  struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li, *tmp;

	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
			 ((netfid == li->netfid && current->tgid == li->pid) ||
			  type == li->type))
			continue;
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}
static bool
cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
			struct cifsLockInfo **conf_lock)
{
	return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
					 lock->type, lock->netfid, conf_lock);
}
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
	       __u8 type, __u16 netfid, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
					  &conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
static void
cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
{
	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&lock->llist, &cinode->llist);
	mutex_unlock(&cinode->lock_mutex);
}
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cinode->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	mutex_unlock(&cinode->lock_mutex);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}
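
/*
 * Push all cached byte-range locks for this file to the server. Locks are
 * packed into LOCKING_ANDX_RANGE arrays, as many as fit into one SMB
 * (max_num), and sent one lock type per pass: exclusive locks first, then
 * shared ones. Clears can_cache_brlcks when done.
 */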
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	int xid, rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = GetXid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
						       li->type, 0, num, buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	FreeXid(xid);
	return rc;
}
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = GetXid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_mutex that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		struct file_lock tmp_lock;
		int stored_rc;

		tmp_lock.fl_start = lck->offset;
		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     0, lck->length, &tmp_lock,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
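
/* Dispatch to the posix or mandatory flavor based on tcon capabilities. */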
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return cifs_push_mandatory_locks(cfile);
}
static void
cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
		bool *wait_flag)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = LOCKING_ANDX_LARGE_FILES;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK");
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}
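
/*
 * Handle F_GETLK: test for a conflicting lock locally (or via a posix lock
 * request when supported); otherwise probe the server by trying to take
 * and immediately release the lock, downgrading to a shared probe if the
 * exclusive one fails.
 */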
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      1 /* get */, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
			    flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1, type, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type, 0, 0);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & LOCKING_ANDX_SHARED_LOCK) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1,
			 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type | LOCKING_ANDX_SHARED_LOCK,
				 0, 0);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}
static void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}
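
/*
 * Remove cached locks that fall inside the unlock range and, unless they
 * are still only cached locally, batch the corresponding unlock requests
 * to the server, restoring the cached entries if a request fails.
 */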
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (cfile->netfid != li->netfid)
				continue;
			if (types[i] != li->type)
				continue;
			if (!cinode->can_cache_brlcks) {
				cur->Pid = cpu_to_le16(li->pid);
				cur->LengthLow = cpu_to_le32((u32)li->length);
				cur->LengthHigh =
					cpu_to_le32((u32)(li->length>>32));
				cur->OffsetLow = cpu_to_le32((u32)li->offset);
				cur->OffsetHigh =
					cpu_to_le32((u32)(li->offset>>32));
				/*
				 * We need to save a lock here to let us add
				 * it again to the inode list if the unlock
				 * range request fails on the server.
				 */
				list_move(&li->llist, &tmp_llist);
				if (++num == max_num) {
					stored_rc = cifs_lockv(xid, tcon,
							       cfile->netfid,
							       li->type, num,
							       0, buf);
					if (stored_rc) {
						/*
						 * We failed on the unlock range
						 * request - add all locks from
						 * the tmp list to the head of
						 * the inode list.
						 */
						cifs_move_llist(&tmp_llist,
								&cinode->llist);
						rc = stored_rc;
					} else
						/*
						 * The unlock range request
						 * succeeded - free the tmp
						 * list.
						 */
						cifs_free_llist(&tmp_llist);
					cur = buf;
					num = 0;
				} else
					cur++;
			} else {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the inode list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
			}
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist, &cinode->llist);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	mutex_unlock(&cinode->lock_mutex);
	kfree(buf);
	return rc;
}
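
/*
 * Handle F_SETLK/F_SETLKW: posix locks go straight to the server once the
 * local lock is recorded; mandatory locks are cached when possible and
 * sent via LOCKING_ANDX otherwise.
 */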
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      0 /* set */, length, flock,
				      posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type, netfid);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cinode, lock, wait_flag);
		if (rc < 0)
			kfree(lock);
		if (rc <= 0)
			goto out;

		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
				 flock->fl_start, 0, 1, type, wait_flag, 0);
		if (rc) {
			kfree(lock);
			goto out;
		}

		cifs_lock_add(cinode, lock);
	} else if (unlock)
		rc = cifs_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u8 type;

	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);
	netfid = cfile->netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		FreeXid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	FreeXid(xid);
	return rc;
}
/* update the file size (if needed) after a write */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
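
/*
 * Write write_size bytes from write_data to the server at *poffset,
 * looping until everything is sent and transparently reopening an
 * invalidated handle. Advances *poffset and updates the cached EOF and
 * inode size as data goes out.
 */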
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
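
/*
 * Find an open file instance usable for reading on this inode and take a
 * reference on it; returns NULL when none is available.
 */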
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for
	   it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors. If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
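
/*
 * Writeback for the whole mapping: gather runs of consecutive dirty pages
 * (up to wsize worth) into a cifs_writedata and send them with a single
 * async write, falling back to generic_writepages() when wsize is smaller
 * than a page.
 */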
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	int xid;

	xid = GetXid();
	/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}
2047 static int
2048 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2050 int rc = 0;
2051 unsigned long i;
2053 for (i = 0; i < num_pages; i++) {
2054 pages[i] = alloc_page(__GFP_HIGHMEM);
2055 if (!pages[i]) {
2056 /*
2057 * save number of pages we have already allocated and
2058 * return with ENOMEM error
2059 */
2060 num_pages = i;
2061 rc = -ENOMEM;
2062 goto error;
2063 }
2064 }
2066 return rc;
2068 error:
2069 for (i = 0; i < num_pages; i++)
2070 put_page(pages[i]);
2071 return rc;
2072 }
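/*
 * Usage sketch for the helper above (hypothetical caller, shown only to
 * illustrate the cleanup contract): on failure the helper has already
 * put every page it allocated, so the caller frees nothing; on success
 * the caller owns all num_pages pages and must put them itself.
 *
 *	rc = cifs_write_allocate_pages(pages, num_pages);
 *	if (rc)
 *		return rc;
 *	...
 *	for (i = 0; i < num_pages; i++)
 *		put_page(pages[i]);
 */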
2074 static inline
2075 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2076 {
2077 size_t num_pages;
2078 size_t clen;
2080 clen = min_t(const size_t, len, wsize);
2081 num_pages = clen / PAGE_CACHE_SIZE;
2082 if (clen % PAGE_CACHE_SIZE)
2083 num_pages++;
2085 if (cur_len)
2086 *cur_len = clen;
2088 return num_pages;
2089 }
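/*
 * For example, assuming wsize = 57344 and PAGE_CACHE_SIZE = 4096: a
 * 200000 byte request is clamped to clen = 57344, which is exactly
 * 57344 / 4096 = 14 pages, so num_pages = 14; a 10000 byte request
 * gives clen = 10000, 10000 / 4096 = 2 with remainder 1808, so
 * num_pages = 3.
 */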
2091 static ssize_t
2092 cifs_iovec_write(struct file *file, const struct iovec *iov,
2093 unsigned long nr_segs, loff_t *poffset)
2094 {
2095 unsigned int written;
2096 unsigned long num_pages, npages, i;
2097 size_t copied, len, cur_len;
2098 ssize_t total_written = 0;
2099 struct kvec *to_send;
2100 struct page **pages;
2101 struct iov_iter it;
2102 struct inode *inode;
2103 struct cifsFileInfo *open_file;
2104 struct cifs_tcon *pTcon;
2105 struct cifs_sb_info *cifs_sb;
2106 struct cifs_io_parms io_parms;
2107 int xid, rc;
2108 __u32 pid;
2110 len = iov_length(iov, nr_segs);
2111 if (!len)
2112 return 0;
2114 rc = generic_write_checks(file, poffset, &len, 0);
2115 if (rc)
2116 return rc;
2118 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2119 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2121 pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
2122 if (!pages)
2123 return -ENOMEM;
2125 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
2126 if (!to_send) {
2127 kfree(pages);
2128 return -ENOMEM;
2129 }
2131 rc = cifs_write_allocate_pages(pages, num_pages);
2132 if (rc) {
2133 kfree(pages);
2134 kfree(to_send);
2135 return rc;
2136 }
2138 xid = GetXid();
2139 open_file = file->private_data;
2141 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2142 pid = open_file->pid;
2143 else
2144 pid = current->tgid;
2146 pTcon = tlink_tcon(open_file->tlink);
2147 inode = file->f_path.dentry->d_inode;
2149 iov_iter_init(&it, iov, nr_segs, len, 0);
2150 npages = num_pages;
2152 do {
2153 size_t save_len = cur_len;
2154 for (i = 0; i < npages; i++) {
2155 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
2156 copied = iov_iter_copy_from_user(pages[i], &it, 0,
2157 copied);
2158 cur_len -= copied;
2159 iov_iter_advance(&it, copied);
2160 to_send[i+1].iov_base = kmap(pages[i]);
2161 to_send[i+1].iov_len = copied;
2162 }
2164 cur_len = save_len - cur_len;
2166 do {
2167 if (open_file->invalidHandle) {
2168 rc = cifs_reopen_file(open_file, false);
2169 if (rc != 0)
2170 break;
2171 }
2172 io_parms.netfid = open_file->netfid;
2173 io_parms.pid = pid;
2174 io_parms.tcon = pTcon;
2175 io_parms.offset = *poffset;
2176 io_parms.length = cur_len;
2177 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
2178 npages, 0);
2179 } while (rc == -EAGAIN);
2181 for (i = 0; i < npages; i++)
2182 kunmap(pages[i]);
2184 if (written) {
2185 len -= written;
2186 total_written += written;
2187 cifs_update_eof(CIFS_I(inode), *poffset, written);
2188 *poffset += written;
2189 } else if (rc < 0) {
2190 if (!total_written)
2191 total_written = rc;
2192 break;
2193 }
2195 /* get length and number of kvecs of the next write */
2196 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
2197 } while (len > 0);
2199 if (total_written > 0) {
2200 spin_lock(&inode->i_lock);
2201 if (*poffset > inode->i_size)
2202 i_size_write(inode, *poffset);
2203 spin_unlock(&inode->i_lock);
2204 }
2206 cifs_stats_bytes_written(pTcon, total_written);
2207 mark_inode_dirty_sync(inode);
2209 for (i = 0; i < num_pages; i++)
2210 put_page(pages[i]);
2211 kfree(to_send);
2212 kfree(pages);
2213 FreeXid(xid);
2214 return total_written;
2215 }
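/*
 * A note on the kvec layout above: to_send[0] is deliberately never
 * filled here - CIFSSMBWrite2() places the SMB header in the first
 * vector - which is why the array is sized num_pages + 1 and the page
 * data is stored starting at to_send[1].
 */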
2217 ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
2218 unsigned long nr_segs, loff_t pos)
2219 {
2220 ssize_t written;
2221 struct inode *inode;
2223 inode = iocb->ki_filp->f_path.dentry->d_inode;
2225 /*
2226 * BB - optimize the way when signing is disabled. We can drop this
2227 * extra memory-to-memory copying and use iovec buffers for constructing
2228 * write request.
2229 */
2231 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2232 if (written > 0) {
2233 CIFS_I(inode)->invalid_mapping = true;
2234 iocb->ki_pos = pos;
2235 }
2237 return written;
2238 }
2240 ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2241 unsigned long nr_segs, loff_t pos)
2242 {
2243 struct inode *inode;
2245 inode = iocb->ki_filp->f_path.dentry->d_inode;
2247 if (CIFS_I(inode)->clientCanCacheAll)
2248 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2250 /*
2251 * In strict cache mode we need to write the data to the server exactly
2252 * from the pos to pos+len-1 rather than flush all affected pages
2253 * because it may cause an error with mandatory locks on these pages but
2254 * not on the region from pos to pos+len-1.
2255 */
2257 return cifs_user_writev(iocb, iov, nr_segs, pos);
2258 }
2260 static ssize_t
2261 cifs_iovec_read(struct file *file, const struct iovec *iov,
2262 unsigned long nr_segs, loff_t *poffset)
2263 {
2264 int rc;
2265 int xid;
2266 ssize_t total_read;
2267 unsigned int bytes_read = 0;
2268 size_t len, cur_len;
2269 int iov_offset = 0;
2270 struct cifs_sb_info *cifs_sb;
2271 struct cifs_tcon *pTcon;
2272 struct cifsFileInfo *open_file;
2273 struct smb_com_read_rsp *pSMBr;
2274 struct cifs_io_parms io_parms;
2275 char *read_data;
2276 unsigned int rsize;
2277 __u32 pid;
2279 if (!nr_segs)
2280 return 0;
2282 len = iov_length(iov, nr_segs);
2283 if (!len)
2284 return 0;
2286 xid = GetXid();
2287 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2289 /* FIXME: set up handlers for larger reads and/or convert to async */
2290 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2292 open_file = file->private_data;
2293 pTcon = tlink_tcon(open_file->tlink);
2295 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2296 pid = open_file->pid;
2297 else
2298 pid = current->tgid;
2300 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2301 cFYI(1, "attempting read on write only file instance");
2303 for (total_read = 0; total_read < len; total_read += bytes_read) {
2304 cur_len = min_t(const size_t, len - total_read, rsize);
2305 rc = -EAGAIN;
2306 read_data = NULL;
2308 while (rc == -EAGAIN) {
2309 int buf_type = CIFS_NO_BUFFER;
2310 if (open_file->invalidHandle) {
2311 rc = cifs_reopen_file(open_file, true);
2312 if (rc != 0)
2313 break;
2314 }
2315 io_parms.netfid = open_file->netfid;
2316 io_parms.pid = pid;
2317 io_parms.tcon = pTcon;
2318 io_parms.offset = *poffset;
2319 io_parms.length = cur_len;
2320 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2321 &read_data, &buf_type);
2322 pSMBr = (struct smb_com_read_rsp *)read_data;
2323 if (read_data) {
2324 char *data_offset = read_data + 4 +
2325 le16_to_cpu(pSMBr->DataOffset);
2326 if (memcpy_toiovecend(iov, data_offset,
2327 iov_offset, bytes_read))
2328 rc = -EFAULT;
2329 if (buf_type == CIFS_SMALL_BUFFER)
2330 cifs_small_buf_release(read_data);
2331 else if (buf_type == CIFS_LARGE_BUFFER)
2332 cifs_buf_release(read_data);
2333 read_data = NULL;
2334 iov_offset += bytes_read;
2335 }
2336 }
2338 if (rc || (bytes_read == 0)) {
2339 if (total_read) {
2340 break;
2341 } else {
2342 FreeXid(xid);
2343 return rc;
2344 }
2345 } else {
2346 cifs_stats_bytes_read(pTcon, bytes_read);
2347 *poffset += bytes_read;
2348 }
2349 }
2351 FreeXid(xid);
2352 return total_read;
2353 }
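/*
 * The "read_data + 4 + DataOffset" arithmetic above skips the 4 byte
 * RFC 1002 length field that precedes the SMB header in the receive
 * buffer; DataOffset itself is relative to the start of that header.
 */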
2355 ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
2356 unsigned long nr_segs, loff_t pos)
2357 {
2358 ssize_t read;
2360 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2361 if (read > 0)
2362 iocb->ki_pos = pos;
2364 return read;
2365 }
2367 ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2368 unsigned long nr_segs, loff_t pos)
2369 {
2370 struct inode *inode;
2372 inode = iocb->ki_filp->f_path.dentry->d_inode;
2374 if (CIFS_I(inode)->clientCanCacheRead)
2375 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2377 /*
2378 * In strict cache mode we need to read from the server all the time
2379 * if we don't have level II oplock because the server can delay mtime
2380 * change - so we can't make a decision about inode invalidating.
2381 * And we can also fail with pagereading if there are mandatory locks
2382 * on pages affected by this read but not on the region from pos to
2383 * pos+len-1.
2384 */
2386 return cifs_user_readv(iocb, iov, nr_segs, pos);
2387 }
2389 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
2390 loff_t *poffset)
2391 {
2392 int rc = -EACCES;
2393 unsigned int bytes_read = 0;
2394 unsigned int total_read;
2395 unsigned int current_read_size;
2396 unsigned int rsize;
2397 struct cifs_sb_info *cifs_sb;
2398 struct cifs_tcon *pTcon;
2399 int xid;
2400 char *current_offset;
2401 struct cifsFileInfo *open_file;
2402 struct cifs_io_parms io_parms;
2403 int buf_type = CIFS_NO_BUFFER;
2404 __u32 pid;
2406 xid = GetXid();
2407 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2409 /* FIXME: set up handlers for larger reads and/or convert to async */
2410 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2412 if (file->private_data == NULL) {
2413 rc = -EBADF;
2414 FreeXid(xid);
2415 return rc;
2416 }
2417 open_file = file->private_data;
2418 pTcon = tlink_tcon(open_file->tlink);
2420 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2421 pid = open_file->pid;
2422 else
2423 pid = current->tgid;
2425 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2426 cFYI(1, "attempting read on write only file instance");
2428 for (total_read = 0, current_offset = read_data;
2429 read_size > total_read;
2430 total_read += bytes_read, current_offset += bytes_read) {
2431 current_read_size = min_t(uint, read_size - total_read, rsize);
2433 /* For Windows ME and 9x we do not want to request more
2434 than it negotiated since it will refuse the read then */
2435 if ((pTcon->ses) &&
2436 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
2437 current_read_size = min_t(uint, current_read_size,
2438 CIFSMaxBufSize);
2439 }
2440 rc = -EAGAIN;
2441 while (rc == -EAGAIN) {
2442 if (open_file->invalidHandle) {
2443 rc = cifs_reopen_file(open_file, true);
2444 if (rc != 0)
2445 break;
2446 }
2447 io_parms.netfid = open_file->netfid;
2448 io_parms.pid = pid;
2449 io_parms.tcon = pTcon;
2450 io_parms.offset = *poffset;
2451 io_parms.length = current_read_size;
2452 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2453 &current_offset, &buf_type);
2454 }
2455 if (rc || (bytes_read == 0)) {
2456 if (total_read) {
2457 break;
2458 } else {
2459 FreeXid(xid);
2460 return rc;
2461 }
2462 } else {
2463 cifs_stats_bytes_read(pTcon, bytes_read);
2464 *poffset += bytes_read;
2465 }
2466 }
2467 FreeXid(xid);
2468 return total_read;
2469 }
2471 /*
2472 * If the page is mmap'ed into a process' page tables, then we need to make
2473 * sure that it doesn't change while being written back.
2474 */
2475 static int
2476 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2477 {
2478 struct page *page = vmf->page;
2480 lock_page(page);
2481 return VM_FAULT_LOCKED;
2482 }
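/*
 * The lock_page() above is what provides the guarantee described in the
 * comment: ->writepage runs with the page locked, so a fault that wants
 * to make the page writable blocks here until any writeback of it that
 * is in progress has finished.
 */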
2484 static struct vm_operations_struct cifs_file_vm_ops = {
2485 .fault = filemap_fault,
2486 .page_mkwrite = cifs_page_mkwrite,
2487 };
2489 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2490 {
2491 int rc, xid;
2492 struct inode *inode = file->f_path.dentry->d_inode;
2494 xid = GetXid();
2496 if (!CIFS_I(inode)->clientCanCacheRead) {
2497 rc = cifs_invalidate_mapping(inode);
2498 if (rc)
2499 return rc;
2500 }
2502 rc = generic_file_mmap(file, vma);
2503 if (rc == 0)
2504 vma->vm_ops = &cifs_file_vm_ops;
2505 FreeXid(xid);
2506 return rc;
2507 }
2509 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2510 {
2511 int rc, xid;
2513 xid = GetXid();
2514 rc = cifs_revalidate_file(file);
2515 if (rc) {
2516 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
2517 FreeXid(xid);
2518 return rc;
2519 }
2520 rc = generic_file_mmap(file, vma);
2521 if (rc == 0)
2522 vma->vm_ops = &cifs_file_vm_ops;
2523 FreeXid(xid);
2524 return rc;
2525 }
2527 static int cifs_readpages(struct file *file, struct address_space *mapping,
2528 struct list_head *page_list, unsigned num_pages)
2529 {
2530 int rc;
2531 struct list_head tmplist;
2532 struct cifsFileInfo *open_file = file->private_data;
2533 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2534 unsigned int rsize = cifs_sb->rsize;
2535 pid_t pid;
2537 /*
2538 * Give up immediately if rsize is too small to read an entire page.
2539 * The VFS will fall back to readpage. We should never reach this
2540 * point however since we set ra_pages to 0 when the rsize is smaller
2541 * than a cache page.
2542 */
2543 if (unlikely(rsize < PAGE_CACHE_SIZE))
2544 return 0;
2546 /*
2547 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2548 * immediately if the cookie is negative
2549 */
2550 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2551 &num_pages);
2552 if (rc == 0)
2553 return rc;
2555 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2556 pid = open_file->pid;
2557 else
2558 pid = current->tgid;
2560 rc = 0;
2561 INIT_LIST_HEAD(&tmplist);
2563 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
2564 mapping, num_pages);
2566 /*
2567 * Start with the page at end of list and move it to private
2568 * list. Do the same with any following pages until we hit
2569 * the rsize limit, hit an index discontinuity, or run out of
2570 * pages. Issue the async read and then start the loop again
2571 * until the list is empty.
2573 * Note that list order is important. The page_list is in
2574 * the order of declining indexes. When we put the pages in
2575 * the rdata->pages, then we want them in increasing order.
2576 */
2577 while (!list_empty(page_list)) {
2578 unsigned int bytes = PAGE_CACHE_SIZE;
2579 unsigned int expected_index;
2580 unsigned int nr_pages = 1;
2581 loff_t offset;
2582 struct page *page, *tpage;
2583 struct cifs_readdata *rdata;
2585 page = list_entry(page_list->prev, struct page, lru);
2587 /*
2588 * Lock the page and put it in the cache. Since no one else
2589 * should have access to this page, we're safe to simply set
2590 * PG_locked without checking it first.
2591 */
2592 __set_page_locked(page);
2593 rc = add_to_page_cache_locked(page, mapping,
2594 page->index, GFP_KERNEL);
2596 /* give up if we can't stick it in the cache */
2597 if (rc) {
2598 __clear_page_locked(page);
2599 break;
2600 }
2602 /* move first page to the tmplist */
2603 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2604 list_move_tail(&page->lru, &tmplist);
2606 /* now try and add more pages onto the request */
2607 expected_index = page->index + 1;
2608 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
2609 /* discontinuity ? */
2610 if (page->index != expected_index)
2611 break;
2613 /* would this page push the read over the rsize? */
2614 if (bytes + PAGE_CACHE_SIZE > rsize)
2615 break;
2617 __set_page_locked(page);
2618 if (add_to_page_cache_locked(page, mapping,
2619 page->index, GFP_KERNEL)) {
2620 __clear_page_locked(page);
2621 break;
2622 }
2623 list_move_tail(&page->lru, &tmplist);
2624 bytes += PAGE_CACHE_SIZE;
2625 expected_index++;
2626 nr_pages++;
2627 }
2629 rdata = cifs_readdata_alloc(nr_pages);
2630 if (!rdata) {
2631 /* best to give up if we're out of mem */
2632 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
2633 list_del(&page->lru);
2634 lru_cache_add_file(page);
2635 unlock_page(page);
2636 page_cache_release(page);
2637 }
2638 rc = -ENOMEM;
2639 break;
2640 }
2642 spin_lock(&cifs_file_list_lock);
2643 cifsFileInfo_get(open_file);
2644 spin_unlock(&cifs_file_list_lock);
2645 rdata->cfile = open_file;
2646 rdata->mapping = mapping;
2647 rdata->offset = offset;
2648 rdata->bytes = bytes;
2649 rdata->pid = pid;
2650 list_splice_init(&tmplist, &rdata->pages);
2652 do {
2653 if (open_file->invalidHandle) {
2654 rc = cifs_reopen_file(open_file, true);
2655 if (rc != 0)
2656 continue;
2657 }
2658 rc = cifs_async_readv(rdata);
2659 } while (rc == -EAGAIN);
2661 if (rc != 0) {
2662 list_for_each_entry_safe(page, tpage, &rdata->pages,
2663 lru) {
2664 list_del(&page->lru);
2665 lru_cache_add_file(page);
2666 unlock_page(page);
2667 page_cache_release(page);
2668 }
2669 cifs_readdata_free(rdata);
2670 break;
2671 }
2672 }
2674 return rc;
2675 }
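/*
 * Batching example (illustrative numbers): with rsize = 16384 and 4096
 * byte pages, at most four contiguous pages are placed in one rdata, so
 * pages with indexes 7, 8, 9 and 10 coalesce into a single 16384 byte
 * read at offset 28672, while an index gap at 11 starts a new request.
 */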
2677 static int cifs_readpage_worker(struct file *file, struct page *page,
2678 loff_t *poffset)
2679 {
2680 char *read_data;
2681 int rc;
2683 /* Is the page cached? */
2684 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2685 if (rc == 0)
2686 goto read_complete;
2688 page_cache_get(page);
2689 read_data = kmap(page);
2690 /* for reads over a certain size could initiate async read ahead */
2692 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
2694 if (rc < 0)
2695 goto io_error;
2696 else
2697 cFYI(1, "Bytes read %d", rc);
2699 file->f_path.dentry->d_inode->i_atime =
2700 current_fs_time(file->f_path.dentry->d_inode->i_sb);
2702 if (PAGE_CACHE_SIZE > rc)
2703 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2705 flush_dcache_page(page);
2706 SetPageUptodate(page);
2708 /* send this page to the cache */
2709 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2711 rc = 0;
2713 io_error:
2714 kunmap(page);
2715 page_cache_release(page);
2717 read_complete:
2718 return rc;
2719 }
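/*
 * Short-read example: if cifs_read() returns 1000 for a 4096 byte page,
 * the memset above zeroes bytes 1000..4095 before the page is marked
 * uptodate, giving the usual zero-fill-beyond-EOF read semantics.
 */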
2721 static int cifs_readpage(struct file *file, struct page *page)
2722 {
2723 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2724 int rc = -EACCES;
2725 int xid;
2727 xid = GetXid();
2729 if (file->private_data == NULL) {
2730 rc = -EBADF;
2731 FreeXid(xid);
2732 return rc;
2733 }
2735 cFYI(1, "readpage %p at offset %d 0x%x\n",
2736 page, (int)offset, (int)offset);
2738 rc = cifs_readpage_worker(file, page, &offset);
2740 unlock_page(page);
2742 FreeXid(xid);
2743 return rc;
2744 }
2746 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2747 {
2748 struct cifsFileInfo *open_file;
2750 spin_lock(&cifs_file_list_lock);
2751 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2752 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2753 spin_unlock(&cifs_file_list_lock);
2754 return 1;
2755 }
2756 }
2757 spin_unlock(&cifs_file_list_lock);
2758 return 0;
2759 }
2761 /* We do not want to update the file size from server for inodes
2762 open for write - to avoid races with writepage extending
2763 the file - in the future we could consider allowing
2764 refreshing the inode only on increases in the file size
2765 but this is tricky to do without racing with writebehind
2766 page caching in the current Linux kernel design */
2767 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2768 {
2769 if (!cifsInode)
2770 return true;
2772 if (is_inode_writable(cifsInode)) {
2773 /* This inode is open for write at least once */
2774 struct cifs_sb_info *cifs_sb;
2776 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
2777 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2778 /* since no page cache to corrupt on directio
2779 we can change size safely */
2780 return true;
2781 }
2783 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
2784 return true;
2786 return false;
2787 } else
2788 return true;
2789 }
2791 static int cifs_write_begin(struct file *file, struct address_space *mapping,
2792 loff_t pos, unsigned len, unsigned flags,
2793 struct page **pagep, void **fsdata)
2794 {
2795 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2796 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
2797 loff_t page_start = pos & PAGE_MASK;
2798 loff_t i_size;
2799 struct page *page;
2800 int rc = 0;
2802 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
2804 page = grab_cache_page_write_begin(mapping, index, flags);
2805 if (!page) {
2806 rc = -ENOMEM;
2807 goto out;
2808 }
2810 if (PageUptodate(page))
2811 goto out;
2813 /*
2814 * If we write a full page it will be up to date, no need to read from
2815 * the server. If the write is short, we'll end up doing a sync write
2816 * instead.
2817 */
2818 if (len == PAGE_CACHE_SIZE)
2819 goto out;
2821 /*
2822 * optimize away the read when we have an oplock, and we're not
2823 * expecting to use any of the data we'd be reading in. That
2824 * is, when the page lies beyond the EOF, or straddles the EOF
2825 * and the write will cover all of the existing data.
2826 */
2827 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2828 i_size = i_size_read(mapping->host);
2829 if (page_start >= i_size ||
2830 (offset == 0 && (pos + len) >= i_size)) {
2831 zero_user_segments(page, 0, offset,
2832 offset + len,
2833 PAGE_CACHE_SIZE);
2834 /*
2835 * PageChecked means that the parts of the page
2836 * to which we're not writing are considered up
2837 * to date. Once the data is copied to the
2838 * page, it can be set uptodate.
2839 */
2840 SetPageChecked(page);
2841 goto out;
2842 }
2843 }
2845 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2846 /*
2847 * might as well read a page, it is fast enough. If we get
2848 * an error, we don't need to return it. cifs_write_end will
2849 * do a sync write instead since PG_uptodate isn't set.
2850 */
2851 cifs_readpage_worker(file, page, &page_start);
2852 } else {
2853 /* we could try using another file handle if there is one -
2854 but how would we lock it to prevent close of that handle
2855 racing with this read? In any case
2856 this will be written out by write_end so is fine */
2857 }
2858 out:
2859 *pagep = page;
2860 return rc;
2861 }
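/*
 * The PageChecked flag set above is consumed by cifs_write_end(): a
 * checked page whose copy covered the full write length is promoted to
 * uptodate there, so the read from the server is avoided entirely.
 */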
2863 static int cifs_release_page(struct page *page, gfp_t gfp)
2864 {
2865 if (PagePrivate(page))
2866 return 0;
2868 return cifs_fscache_release_page(page, gfp);
2869 }
2871 static void cifs_invalidate_page(struct page *page, unsigned long offset)
2872 {
2873 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2875 if (offset == 0)
2876 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2877 }
2879 static int cifs_launder_page(struct page *page)
2880 {
2881 int rc = 0;
2882 loff_t range_start = page_offset(page);
2883 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2884 struct writeback_control wbc = {
2885 .sync_mode = WB_SYNC_ALL,
2886 .nr_to_write = 0,
2887 .range_start = range_start,
2888 .range_end = range_end,
2889 };
2891 cFYI(1, "Launder page: %p", page);
2893 if (clear_page_dirty_for_io(page))
2894 rc = cifs_writepage_locked(page, &wbc);
2896 cifs_fscache_invalidate_page(page, page->mapping->host);
2897 return rc;
2898 }
2900 void cifs_oplock_break(struct work_struct *work)
2901 {
2902 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2903 oplock_break);
2904 struct inode *inode = cfile->dentry->d_inode;
2905 struct cifsInodeInfo *cinode = CIFS_I(inode);
2906 int rc = 0;
2908 if (inode && S_ISREG(inode->i_mode)) {
2909 if (cinode->clientCanCacheRead)
2910 break_lease(inode, O_RDONLY);
2911 else
2912 break_lease(inode, O_WRONLY);
2913 rc = filemap_fdatawrite(inode->i_mapping);
2914 if (cinode->clientCanCacheRead == 0) {
2915 rc = filemap_fdatawait(inode->i_mapping);
2916 mapping_set_error(inode->i_mapping, rc);
2917 invalidate_remote_inode(inode);
2918 }
2919 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
2920 }
2922 rc = cifs_push_locks(cfile);
2923 if (rc)
2924 cERROR(1, "Push locks rc = %d", rc);
2926 /*
2927 * releasing stale oplock after recent reconnect of smb session using
2928 * a now incorrect file handle is not a data integrity issue but do
2929 * not bother sending an oplock release if session to server still is
2930 * disconnected since oplock already released by the server
2931 */
2932 if (!cfile->oplock_break_cancelled) {
2933 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
2934 current->tgid, 0, 0, 0, 0,
2935 LOCKING_ANDX_OPLOCK_RELEASE, false,
2936 cinode->clientCanCacheRead ? 1 : 0);
2937 cFYI(1, "Oplock release rc = %d", rc);
2938 }
2939 }
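/*
 * The final argument to CIFSSMBLock() above is the oplock level being
 * kept after the release: 1 (level II, read caching retained) when
 * clientCanCacheRead is still set, 0 (none) otherwise.
 */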
2941 const struct address_space_operations cifs_addr_ops = {
2942 .readpage = cifs_readpage,
2943 .readpages = cifs_readpages,
2944 .writepage = cifs_writepage,
2945 .writepages = cifs_writepages,
2946 .write_begin = cifs_write_begin,
2947 .write_end = cifs_write_end,
2948 .set_page_dirty = __set_page_dirty_nobuffers,
2949 .releasepage = cifs_release_page,
2950 .invalidatepage = cifs_invalidate_page,
2951 .launder_page = cifs_launder_page,
2952 };
2954 /*
2955 * cifs_readpages requires the server to support a buffer large enough to
2956 * contain the header plus one complete page of data. Otherwise, we need
2957 * to leave cifs_readpages out of the address space operations.
2958 */
2959 const struct address_space_operations cifs_addr_ops_smallbuf = {
2960 .readpage = cifs_readpage,
2961 .writepage = cifs_writepage,
2962 .writepages = cifs_writepages,
2963 .write_begin = cifs_write_begin,
2964 .write_end = cifs_write_end,
2965 .set_page_dirty = __set_page_dirty_nobuffers,
2966 .releasepage = cifs_release_page,
2967 .invalidatepage = cifs_invalidate_page,
2968 .launder_page = cifs_launder_page,
2969 };
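/*
 * The choice between cifs_addr_ops and cifs_addr_ops_smallbuf is made
 * when the inode is set up (cifs_set_ops() in inode.c in this kernel):
 * the smallbuf table is used when the server's negotiated buffer is too
 * small to hold the SMB header plus one complete page of data.
 */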