/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request and can
		   cause an unnecessary access-denied error on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

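/*
 * Example: an O_RDWR open requests GENERIC_READ | GENERIC_WRITE rather
 * than GENERIC_ALL, since over-asking can turn a create into an
 * access-denied failure on some servers.
 */
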
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

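/*
 * Example: an open(2) with O_WRONLY | O_CREAT | O_TRUNC maps to
 * SMB_O_WRONLY | SMB_O_CREAT | SMB_O_TRUNC on the wire.
 */
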
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

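/*
 * Example: O_CREAT | O_EXCL yields FILE_CREATE (fail if the file already
 * exists), while a bare O_TRUNC yields FILE_OVERWRITE (fail if it does
 * not exist).
 */
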
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

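/*
 * Minimal caller sketch (mirrors the use in cifs_open() below); whether
 * *pinode is NULL decides if a fresh inode is instantiated from the
 * returned attributes:
 *
 *	__u32 oplock = 0;
 *	__u16 netfid;
 *	rc = cifs_posix_open(full_path, &inode, inode->i_sb,
 *			     cifs_sb->mnt_file_mode, file->f_flags,
 *			     &oplock, &netfid, xid);
 */
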
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct POSIX match for the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates the existing
 *	file rather than creating a new one as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}

struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	mutex_init(&pCifsFile->lock_mutex);
	INIT_LIST_HEAD(&pCifsFile->llist);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);

	file->private_data = pCifsFile;
	return pCifsFile;
}

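/*
 * The returned cifsFileInfo starts with count == 1; that reference is
 * dropped by cifsFileInfo_put(), which frees the structure and may close
 * the handle on the server once the count reaches zero.
 */
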
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need to invalidate the mapping on
		   the last close because it may cause an error when we open
		   this file again and get at least a level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifs_file->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		kfree(li);
	}
	mutex_unlock(&cifs_file->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

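/*
 * Typical get/put pairing (a sketch; see find_readable_file() below):
 *
 *	cifsFileInfo_get(open_file);	take a reference under the list lock
 *	... use open_file->netfid ...
 *	cifsFileInfo_put(open_file);	may close the handle on the server
 */
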
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
				       pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}

static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

	/* can not grab rename sem here because various ops, including those
	   that already have the rename sem, can end up causing writepage to
	   get called and if the server was down that means we end up here,
	   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

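/*
 * Callers pass can_flush = true when it is safe to write dirty pages back
 * before refreshing inode info (e.g. the read paths), and false from the
 * write paths such as cifs_write(), where flushing from here could
 * deadlock, as the comments above explain.
 */
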
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
			   __u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}

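/*
 * These records only mirror locks the server has already granted; they let
 * cifs_lock() below find and release fully-overlapped ranges on unlock,
 * and any leftovers are discarded in cifsFileInfo_put() when the file is
 * closed.
 */
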
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		   one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	   account for negative length which we can not accept over the
	   wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock, posix_lock_type,
					wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */, 0);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */, 0);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;
		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */, 0);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */, 0);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		   to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock, posix_lock_type,
				      wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 0, numLock, lockType,
					 wait_flag, 0);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
				    (pfLock->fl_start + length) >=
				    (li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid, li->length,
							li->offset, 1, 0,
							li->type, false, 0);
					if (stored_rc)
						rc = stored_rc;
					else {
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}

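/*
 * Note the IS_GETLK probe above: the code tests for a conflicting lock by
 * briefly acquiring and then releasing a lock of the requested type, and
 * reports the result by rewriting pfLock->fl_type (F_UNLCK if the range
 * is free, F_WRLCK or F_RDLCK if it is not).
 */
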
/* update the file size (if needed) after a write */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		      unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

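/*
 * Example: a 4096-byte write at offset 8192 advances server_eof to 12288
 * if it was previously smaller; writes wholly below server_eof leave it
 * unchanged.
 */
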
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}

struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

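/*
 * This relies on the ordering established in cifs_new_fileinfo(): readable
 * instances are inserted at the head of openFileList and write-only ones
 * at the tail, so the scan can stop at the first write-only entry.
 */
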
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors. If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find a usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			goto error;
		}
	}

	return rc;

error:
	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = clen / PAGE_CACHE_SIZE;
	if (clen % PAGE_CACHE_SIZE)
		num_pages++;

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

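/*
 * Worked example (assuming a 4k PAGE_CACHE_SIZE): with wsize = 16384 and
 * len = 100000, clen is clamped to 16384 and get_numpages() returns 4;
 * with len = 10000, clen = 10000 and the partial tail page bumps the
 * count from 2 to 3.
 */
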
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned int written;
	unsigned long num_pages, npages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	struct kvec *to_send;
	struct page **pages;
	struct iov_iter it;
	struct inode *inode;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *pTcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_io_parms io_parms;
	int xid, rc;
	__u32 pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);

	pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
	if (!to_send) {
		kfree(pages);
		return -ENOMEM;
	}

	rc = cifs_write_allocate_pages(pages, num_pages);
	if (rc) {
		kfree(pages);
		kfree(to_send);
		return rc;
	}

	xid = GetXid();
	open_file = file->private_data;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	pTcon = tlink_tcon(open_file->tlink);
	inode = file->f_path.dentry->d_inode;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	npages = num_pages;

	do {
		size_t save_len = cur_len;
		for (i = 0; i < npages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
			copied = iov_iter_copy_from_user(pages[i], &it, 0,
							 copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
			to_send[i+1].iov_base = kmap(pages[i]);
			to_send[i+1].iov_len = copied;
		}

		cur_len = save_len - cur_len;

		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
					   npages, 0);
		} while (rc == -EAGAIN);

		for (i = 0; i < npages; i++)
			kunmap(pages[i]);

		if (written) {
			len -= written;
			total_written += written;
			cifs_update_eof(CIFS_I(inode), *poffset, written);
			*poffset += written;
		} else if (rc < 0) {
			if (!total_written)
				total_written = rc;
			break;
		}

		/* get length and number of kvecs of the next write */
		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
	} while (len > 0);

	if (total_written > 0) {
		spin_lock(&inode->i_lock);
		if (*poffset > inode->i_size)
			i_size_write(inode, *poffset);
		spin_unlock(&inode->i_lock);
	}

	cifs_stats_bytes_written(pTcon, total_written);
	mark_inode_dirty_sync(inode);

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(to_send);
	kfree(pages);
	FreeXid(xid);
	return total_written;
}

ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server exactly
	 * from pos to pos+len-1 rather than flush all affected pages, because
	 * flushing may cause an error with mandatory locks on those pages but
	 * not on the region from pos to pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}

static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		rc = -EAGAIN;
		read_data = NULL;

		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}

static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		/* For Windows ME and 9x we do not want to request more
		   than it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(const int, current_read_size,
					pTcon->ses->server->maxBuf - 128);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = current_read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &current_offset, &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc)
			return rc;
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}

static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache_lru(page, mapping, page->index,
					  GFP_KERNEL)) {
			page_cache_release(page);
			cFYI(1, "Add page cache failed");
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}
		page_cache_release(page);

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		data += PAGE_CACHE_SIZE;

		/* add page to FS-Cache */
		cifs_readpage_to_fscache(mapping->host, page);
	}
	return;
}

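/*
 * Example (assuming 4k pages): with bytes_read = 6000, the first page is
 * filled with 4096 bytes and the second receives the remaining 1904 bytes
 * with its last 2192 bytes zeroed, so no stale data is exposed past EOF.
 */
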
2009 static int cifs_readpages(struct file *file, struct address_space *mapping,
2010 struct list_head *page_list, unsigned num_pages)
2012 int rc = -EACCES;
2013 int xid;
2014 loff_t offset;
2015 struct page *page;
2016 struct cifs_sb_info *cifs_sb;
2017 struct cifs_tcon *pTcon;
2018 unsigned int bytes_read = 0;
2019 unsigned int read_size, i;
2020 char *smb_read_data = NULL;
2021 struct smb_com_read_rsp *pSMBr;
2022 struct cifsFileInfo *open_file;
2023 struct cifs_io_parms io_parms;
2024 int buf_type = CIFS_NO_BUFFER;
2025 __u32 pid;
2027 xid = GetXid();
2028 if (file->private_data == NULL) {
2029 rc = -EBADF;
2030 FreeXid(xid);
2031 return rc;
2033 open_file = file->private_data;
2034 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2035 pTcon = tlink_tcon(open_file->tlink);
2038 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2039 * immediately if the cookie is negative
2041 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2042 &num_pages);
2043 if (rc == 0)
2044 goto read_complete;
2046 cFYI(DBG2, "rpages: num pages %d", num_pages);
2047 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2048 pid = open_file->pid;
2049 else
2050 pid = current->tgid;
        for (i = 0; i < num_pages; ) {
                unsigned contig_pages;
                struct page *tmp_page;
                unsigned long expected_index;

                if (list_empty(page_list))
                        break;

                page = list_entry(page_list->prev, struct page, lru);
                offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

                /* count adjacent pages that we will read into */
                contig_pages = 0;
                expected_index =
                        list_entry(page_list->prev, struct page, lru)->index;
                list_for_each_entry_reverse(tmp_page, page_list, lru) {
                        if (tmp_page->index == expected_index) {
                                contig_pages++;
                                expected_index++;
                        } else
                                break;
                }
                if (contig_pages + i > num_pages)
                        contig_pages = num_pages - i;

                /* for reads over a certain size could initiate async
                   read ahead */

                read_size = contig_pages * PAGE_CACHE_SIZE;
                /* Read size needs to be a multiple of the page size */
                read_size = min_t(const unsigned int, read_size,
                                  cifs_sb->rsize & PAGE_CACHE_MASK);
                cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
                                read_size, contig_pages);
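
                /*
                 * Retry loop: if the session reconnected underneath us,
                 * the file handle is stale; reopen it and reissue the
                 * read until the result is something other than -EAGAIN.
                 */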
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if (open_file->invalidHandle) {
                                rc = cifs_reopen_file(open_file, true);
                                if (rc != 0)
                                        break;
                        }
                        io_parms.netfid = open_file->netfid;
                        io_parms.pid = pid;
                        io_parms.tcon = pTcon;
                        io_parms.offset = offset;
                        io_parms.length = read_size;
                        rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
                                         &smb_read_data, &buf_type);
                        /* BB more RC checks ? */
                        if (rc == -EAGAIN) {
                                if (smb_read_data) {
                                        if (buf_type == CIFS_SMALL_BUFFER)
                                                cifs_small_buf_release(smb_read_data);
                                        else if (buf_type == CIFS_LARGE_BUFFER)
                                                cifs_buf_release(smb_read_data);
                                        smb_read_data = NULL;
                                }
                        }
                }
                if ((rc < 0) || (smb_read_data == NULL)) {
                        cFYI(1, "Read error in readpages: %d", rc);
                        break;
                } else if (bytes_read > 0) {
                        task_io_account_read(bytes_read);
                        pSMBr = (struct smb_com_read_rsp *)smb_read_data;
                        cifs_copy_cache_pages(mapping, page_list, bytes_read,
                                smb_read_data + 4 /* RFC1001 hdr */ +
                                le16_to_cpu(pSMBr->DataOffset));

                        i += bytes_read >> PAGE_CACHE_SHIFT;
                        cifs_stats_bytes_read(pTcon, bytes_read);
                        if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
                                i++; /* account for partial page */

                                /* server copy of file can have smaller size
                                   than client */
                                /* BB do we need to verify this common case ?
                                   this case is ok - if we are at server EOF
                                   we will hit it on next read */

                                /* break; */
                        }
                } else {
                        cFYI(1, "No bytes read (%d) at offset %lld. "
                                "Cleaning remaining pages from readahead list",
                                bytes_read, offset);
                        /* BB turn off caching and do new lookup on
                           file size at server? */
                        break;
                }
                if (smb_read_data) {
                        if (buf_type == CIFS_SMALL_BUFFER)
                                cifs_small_buf_release(smb_read_data);
                        else if (buf_type == CIFS_LARGE_BUFFER)
                                cifs_buf_release(smb_read_data);
                        smb_read_data = NULL;
                }
                bytes_read = 0;
        }

        /* need to free smb_read_data buf before exit */
        if (smb_read_data) {
                if (buf_type == CIFS_SMALL_BUFFER)
                        cifs_small_buf_release(smb_read_data);
                else if (buf_type == CIFS_LARGE_BUFFER)
                        cifs_buf_release(smb_read_data);
                smb_read_data = NULL;
        }

read_complete:
        FreeXid(xid);
        return rc;
}
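
/*
 * Fill a single page, preferring a copy already held by FS-Cache and
 * falling back to a synchronous read from the server.  The page
 * reference and kmap taken here are dropped on both the error and
 * success paths.
 */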
static int cifs_readpage_worker(struct file *file, struct page *page,
        loff_t *poffset)
{
        char *read_data;
        int rc;

        /* Is the page cached? */
        rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
        if (rc == 0)
                goto read_complete;

        page_cache_get(page);
        read_data = kmap(page);
        /* for reads over a certain size could initiate async read ahead */

        rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

        if (rc < 0)
                goto io_error;
        else
                cFYI(1, "Bytes read %d", rc);

        file->f_path.dentry->d_inode->i_atime =
                current_fs_time(file->f_path.dentry->d_inode->i_sb);

        if (PAGE_CACHE_SIZE > rc)
                memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

        flush_dcache_page(page);
        SetPageUptodate(page);

        /* send this page to the cache */
        cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

        rc = 0;

io_error:
        kunmap(page);
        page_cache_release(page);

read_complete:
        return rc;
}
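
/*
 * ->readpage entry point: the VFS hands us a locked page; fill it
 * synchronously and unlock it before returning, whether or not the
 * read succeeded.
 */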
static int cifs_readpage(struct file *file, struct page *page)
{
        loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int rc = -EACCES;
        int xid;

        xid = GetXid();

        if (file->private_data == NULL) {
                rc = -EBADF;
                FreeXid(xid);
                return rc;
        }

        cFYI(1, "readpage %p at offset %d 0x%x\n",
                 page, (int)offset, (int)offset);

        rc = cifs_readpage_worker(file, page, &offset);

        unlock_page(page);

        FreeXid(xid);
        return rc;
}
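
/* Return 1 if any currently open handle on the inode permits writing. */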
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
        struct cifsFileInfo *open_file;

        spin_lock(&cifs_file_list_lock);
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
                        spin_unlock(&cifs_file_list_lock);
                        return 1;
                }
        }
        spin_unlock(&cifs_file_list_lock);
        return 0;
}

/* We do not want to update the file size from the server for inodes
   open for write, to avoid races with writepage extending the file.
   In the future we could consider allowing a refresh of the inode
   only on increases in the file size, but this is tricky to do
   without racing with writebehind page caching in the current Linux
   kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
        if (!cifsInode)
                return true;

        if (is_inode_writable(cifsInode)) {
                /* This inode is open for write at least once */
                struct cifs_sb_info *cifs_sb;

                cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
                        /* since there is no page cache to corrupt on
                           direct I/O, we can change the size safely */
                        return true;
                }

                if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
                        return true;

                return false;
        } else
                return true;
}
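
/*
 * Worked example of the offset arithmetic in cifs_write_begin below
 * (illustrative only, assuming 4 KiB pages): pos = 5000, len = 100
 * gives index = 1 (5000 >> 12), offset = 904 (5000 & 4095) and
 * page_start = 4096, so the write covers bytes 904..1003 of the
 * second page of the file.
 */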
static int cifs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
        loff_t page_start = pos & PAGE_MASK;
        loff_t i_size;
        struct page *page;
        int rc = 0;

        cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                rc = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page))
                goto out;

        /*
         * If we write a full page it will be up to date, no need to read from
         * the server. If the write is short, we'll end up doing a sync write
         * instead.
         */
        if (len == PAGE_CACHE_SIZE)
                goto out;

        /*
         * optimize away the read when we have an oplock, and we're not
         * expecting to use any of the data we'd be reading in. That
         * is, when the page lies beyond the EOF, or straddles the EOF
         * and the write will cover all of the existing data.
         */
        if (CIFS_I(mapping->host)->clientCanCacheRead) {
                i_size = i_size_read(mapping->host);
                if (page_start >= i_size ||
                    (offset == 0 && (pos + len) >= i_size)) {
                        zero_user_segments(page, 0, offset,
                                           offset + len,
                                           PAGE_CACHE_SIZE);
                        /*
                         * PageChecked means that the parts of the page
                         * to which we're not writing are considered up
                         * to date. Once the data is copied to the
                         * page, it can be set uptodate.
                         */
                        SetPageChecked(page);
                        goto out;
                }
        }

        if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
                /*
                 * might as well read a page, it is fast enough. If we get
                 * an error, we don't need to return it. cifs_write_end will
                 * do a sync write instead since PG_uptodate isn't set.
                 */
                cifs_readpage_worker(file, page, &page_start);
        } else {
                /* We could try using another file handle if there is one,
                   but how would we lock it to prevent a close of that
                   handle racing with this read? In any case, this page
                   will be written out by write_end, so this is fine. */
        }
out:
        *pagep = page;
        return rc;
}
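
/*
 * Refuse to release a page that still carries private state; otherwise
 * let FS-Cache decide whether its reference on the page can be dropped.
 */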
static int cifs_release_page(struct page *page, gfp_t gfp)
{
        if (PagePrivate(page))
                return 0;

        return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
        struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

        if (offset == 0)
                cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
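
/*
 * Write back a dirty, locked page before it is invalidated (called,
 * for example, from the invalidate_inode_pages2 path) so that dirty
 * data is not silently discarded.
 */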
static int cifs_launder_page(struct page *page)
{
        int rc = 0;
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 0,
                .range_start = range_start,
                .range_end = range_end,
        };

        cFYI(1, "Launder page: %p", page);

        if (clear_page_dirty_for_io(page))
                rc = cifs_writepage_locked(page, &wbc);

        cifs_fscache_invalidate_page(page, page->mapping->host);
        return rc;
}
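
/*
 * Work item run when the server breaks our oplock: break any local
 * lease on the inode, flush dirty pages, and, if read caching is no
 * longer permitted, wait for writeback and invalidate the cached
 * data; finally acknowledge the break to the server unless the
 * acknowledgement was cancelled.
 */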
void cifs_oplock_break(struct work_struct *work)
{
        struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
                                                  oplock_break);
        struct inode *inode = cfile->dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        int rc = 0;

        if (inode && S_ISREG(inode->i_mode)) {
                if (cinode->clientCanCacheRead)
                        break_lease(inode, O_RDONLY);
                else
                        break_lease(inode, O_WRONLY);
                rc = filemap_fdatawrite(inode->i_mapping);
                if (cinode->clientCanCacheRead == 0) {
                        rc = filemap_fdatawait(inode->i_mapping);
                        mapping_set_error(inode->i_mapping, rc);
                        invalidate_remote_inode(inode);
                }
                cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
        }

        /*
         * Releasing a stale oplock after a recent reconnect of the smb
         * session, using a now incorrect file handle, is not a data
         * integrity issue, but do not bother sending an oplock release
         * if the session to the server is still disconnected, since in
         * that case the oplock has already been released by the server.
         */
        if (!cfile->oplock_break_cancelled) {
                rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
                                 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
                                 cinode->clientCanCacheRead ? 1 : 0);
                cFYI(1, "Oplock release rc = %d", rc);
        }
}

const struct address_space_operations cifs_addr_ops = {
        .readpage = cifs_readpage,
        .readpages = cifs_readpages,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .invalidatepage = cifs_invalidate_page,
        .launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
        .readpage = cifs_readpage,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .invalidatepage = cifs_invalidate_page,
        .launder_page = cifs_launder_page,
};
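
/*
 * For reference, the choice between the two tables is made at inode
 * setup time elsewhere in the client.  A sketch of that selection,
 * assuming the cifs_set_ops() logic in fs/cifs/inode.c:
 *
 *        if (server->maxBuf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
 *                inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *        else
 *                inode->i_data.a_ops = &cifs_addr_ops;
 */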