[CIFS] Fix rsize calculation so that large readx flag is checked.
[linux-2.6/verdex.git] fs/cifs/file.c
blob 11806c879c4715e21cee08a102403cb191c633da
1 /*
2 * fs/cifs/file.c
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/fs.h>
24 #include <linux/backing-dev.h>
25 #include <linux/stat.h>
26 #include <linux/fcntl.h>
27 #include <linux/mpage.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/smp_lock.h>
31 #include <linux/writeback.h>
32 #include <asm/div64.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #include "cifsglob.h"
36 #include "cifsproto.h"
37 #include "cifs_unicode.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
41 static inline struct cifsFileInfo *cifs_init_private(
42 struct cifsFileInfo *private_data, struct inode *inode,
43 struct file *file, __u16 netfid)
45 memset(private_data, 0, sizeof(struct cifsFileInfo));
46 private_data->netfid = netfid;
47 private_data->pid = current->tgid;
48 init_MUTEX(&private_data->fh_sem);
49 private_data->pfile = file; /* needed for writepage */
50 private_data->pInode = inode;
51 private_data->invalidHandle = FALSE;
52 private_data->closePend = FALSE;
54 return private_data;
57 static inline int cifs_convert_flags(unsigned int flags)
59 if ((flags & O_ACCMODE) == O_RDONLY)
60 return GENERIC_READ;
61 else if ((flags & O_ACCMODE) == O_WRONLY)
62 return GENERIC_WRITE;
63 else if ((flags & O_ACCMODE) == O_RDWR) {
64 /* GENERIC_ALL is too much permission to request
65 can cause unnecessary access denied on create */
66 /* return GENERIC_ALL; */
67 return (GENERIC_READ | GENERIC_WRITE);
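/* fallback access mask: 0x20197 == READ_CONTROL | FILE_WRITE_ATTRIBUTES |
   FILE_READ_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA |
   FILE_WRITE_DATA | FILE_READ_DATA (standard NT access bits) */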
70 return 0x20197;
73 static inline int cifs_get_disposition(unsigned int flags)
75 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
76 return FILE_CREATE;
77 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
78 return FILE_OVERWRITE_IF;
79 else if ((flags & O_CREAT) == O_CREAT)
80 return FILE_OPEN_IF;
81 else
82 return FILE_OPEN;
85 /* all arguments to this function must be checked for validity in caller */
86 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
87 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
88 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
89 char *full_path, int xid)
91 struct timespec temp;
92 int rc;
94 /* want handles we can use to read with first
95 in the list so we do not have to walk the
96 list to search for one in prepare_write */
97 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
98 list_add_tail(&pCifsFile->flist,
99 &pCifsInode->openFileList);
100 } else {
101 list_add(&pCifsFile->flist,
102 &pCifsInode->openFileList);
104 write_unlock(&GlobalSMBSeslock);
105 write_unlock(&file->f_owner.lock);
106 if (pCifsInode->clientCanCacheRead) {
107 /* we have the inode open somewhere else
108 no need to discard cache data */
109 goto client_can_cache;
112 /* BB need same check in cifs_create too? */
113 /* if not oplocked, invalidate inode pages if mtime or file
114 size changed */
115 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
116 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
117 (file->f_dentry->d_inode->i_size ==
118 (loff_t)le64_to_cpu(buf->EndOfFile))) {
119 cFYI(1, ("inode unchanged on server"));
120 } else {
121 if (file->f_dentry->d_inode->i_mapping) {
122 /* BB no need to lock inode until after invalidate
123 since namei code should already have it locked? */
124 filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
125 filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
127 cFYI(1, ("invalidating remote inode since open detected it "
128 "changed"));
129 invalidate_remote_inode(file->f_dentry->d_inode);
132 client_can_cache:
133 if (pTcon->ses->capabilities & CAP_UNIX)
134 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
135 full_path, inode->i_sb, xid);
136 else
137 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
138 full_path, buf, inode->i_sb, xid);
140 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
141 pCifsInode->clientCanCacheAll = TRUE;
142 pCifsInode->clientCanCacheRead = TRUE;
143 cFYI(1, ("Exclusive Oplock granted on inode %p",
144 file->f_dentry->d_inode));
145 } else if ((*oplock & 0xF) == OPLOCK_READ)
146 pCifsInode->clientCanCacheRead = TRUE;
148 return rc;
151 int cifs_open(struct inode *inode, struct file *file)
153 int rc = -EACCES;
154 int xid, oplock;
155 struct cifs_sb_info *cifs_sb;
156 struct cifsTconInfo *pTcon;
157 struct cifsFileInfo *pCifsFile;
158 struct cifsInodeInfo *pCifsInode;
159 struct list_head *tmp;
160 char *full_path = NULL;
161 int desiredAccess;
162 int disposition;
163 __u16 netfid;
164 FILE_ALL_INFO *buf = NULL;
166 xid = GetXid();
168 cifs_sb = CIFS_SB(inode->i_sb);
169 pTcon = cifs_sb->tcon;
171 if (file->f_flags & O_CREAT) {
172 /* search inode for this file and fill in file->private_data */
173 pCifsInode = CIFS_I(file->f_dentry->d_inode);
174 read_lock(&GlobalSMBSeslock);
175 list_for_each(tmp, &pCifsInode->openFileList) {
176 pCifsFile = list_entry(tmp, struct cifsFileInfo,
177 flist);
178 if ((pCifsFile->pfile == NULL) &&
179 (pCifsFile->pid == current->tgid)) {
180 /* mode set in cifs_create */
182 /* needed for writepage */
183 pCifsFile->pfile = file;
185 file->private_data = pCifsFile;
186 break;
189 read_unlock(&GlobalSMBSeslock);
190 if (file->private_data != NULL) {
191 rc = 0;
192 FreeXid(xid);
193 return rc;
194 } else {
195 if (file->f_flags & O_EXCL)
196 cERROR(1, ("could not find file instance for "
197 "new file %p ", file));
201 down(&inode->i_sb->s_vfs_rename_sem);
202 full_path = build_path_from_dentry(file->f_dentry);
203 up(&inode->i_sb->s_vfs_rename_sem);
204 if (full_path == NULL) {
205 FreeXid(xid);
206 return -ENOMEM;
209 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
210 inode, file->f_flags, full_path));
211 desiredAccess = cifs_convert_flags(file->f_flags);
213 /*********************************************************************
214 * open flag mapping table:
216 * POSIX Flag CIFS Disposition
217 * ---------- ----------------
218 * O_CREAT FILE_OPEN_IF
219 * O_CREAT | O_EXCL FILE_CREATE
220 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
221 * O_TRUNC FILE_OVERWRITE
222 * none of the above FILE_OPEN
224 * Note that there is not a direct match between disposition
225 * FILE_SUPERSEDE (ie create whether or not file exists although
226 * O_CREAT | O_TRUNC is similar but truncates the existing
227 * file rather than creating a new file as FILE_SUPERSEDE does
228 * (which uses the attributes / metadata passed in on open call)
230 *? O_SYNC is a reasonable match to CIFS writethrough flag
231 *? and the read write flags match reasonably. O_LARGEFILE
232 *? is irrelevant because largefile support is always used
233 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
234 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
235 *********************************************************************/
237 disposition = cifs_get_disposition(file->f_flags);
239 if (oplockEnabled)
240 oplock = REQ_OPLOCK;
241 else
242 oplock = FALSE;
244 /* BB pass O_SYNC flag through on file attributes .. BB */
246 /* Also refresh inode by passing in file_info buf returned by SMBOpen
247 and calling get_inode_info with returned buf (at least helps
248 non-Unix server case) */
250 /* BB we can not do this if this is the second open of a file
251 and the first handle has writebehind data, we might be
252 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
253 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
254 if (!buf) {
255 rc = -ENOMEM;
256 goto out;
258 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
259 CREATE_NOT_DIR, &netfid, &oplock, buf,
260 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
261 & CIFS_MOUNT_MAP_SPECIAL_CHR);
262 if (rc == -EIO) {
263 /* Old server, try legacy style OpenX */
264 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
265 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
266 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
267 & CIFS_MOUNT_MAP_SPECIAL_CHR);
269 if (rc) {
270 cFYI(1, ("cifs_open returned 0x%x ", rc));
271 goto out;
273 file->private_data =
274 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
275 if (file->private_data == NULL) {
276 rc = -ENOMEM;
277 goto out;
279 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
280 write_lock(&file->f_owner.lock);
281 write_lock(&GlobalSMBSeslock);
282 list_add(&pCifsFile->tlist, &pTcon->openFileList);
284 pCifsInode = CIFS_I(file->f_dentry->d_inode);
285 if (pCifsInode) {
286 rc = cifs_open_inode_helper(inode, file, pCifsInode,
287 pCifsFile, pTcon,
288 &oplock, buf, full_path, xid);
289 } else {
290 write_unlock(&GlobalSMBSeslock);
291 write_unlock(&file->f_owner.lock);
294 if (oplock & CIFS_CREATE_ACTION) {
295 /* time to set mode which we can not set earlier due to
296 problems creating new read-only files */
297 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
298 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
299 inode->i_mode,
300 (__u64)-1, (__u64)-1, 0 /* dev */,
301 cifs_sb->local_nls,
302 cifs_sb->mnt_cifs_flags &
303 CIFS_MOUNT_MAP_SPECIAL_CHR);
304 } else {
305 /* BB implement via Windows security descriptors eg
306 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
307 -1, -1, local_nls);
308 in the meantime could set r/o dos attribute when
309 perms are eg: mode & 0222 == 0 */
313 out:
314 kfree(buf);
315 kfree(full_path);
316 FreeXid(xid);
317 return rc;
320 /* Try to reacquire byte range locks that were released when session */
321 /* to server was lost */
322 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
324 int rc = 0;
326 /* BB list all locks open on this file and relock */
328 return rc;
331 static int cifs_reopen_file(struct inode *inode, struct file *file,
332 int can_flush)
334 int rc = -EACCES;
335 int xid, oplock;
336 struct cifs_sb_info *cifs_sb;
337 struct cifsTconInfo *pTcon;
338 struct cifsFileInfo *pCifsFile;
339 struct cifsInodeInfo *pCifsInode;
340 char *full_path = NULL;
341 int desiredAccess;
342 int disposition = FILE_OPEN;
343 __u16 netfid;
345 if (inode == NULL)
346 return -EBADF;
347 if (file->private_data) {
348 pCifsFile = (struct cifsFileInfo *)file->private_data;
349 } else
350 return -EBADF;
352 xid = GetXid();
353 down(&pCifsFile->fh_sem);
354 if (pCifsFile->invalidHandle == FALSE) {
355 up(&pCifsFile->fh_sem);
356 FreeXid(xid);
357 return 0;
360 if (file->f_dentry == NULL) {
361 up(&pCifsFile->fh_sem);
362 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
363 FreeXid(xid);
364 return -EBADF;
366 cifs_sb = CIFS_SB(inode->i_sb);
367 pTcon = cifs_sb->tcon;
368 /* can not grab rename sem here because various ops, including
369 those that already have the rename sem can end up causing writepage
370 to get called and if the server was down that means we end up here,
371 and we can never tell if the caller already has the rename_sem */
372 full_path = build_path_from_dentry(file->f_dentry);
373 if (full_path == NULL) {
374 up(&pCifsFile->fh_sem);
375 FreeXid(xid);
376 return -ENOMEM;
379 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
380 inode, file->f_flags,full_path));
381 desiredAccess = cifs_convert_flags(file->f_flags);
383 if (oplockEnabled)
384 oplock = REQ_OPLOCK;
385 else
386 oplock = FALSE;
388 /* Can not refresh inode by passing in file_info buf to be returned
389 by SMBOpen and then calling get_inode_info with returned buf
390 since file might have write behind data that needs to be flushed
391 and server version of file size can be stale. If we knew for sure
392 that inode was not dirty locally we could do this */
394 /* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
395 if (buf == 0) {
396 up(&pCifsFile->fh_sem);
397 kfree(full_path);
398 FreeXid(xid);
399 return -ENOMEM;
400 } */
401 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
402 CREATE_NOT_DIR, &netfid, &oplock, NULL,
403 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
404 CIFS_MOUNT_MAP_SPECIAL_CHR);
405 if (rc) {
406 up(&pCifsFile->fh_sem);
407 cFYI(1, ("cifs_open returned 0x%x ", rc));
408 cFYI(1, ("oplock: %d ", oplock));
409 } else {
410 pCifsFile->netfid = netfid;
411 pCifsFile->invalidHandle = FALSE;
412 up(&pCifsFile->fh_sem);
413 pCifsInode = CIFS_I(inode);
414 if (pCifsInode) {
415 if (can_flush) {
416 filemap_fdatawrite(inode->i_mapping);
417 filemap_fdatawait(inode->i_mapping);
418 /* temporarily disable caching while we
419 go to server to get inode info */
420 pCifsInode->clientCanCacheAll = FALSE;
421 pCifsInode->clientCanCacheRead = FALSE;
422 if (pTcon->ses->capabilities & CAP_UNIX)
423 rc = cifs_get_inode_info_unix(&inode,
424 full_path, inode->i_sb, xid);
425 else
426 rc = cifs_get_inode_info(&inode,
427 full_path, NULL, inode->i_sb,
428 xid);
429 } /* else we are writing out data to server already
430 and could deadlock if we tried to flush data, and
431 since we do not know if we have data that would
432 invalidate the current end of file on the server
433 we can not go to the server to get the new inode
434 info */
435 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
436 pCifsInode->clientCanCacheAll = TRUE;
437 pCifsInode->clientCanCacheRead = TRUE;
438 cFYI(1, ("Exclusive Oplock granted on inode %p",
439 file->f_dentry->d_inode));
440 } else if ((oplock & 0xF) == OPLOCK_READ) {
441 pCifsInode->clientCanCacheRead = TRUE;
442 pCifsInode->clientCanCacheAll = FALSE;
443 } else {
444 pCifsInode->clientCanCacheRead = FALSE;
445 pCifsInode->clientCanCacheAll = FALSE;
447 cifs_relock_file(pCifsFile);
451 kfree(full_path);
452 FreeXid(xid);
453 return rc;
456 int cifs_close(struct inode *inode, struct file *file)
458 int rc = 0;
459 int xid;
460 struct cifs_sb_info *cifs_sb;
461 struct cifsTconInfo *pTcon;
462 struct cifsFileInfo *pSMBFile =
463 (struct cifsFileInfo *)file->private_data;
465 xid = GetXid();
467 cifs_sb = CIFS_SB(inode->i_sb);
468 pTcon = cifs_sb->tcon;
469 if (pSMBFile) {
470 pSMBFile->closePend = TRUE;
471 write_lock(&file->f_owner.lock);
472 if (pTcon) {
473 /* no sense reconnecting to close a file that is
474 already closed */
475 if (pTcon->tidStatus != CifsNeedReconnect) {
476 write_unlock(&file->f_owner.lock);
477 rc = CIFSSMBClose(xid, pTcon,
478 pSMBFile->netfid);
479 write_lock(&file->f_owner.lock);
482 write_lock(&GlobalSMBSeslock);
483 list_del(&pSMBFile->flist);
484 list_del(&pSMBFile->tlist);
485 write_unlock(&GlobalSMBSeslock);
486 write_unlock(&file->f_owner.lock);
487 kfree(pSMBFile->search_resume_name);
488 kfree(file->private_data);
489 file->private_data = NULL;
490 } else
491 rc = -EBADF;
493 if (list_empty(&(CIFS_I(inode)->openFileList))) {
494 cFYI(1, ("closing last open instance for inode %p", inode));
495 /* if the file is not open we do not know if we can cache info
496 on this inode, much less write behind and read ahead */
497 CIFS_I(inode)->clientCanCacheRead = FALSE;
498 CIFS_I(inode)->clientCanCacheAll = FALSE;
500 if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
501 rc = CIFS_I(inode)->write_behind_rc;
502 FreeXid(xid);
503 return rc;
506 int cifs_closedir(struct inode *inode, struct file *file)
508 int rc = 0;
509 int xid;
510 struct cifsFileInfo *pCFileStruct =
511 (struct cifsFileInfo *)file->private_data;
512 char *ptmp;
514 cFYI(1, ("Closedir inode = 0x%p with ", inode));
516 xid = GetXid();
518 if (pCFileStruct) {
519 struct cifsTconInfo *pTcon;
520 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
522 pTcon = cifs_sb->tcon;
524 cFYI(1, ("Freeing private data in close dir"));
525 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
526 (pCFileStruct->invalidHandle == FALSE)) {
527 pCFileStruct->invalidHandle = TRUE;
528 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
529 cFYI(1, ("Closing uncompleted readdir with rc %d",
530 rc));
531 /* not much we can do if it fails anyway, ignore rc */
532 rc = 0;
534 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
535 if (ptmp) {
536 /* BB removeme BB */ cFYI(1, ("freeing smb buf in srch struct in closedir"));
537 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
538 cifs_buf_release(ptmp);
540 ptmp = pCFileStruct->search_resume_name;
541 if (ptmp) {
542 /* BB removeme BB */ cFYI(1, ("freeing resume name in closedir"));
543 pCFileStruct->search_resume_name = NULL;
544 kfree(ptmp);
546 kfree(file->private_data);
547 file->private_data = NULL;
549 /* BB can we lock the filestruct while this is going on? */
550 FreeXid(xid);
551 return rc;
554 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
556 int rc, xid;
557 __u32 lockType = LOCKING_ANDX_LARGE_FILES;
558 __u32 numLock = 0;
559 __u32 numUnlock = 0;
560 __u64 length;
561 int wait_flag = FALSE;
562 struct cifs_sb_info *cifs_sb;
563 struct cifsTconInfo *pTcon;
565 length = 1 + pfLock->fl_end - pfLock->fl_start;
566 rc = -EACCES;
567 xid = GetXid();
569 cFYI(1, ("Lock parm: 0x%x flockflags: "
570 "0x%x flocktype: 0x%x start: %lld end: %lld",
571 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
572 pfLock->fl_end));
574 if (pfLock->fl_flags & FL_POSIX)
575 cFYI(1, ("Posix "));
576 if (pfLock->fl_flags & FL_FLOCK)
577 cFYI(1, ("Flock "));
578 if (pfLock->fl_flags & FL_SLEEP) {
579 cFYI(1, ("Blocking lock "));
580 wait_flag = TRUE;
582 if (pfLock->fl_flags & FL_ACCESS)
583 cFYI(1, ("Process suspended by mandatory locking - "
584 "not implemented yet "));
585 if (pfLock->fl_flags & FL_LEASE)
586 cFYI(1, ("Lease on file - not implemented yet"));
587 if (pfLock->fl_flags &
588 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
589 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
591 if (pfLock->fl_type == F_WRLCK) {
592 cFYI(1, ("F_WRLCK "));
593 numLock = 1;
594 } else if (pfLock->fl_type == F_UNLCK) {
595 cFYI(1, ("F_UNLCK "));
596 numUnlock = 1;
597 } else if (pfLock->fl_type == F_RDLCK) {
598 cFYI(1, ("F_RDLCK "));
599 lockType |= LOCKING_ANDX_SHARED_LOCK;
600 numLock = 1;
601 } else if (pfLock->fl_type == F_EXLCK) {
602 cFYI(1, ("F_EXLCK "));
603 numLock = 1;
604 } else if (pfLock->fl_type == F_SHLCK) {
605 cFYI(1, ("F_SHLCK "));
606 lockType |= LOCKING_ANDX_SHARED_LOCK;
607 numLock = 1;
608 } else
609 cFYI(1, ("Unknown type of lock "));
611 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
612 pTcon = cifs_sb->tcon;
614 if (file->private_data == NULL) {
615 FreeXid(xid);
616 return -EBADF;
619 if (IS_GETLK(cmd)) {
620 rc = CIFSSMBLock(xid, pTcon,
621 ((struct cifsFileInfo *)file->
622 private_data)->netfid,
623 length,
624 pfLock->fl_start, 0, 1, lockType,
625 0 /* wait flag */ );
626 if (rc == 0) {
627 rc = CIFSSMBLock(xid, pTcon,
628 ((struct cifsFileInfo *) file->
629 private_data)->netfid,
630 length,
631 pfLock->fl_start, 1 /* numUnlock */ ,
632 0 /* numLock */ , lockType,
633 0 /* wait flag */ );
634 pfLock->fl_type = F_UNLCK;
635 if (rc != 0)
636 cERROR(1, ("Error unlocking previously locked "
637 "range %d during test of lock ",
638 rc));
639 rc = 0;
641 } else {
642 /* if rc == ERR_SHARING_VIOLATION ? */
643 rc = 0; /* do not change lock type to unlock
644 since range in use */
647 FreeXid(xid);
648 return rc;
651 rc = CIFSSMBLock(xid, pTcon,
652 ((struct cifsFileInfo *) file->private_data)->
653 netfid, length,
654 pfLock->fl_start, numUnlock, numLock, lockType,
655 wait_flag);
656 if (pfLock->fl_flags & FL_POSIX)
657 posix_lock_file_wait(file, pfLock);
658 FreeXid(xid);
659 return rc;
662 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
663 size_t write_size, loff_t *poffset)
665 int rc = 0;
666 unsigned int bytes_written = 0;
667 unsigned int total_written;
668 struct cifs_sb_info *cifs_sb;
669 struct cifsTconInfo *pTcon;
670 int xid, long_op;
671 struct cifsFileInfo *open_file;
673 if (file->f_dentry == NULL)
674 return -EBADF;
676 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
677 if (cifs_sb == NULL)
678 return -EBADF;
680 pTcon = cifs_sb->tcon;
682 /* cFYI(1,
683 (" write %d bytes to offset %lld of %s", write_size,
684 *poffset, file->f_dentry->d_name.name)); */
686 if (file->private_data == NULL)
687 return -EBADF;
688 else
689 open_file = (struct cifsFileInfo *) file->private_data;
691 xid = GetXid();
692 if (file->f_dentry->d_inode == NULL) {
693 FreeXid(xid);
694 return -EBADF;
697 if (*poffset > file->f_dentry->d_inode->i_size)
698 long_op = 2; /* writes past end of file can take a long time */
699 else
700 long_op = 1;
702 for (total_written = 0; write_size > total_written;
703 total_written += bytes_written) {
704 rc = -EAGAIN;
705 while (rc == -EAGAIN) {
706 if (file->private_data == NULL) {
707 /* file has been closed on us */
708 FreeXid(xid);
709 /* if we have gotten here we have written some data
710 and blocked, and the file has been freed on us while
711 we blocked so return what we managed to write */
712 return total_written;
714 if (open_file->closePend) {
715 FreeXid(xid);
716 if (total_written)
717 return total_written;
718 else
719 return -EBADF;
721 if (open_file->invalidHandle) {
722 if ((file->f_dentry == NULL) ||
723 (file->f_dentry->d_inode == NULL)) {
724 FreeXid(xid);
725 return total_written;
727 /* we could deadlock if we called
728 filemap_fdatawait from here so tell
729 reopen_file not to flush data to server
730 now */
731 rc = cifs_reopen_file(file->f_dentry->d_inode,
732 file, FALSE);
733 if (rc != 0)
734 break;
737 rc = CIFSSMBWrite(xid, pTcon,
738 open_file->netfid,
739 min_t(const int, cifs_sb->wsize,
740 write_size - total_written),
741 *poffset, &bytes_written,
742 NULL, write_data + total_written, long_op);
744 if (rc || (bytes_written == 0)) {
745 if (total_written)
746 break;
747 else {
748 FreeXid(xid);
749 return rc;
751 } else
752 *poffset += bytes_written;
753 long_op = FALSE; /* subsequent writes fast -
754 15 seconds is plenty */
757 cifs_stats_bytes_written(pTcon, total_written);
759 /* since the write may have blocked check these pointers again */
760 if (file->f_dentry) {
761 if (file->f_dentry->d_inode) {
762 struct inode *inode = file->f_dentry->d_inode;
763 inode->i_ctime = inode->i_mtime =
764 current_fs_time(inode->i_sb);
765 if (total_written > 0) {
766 if (*poffset > file->f_dentry->d_inode->i_size)
767 i_size_write(file->f_dentry->d_inode,
768 *poffset);
770 mark_inode_dirty_sync(file->f_dentry->d_inode);
773 FreeXid(xid);
774 return total_written;
777 static ssize_t cifs_write(struct file *file, const char *write_data,
778 size_t write_size, loff_t *poffset)
780 int rc = 0;
781 unsigned int bytes_written = 0;
782 unsigned int total_written;
783 struct cifs_sb_info *cifs_sb;
784 struct cifsTconInfo *pTcon;
785 int xid, long_op;
786 struct cifsFileInfo *open_file;
788 if (file->f_dentry == NULL)
789 return -EBADF;
791 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
792 if (cifs_sb == NULL)
793 return -EBADF;
795 pTcon = cifs_sb->tcon;
797 cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
798 *poffset, file->f_dentry->d_name.name));
800 if (file->private_data == NULL)
801 return -EBADF;
802 else
803 open_file = (struct cifsFileInfo *)file->private_data;
805 xid = GetXid();
806 if (file->f_dentry->d_inode == NULL) {
807 FreeXid(xid);
808 return -EBADF;
811 if (*poffset > file->f_dentry->d_inode->i_size)
812 long_op = 2; /* writes past end of file can take a long time */
813 else
814 long_op = 1;
816 for (total_written = 0; write_size > total_written;
817 total_written += bytes_written) {
818 rc = -EAGAIN;
819 while (rc == -EAGAIN) {
820 if (file->private_data == NULL) {
821 /* file has been closed on us */
822 FreeXid(xid);
823 /* if we have gotten here we have written some data
824 and blocked, and the file has been freed on us
825 while we blocked so return what we managed to
826 write */
827 return total_written;
829 if (open_file->closePend) {
830 FreeXid(xid);
831 if (total_written)
832 return total_written;
833 else
834 return -EBADF;
836 if (open_file->invalidHandle) {
837 if ((file->f_dentry == NULL) ||
838 (file->f_dentry->d_inode == NULL)) {
839 FreeXid(xid);
840 return total_written;
842 /* we could deadlock if we called
843 filemap_fdatawait from here so tell
844 reopen_file not to flush data to
845 server now */
846 rc = cifs_reopen_file(file->f_dentry->d_inode,
847 file, FALSE);
848 if (rc != 0)
849 break;
851 #ifdef CONFIG_CIFS_EXPERIMENTAL
852 /* BB FIXME We can not sign across two buffers yet */
853 if((experimEnabled) && ((pTcon->ses->server->secMode &
854 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0)) {
855 struct kvec iov[2];
856 unsigned int len;
858 len = min((size_t)cifs_sb->wsize,
859 write_size - total_written);
860 /* iov[0] is reserved for smb header */
861 iov[1].iov_base = (char *)write_data +
862 total_written;
863 iov[1].iov_len = len;
864 rc = CIFSSMBWrite2(xid, pTcon,
865 open_file->netfid, len,
866 *poffset, &bytes_written,
867 iov, 1, long_op);
868 } else
869 /* BB FIXME fixup indentation of line below */
870 #endif
871 rc = CIFSSMBWrite(xid, pTcon,
872 open_file->netfid,
873 min_t(const int, cifs_sb->wsize,
874 write_size - total_written),
875 *poffset, &bytes_written,
876 write_data + total_written, NULL, long_op);
878 if (rc || (bytes_written == 0)) {
879 if (total_written)
880 break;
881 else {
882 FreeXid(xid);
883 return rc;
885 } else
886 *poffset += bytes_written;
887 long_op = FALSE; /* subsequent writes fast -
888 15 seconds is plenty */
891 cifs_stats_bytes_written(pTcon, total_written);
893 /* since the write may have blocked check these pointers again */
894 if (file->f_dentry) {
895 if (file->f_dentry->d_inode) {
896 file->f_dentry->d_inode->i_ctime =
897 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
898 if (total_written > 0) {
899 if (*poffset > file->f_dentry->d_inode->i_size)
900 i_size_write(file->f_dentry->d_inode,
901 *poffset);
903 mark_inode_dirty_sync(file->f_dentry->d_inode);
906 FreeXid(xid);
907 return total_written;
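/* Walk the inode's open file list and return a handle that was opened for
   writing (O_RDWR or O_WRONLY), reopening it first if the handle was
   invalidated by a reconnect; returns NULL if no usable writable handle exists */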
910 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
912 struct cifsFileInfo *open_file;
913 int rc;
915 read_lock(&GlobalSMBSeslock);
916 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
917 if (open_file->closePend)
918 continue;
919 if (open_file->pfile &&
920 ((open_file->pfile->f_flags & O_RDWR) ||
921 (open_file->pfile->f_flags & O_WRONLY))) {
922 read_unlock(&GlobalSMBSeslock);
923 if((open_file->invalidHandle) &&
924 (!open_file->closePend)) {
925 rc = cifs_reopen_file(&cifs_inode->vfs_inode,
926 open_file->pfile, FALSE);
927 /* if it fails, try another handle - might be */
928 /* dangerous to hold up writepages with retry */
929 if(rc) {
930 cFYI(1,("failed on reopen file in wp"));
931 read_lock(&GlobalSMBSeslock);
932 continue;
935 return open_file;
938 read_unlock(&GlobalSMBSeslock);
939 return NULL;
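/* Write the byte range [from, to) of this page back to the server via any
   available writable handle for the inode; returns 0 on success */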
942 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
944 struct address_space *mapping = page->mapping;
945 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
946 char *write_data;
947 int rc = -EFAULT;
948 int bytes_written = 0;
949 struct cifs_sb_info *cifs_sb;
950 struct cifsTconInfo *pTcon;
951 struct inode *inode;
952 struct cifsFileInfo *open_file;
954 if (!mapping || !mapping->host)
955 return -EFAULT;
957 inode = page->mapping->host;
958 cifs_sb = CIFS_SB(inode->i_sb);
959 pTcon = cifs_sb->tcon;
961 offset += (loff_t)from;
962 write_data = kmap(page);
963 write_data += from;
965 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
966 kunmap(page);
967 return -EIO;
970 /* racing with truncate? */
971 if (offset > mapping->host->i_size) {
972 kunmap(page);
973 return 0; /* don't care */
976 /* check to make sure that we are not extending the file */
977 if (mapping->host->i_size - offset < (loff_t)to)
978 to = (unsigned)(mapping->host->i_size - offset);
980 open_file = find_writable_file(CIFS_I(mapping->host));
981 if (open_file) {
982 bytes_written = cifs_write(open_file->pfile, write_data,
983 to-from, &offset);
984 /* Does mm or vfs already set times? */
985 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
986 if ((bytes_written > 0) && (offset)) {
987 rc = 0;
988 } else if (bytes_written < 0) {
989 if (rc != -EBADF)
990 rc = bytes_written;
992 } else {
993 cFYI(1, ("No writeable filehandles for inode"));
994 rc = -EIO;
997 kunmap(page);
998 return rc;
1001 #ifdef CONFIG_CIFS_EXPERIMENTAL
1002 static int cifs_writepages(struct address_space *mapping,
1003 struct writeback_control *wbc)
1005 struct backing_dev_info *bdi = mapping->backing_dev_info;
1006 unsigned int bytes_to_write;
1007 unsigned int bytes_written;
1008 struct cifs_sb_info *cifs_sb;
1009 int done = 0;
1010 pgoff_t end = -1;
1011 pgoff_t index;
1012 int is_range = 0;
1013 struct kvec iov[32];
1014 int n_iov = 0;
1015 pgoff_t next;
1016 int nr_pages;
1017 __u64 offset = 0;
1018 struct cifsFileInfo *open_file = NULL;
1019 struct page *page;
1020 struct pagevec pvec;
1021 int rc = 0;
1022 int scanned = 0;
1023 int xid;
1025 cifs_sb = CIFS_SB(mapping->host->i_sb);
1028 * If wsize is smaller than the page cache size, default to writing
1029 * one page at a time via cifs_writepage
1031 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1032 return generic_writepages(mapping, wbc);
1034 /* BB FIXME we do not have code to sign across multiple buffers yet,
1035 so go to older writepage style write which we can sign if needed */
1036 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1037 if(cifs_sb->tcon->ses->server->secMode &
1038 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1039 return generic_writepages(mapping, wbc);
1042 * BB: Is this meaningful for a non-block-device file system?
1043 * If it is, we should test it again after we do I/O
1045 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1046 wbc->encountered_congestion = 1;
1047 return 0;
1050 xid = GetXid();
1052 pagevec_init(&pvec, 0);
1053 if (wbc->sync_mode == WB_SYNC_NONE)
1054 index = mapping->writeback_index; /* Start from prev offset */
1055 else {
1056 index = 0;
1057 scanned = 1;
1059 if (wbc->start || wbc->end) {
1060 index = wbc->start >> PAGE_CACHE_SHIFT;
1061 end = wbc->end >> PAGE_CACHE_SHIFT;
1062 is_range = 1;
1063 scanned = 1;
1065 retry:
1066 while (!done && (index <= end) &&
1067 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1068 PAGECACHE_TAG_DIRTY,
1069 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1070 int first;
1071 unsigned int i;
1073 if (!open_file) {
1074 open_file = find_writable_file(CIFS_I(mapping->host));
1075 if (!open_file) {
1076 pagevec_release(&pvec);
1077 cERROR(1, ("No writable handles for inode"));
1078 return -EIO;
1082 first = -1;
1083 next = 0;
1084 n_iov = 0;
1085 bytes_to_write = 0;
1087 for (i = 0; i < nr_pages; i++) {
1088 page = pvec.pages[i];
1090 * At this point we hold neither mapping->tree_lock nor
1091 * lock on the page itself: the page may be truncated or
1092 * invalidated (changing page->mapping to NULL), or even
1093 * swizzled back from swapper_space to tmpfs file
1094 * mapping
1097 if (first < 0)
1098 lock_page(page);
1099 else if (TestSetPageLocked(page))
1100 break;
1102 if (unlikely(page->mapping != mapping)) {
1103 unlock_page(page);
1104 break;
1107 if (unlikely(is_range) && (page->index > end)) {
1108 done = 1;
1109 unlock_page(page);
1110 break;
1113 if (next && (page->index != next)) {
1114 /* Not next consecutive page */
1115 unlock_page(page);
1116 break;
1119 if (wbc->sync_mode != WB_SYNC_NONE)
1120 wait_on_page_writeback(page);
1122 if (PageWriteback(page) ||
1123 !test_clear_page_dirty(page)) {
1124 unlock_page(page);
1125 break;
1128 * BB can we get rid of this? pages are held by pvec
1130 page_cache_get(page);
1132 /* reserve iov[0] for the smb header */
1133 n_iov++;
1134 iov[n_iov].iov_base = kmap(page);
1135 iov[n_iov].iov_len = PAGE_CACHE_SIZE;
1136 bytes_to_write += PAGE_CACHE_SIZE;
1138 if (first < 0) {
1139 first = i;
1140 offset = page_offset(page);
1142 next = page->index + 1;
1143 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1144 break;
1146 if (n_iov) {
1147 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1148 open_file->netfid, bytes_to_write,
1149 offset, &bytes_written, iov, n_iov,
1151 if (rc || bytes_written < bytes_to_write) {
1152 cERROR(1,("CIFSSMBWrite2 returned %d, written = %x",
1153 rc, bytes_written));
1154 set_bit(AS_EIO, &mapping->flags);
1155 SetPageError(page);
1157 for (i = 0; i < n_iov; i++) {
1158 page = pvec.pages[first + i];
1159 kunmap(page);
1160 unlock_page(page);
1161 page_cache_release(page);
1163 if ((wbc->nr_to_write -= n_iov) <= 0)
1164 done = 1;
1165 index = next;
1167 pagevec_release(&pvec);
1169 if (!scanned && !done) {
1171 * We hit the last page and there is more work to be done: wrap
1172 * back to the start of the file
1174 scanned = 1;
1175 index = 0;
1176 goto retry;
1178 if (!is_range)
1179 mapping->writeback_index = index;
1181 FreeXid(xid);
1183 return rc;
1185 #endif
1187 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1189 int rc = -EFAULT;
1190 int xid;
1192 xid = GetXid();
1193 /* BB add check for wbc flags */
1194 page_cache_get(page);
1195 if (!PageUptodate(page)) {
1196 cFYI(1, ("ppw - page not up to date"));
1199 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1200 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1201 unlock_page(page);
1202 page_cache_release(page);
1203 FreeXid(xid);
1204 return rc;
1207 static int cifs_commit_write(struct file *file, struct page *page,
1208 unsigned offset, unsigned to)
1210 int xid;
1211 int rc = 0;
1212 struct inode *inode = page->mapping->host;
1213 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1214 char *page_data;
1216 xid = GetXid();
1217 cFYI(1, ("commit write for page %p up to position %lld for %d",
1218 page, position, to));
1219 if (position > inode->i_size) {
1220 i_size_write(inode, position);
1221 /* if (file->private_data == NULL) {
1222 rc = -EBADF;
1223 } else {
1224 open_file = (struct cifsFileInfo *)file->private_data;
1225 cifs_sb = CIFS_SB(inode->i_sb);
1226 rc = -EAGAIN;
1227 while (rc == -EAGAIN) {
1228 if ((open_file->invalidHandle) &&
1229 (!open_file->closePend)) {
1230 rc = cifs_reopen_file(
1231 file->f_dentry->d_inode, file);
1232 if (rc != 0)
1233 break;
1235 if (!open_file->closePend) {
1236 rc = CIFSSMBSetFileSize(xid,
1237 cifs_sb->tcon, position,
1238 open_file->netfid,
1239 open_file->pid, FALSE);
1240 } else {
1241 rc = -EBADF;
1242 break;
1245 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1246 } */
1248 if (!PageUptodate(page)) {
1249 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1250 /* can not rely on (or let) writepage write this data */
1251 if (to < offset) {
1252 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1253 offset, to));
1254 FreeXid(xid);
1255 return rc;
1257 /* this is probably better than directly calling
1258 partialpage_write since in this function the file handle is
1259 known which we might as well leverage */
1260 /* BB check if anything else missing out of ppw
1261 such as updating last write time */
1262 page_data = kmap(page);
1263 rc = cifs_write(file, page_data + offset, to-offset,
1264 &position);
1265 if (rc > 0)
1266 rc = 0;
1267 /* else if (rc < 0) should we set writebehind rc? */
1268 kunmap(page);
1269 } else {
1270 set_page_dirty(page);
1273 FreeXid(xid);
1274 return rc;
1277 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1279 int xid;
1280 int rc = 0;
1281 struct inode *inode = file->f_dentry->d_inode;
1283 xid = GetXid();
1285 cFYI(1, ("Sync file - name: %s datasync: 0x%x ",
1286 dentry->d_name.name, datasync));
1288 rc = filemap_fdatawrite(inode->i_mapping);
1289 if (rc == 0)
1290 CIFS_I(inode)->write_behind_rc = 0;
1291 FreeXid(xid);
1292 return rc;
1295 /* static int cifs_sync_page(struct page *page)
1297 struct address_space *mapping;
1298 struct inode *inode;
1299 unsigned long index = page->index;
1300 unsigned int rpages = 0;
1301 int rc = 0;
1303 cFYI(1, ("sync page %p",page));
1304 mapping = page->mapping;
1305 if (!mapping)
1306 return 0;
1307 inode = mapping->host;
1308 if (!inode)
1309 return 0; */
1311 /* fill in rpages then
1312 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1314 /* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
1316 if (rc < 0)
1317 return rc;
1318 return 0;
1319 } */
1322 * As file closes, flush all cached write data for this inode checking
1323 * for write behind errors.
1325 int cifs_flush(struct file *file)
1327 struct inode * inode = file->f_dentry->d_inode;
1328 int rc = 0;
1330 /* Rather than do the steps manually:
1331 lock the inode for writing
1332 loop through pages looking for write behind data (dirty pages)
1333 coalesce into contiguous 16K (or smaller) chunks to write to server
1334 send to server (prefer in parallel)
1335 deal with writebehind errors
1336 unlock inode for writing
1337 filemap_fdatawrite appears easier for the time being */
1339 rc = filemap_fdatawrite(inode->i_mapping);
1340 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1341 CIFS_I(inode)->write_behind_rc = 0;
1343 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1345 return rc;
1348 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1349 size_t read_size, loff_t *poffset)
1351 int rc = -EACCES;
1352 unsigned int bytes_read = 0;
1353 unsigned int total_read = 0;
1354 unsigned int current_read_size;
1355 struct cifs_sb_info *cifs_sb;
1356 struct cifsTconInfo *pTcon;
1357 int xid;
1358 struct cifsFileInfo *open_file;
1359 char *smb_read_data;
1360 char __user *current_offset;
1361 struct smb_com_read_rsp *pSMBr;
1363 xid = GetXid();
1364 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1365 pTcon = cifs_sb->tcon;
1367 if (file->private_data == NULL) {
1368 FreeXid(xid);
1369 return -EBADF;
1371 open_file = (struct cifsFileInfo *)file->private_data;
1373 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1374 cFYI(1, ("attempting read on write only file instance"));
1376 for (total_read = 0, current_offset = read_data;
1377 read_size > total_read;
1378 total_read += bytes_read, current_offset += bytes_read) {
1379 current_read_size = min_t(const int, read_size - total_read,
1380 cifs_sb->rsize);
1381 rc = -EAGAIN;
1382 smb_read_data = NULL;
1383 while (rc == -EAGAIN) {
1384 if ((open_file->invalidHandle) &&
1385 (!open_file->closePend)) {
1386 rc = cifs_reopen_file(file->f_dentry->d_inode,
1387 file, TRUE);
1388 if (rc != 0)
1389 break;
1391 rc = CIFSSMBRead(xid, pTcon,
1392 open_file->netfid,
1393 current_read_size, *poffset,
1394 &bytes_read, &smb_read_data);
1395 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1396 if (copy_to_user(current_offset,
1397 smb_read_data + 4 /* RFC1001 hdr */
1398 + le16_to_cpu(pSMBr->DataOffset),
1399 bytes_read)) {
1400 rc = -EFAULT;
1401 FreeXid(xid);
1402 return rc;
1404 if (smb_read_data) {
1405 cifs_buf_release(smb_read_data);
1406 smb_read_data = NULL;
1409 if (rc || (bytes_read == 0)) {
1410 if (total_read) {
1411 break;
1412 } else {
1413 FreeXid(xid);
1414 return rc;
1416 } else {
1417 cifs_stats_bytes_read(pTcon, bytes_read);
1418 *poffset += bytes_read;
1421 FreeXid(xid);
1422 return total_read;
1426 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1427 loff_t *poffset)
1429 int rc = -EACCES;
1430 unsigned int bytes_read = 0;
1431 unsigned int total_read;
1432 unsigned int current_read_size;
1433 struct cifs_sb_info *cifs_sb;
1434 struct cifsTconInfo *pTcon;
1435 int xid;
1436 char *current_offset;
1437 struct cifsFileInfo *open_file;
1439 xid = GetXid();
1440 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1441 pTcon = cifs_sb->tcon;
1443 if (file->private_data == NULL) {
1444 FreeXid(xid);
1445 return -EBADF;
1447 open_file = (struct cifsFileInfo *)file->private_data;
1449 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1450 cFYI(1, ("attempting read on write only file instance"));
1452 for (total_read = 0, current_offset = read_data;
1453 read_size > total_read;
1454 total_read += bytes_read, current_offset += bytes_read) {
1455 current_read_size = min_t(const int, read_size - total_read,
1456 cifs_sb->rsize);
1457 /* For windows me and 9x we do not want to request more
1458 than it negotiated since it will refuse the read then */
1459 if((pTcon->ses) &&
1460 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1461 current_read_size = min_t(const int, current_read_size,
1462 pTcon->ses->server->maxBuf - 128);
1464 rc = -EAGAIN;
1465 while (rc == -EAGAIN) {
1466 if ((open_file->invalidHandle) &&
1467 (!open_file->closePend)) {
1468 rc = cifs_reopen_file(file->f_dentry->d_inode,
1469 file, TRUE);
1470 if (rc != 0)
1471 break;
1473 rc = CIFSSMBRead(xid, pTcon,
1474 open_file->netfid,
1475 current_read_size, *poffset,
1476 &bytes_read, &current_offset);
1478 if (rc || (bytes_read == 0)) {
1479 if (total_read) {
1480 break;
1481 } else {
1482 FreeXid(xid);
1483 return rc;
1485 } else {
1486 cifs_stats_bytes_read(pTcon, total_read);
1487 *poffset += bytes_read;
1490 FreeXid(xid);
1491 return total_read;
1494 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1496 struct dentry *dentry = file->f_dentry;
1497 int rc, xid;
1499 xid = GetXid();
1500 rc = cifs_revalidate(dentry);
1501 if (rc) {
1502 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1503 FreeXid(xid);
1504 return rc;
1506 rc = generic_file_mmap(file, vma);
1507 FreeXid(xid);
1508 return rc;
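/* Copy data returned by an SMB read into the supplied list of page cache
   pages, zero-filling the tail of a final partial page and queueing each
   page onto the LRU pagevec once it is up to date */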
1512 static void cifs_copy_cache_pages(struct address_space *mapping,
1513 struct list_head *pages, int bytes_read, char *data,
1514 struct pagevec *plru_pvec)
1516 struct page *page;
1517 char *target;
1519 while (bytes_read > 0) {
1520 if (list_empty(pages))
1521 break;
1523 page = list_entry(pages->prev, struct page, lru);
1524 list_del(&page->lru);
1526 if (add_to_page_cache(page, mapping, page->index,
1527 GFP_KERNEL)) {
1528 page_cache_release(page);
1529 cFYI(1, ("Add page cache failed"));
1530 data += PAGE_CACHE_SIZE;
1531 bytes_read -= PAGE_CACHE_SIZE;
1532 continue;
1535 target = kmap_atomic(page,KM_USER0);
1537 if (PAGE_CACHE_SIZE > bytes_read) {
1538 memcpy(target, data, bytes_read);
1539 /* zero the tail end of this partial page */
1540 memset(target + bytes_read, 0,
1541 PAGE_CACHE_SIZE - bytes_read);
1542 bytes_read = 0;
1543 } else {
1544 memcpy(target, data, PAGE_CACHE_SIZE);
1545 bytes_read -= PAGE_CACHE_SIZE;
1547 kunmap_atomic(target, KM_USER0);
1549 flush_dcache_page(page);
1550 SetPageUptodate(page);
1551 unlock_page(page);
1552 if (!pagevec_add(plru_pvec, page))
1553 __pagevec_lru_add(plru_pvec);
1554 data += PAGE_CACHE_SIZE;
1556 return;
1559 static int cifs_readpages(struct file *file, struct address_space *mapping,
1560 struct list_head *page_list, unsigned num_pages)
1562 int rc = -EACCES;
1563 int xid;
1564 loff_t offset;
1565 struct page *page;
1566 struct cifs_sb_info *cifs_sb;
1567 struct cifsTconInfo *pTcon;
1568 int bytes_read = 0;
1569 unsigned int read_size,i;
1570 char *smb_read_data = NULL;
1571 struct smb_com_read_rsp *pSMBr;
1572 struct pagevec lru_pvec;
1573 struct cifsFileInfo *open_file;
1575 xid = GetXid();
1576 if (file->private_data == NULL) {
1577 FreeXid(xid);
1578 return -EBADF;
1580 open_file = (struct cifsFileInfo *)file->private_data;
1581 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1582 pTcon = cifs_sb->tcon;
1584 pagevec_init(&lru_pvec, 0);
1586 for (i = 0; i < num_pages; ) {
1587 unsigned contig_pages;
1588 struct page *tmp_page;
1589 unsigned long expected_index;
1591 if (list_empty(page_list))
1592 break;
1594 page = list_entry(page_list->prev, struct page, lru);
1595 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1597 /* count adjacent pages that we will read into */
1598 contig_pages = 0;
1599 expected_index =
1600 list_entry(page_list->prev, struct page, lru)->index;
1601 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1602 if (tmp_page->index == expected_index) {
1603 contig_pages++;
1604 expected_index++;
1605 } else
1606 break;
1608 if (contig_pages + i > num_pages)
1609 contig_pages = num_pages - i;
1611 /* for reads over a certain size could initiate async
1612 read ahead */
1614 read_size = contig_pages * PAGE_CACHE_SIZE;
1615 /* Read size needs to be in multiples of one page */
1616 read_size = min_t(const unsigned int, read_size,
1617 cifs_sb->rsize & PAGE_CACHE_MASK);
1619 rc = -EAGAIN;
1620 while (rc == -EAGAIN) {
1621 if ((open_file->invalidHandle) &&
1622 (!open_file->closePend)) {
1623 rc = cifs_reopen_file(file->f_dentry->d_inode,
1624 file, TRUE);
1625 if (rc != 0)
1626 break;
1629 rc = CIFSSMBRead(xid, pTcon,
1630 open_file->netfid,
1631 read_size, offset,
1632 &bytes_read, &smb_read_data);
1634 /* BB more RC checks ? */
1635 if (rc== -EAGAIN) {
1636 if (smb_read_data) {
1637 cifs_buf_release(smb_read_data);
1638 smb_read_data = NULL;
1642 if ((rc < 0) || (smb_read_data == NULL)) {
1643 cFYI(1, ("Read error in readpages: %d", rc));
1644 /* clean up remaining pages off list */
1645 while (!list_empty(page_list) && (i < num_pages)) {
1646 page = list_entry(page_list->prev, struct page,
1647 lru);
1648 list_del(&page->lru);
1649 page_cache_release(page);
1651 break;
1652 } else if (bytes_read > 0) {
1653 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1654 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1655 smb_read_data + 4 /* RFC1001 hdr */ +
1656 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1658 i += bytes_read >> PAGE_CACHE_SHIFT;
1659 cifs_stats_bytes_read(pTcon, bytes_read);
1660 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1661 i++; /* account for partial page */
1663 /* server copy of file can have smaller size
1664 than client */
1665 /* BB do we need to verify this common case ?
1666 this case is ok - if we are at server EOF
1667 we will hit it on next read */
1669 /* while (!list_empty(page_list) && (i < num_pages)) {
1670 page = list_entry(page_list->prev,
1671 struct page, list);
1672 list_del(&page->list);
1673 page_cache_release(page);
1675 break; */
1677 } else {
1678 cFYI(1, ("No bytes read (%d) at offset %lld . "
1679 "Cleaning remaining pages from readahead list",
1680 bytes_read, offset));
1681 /* BB turn off caching and do new lookup on
1682 file size at server? */
1683 while (!list_empty(page_list) && (i < num_pages)) {
1684 page = list_entry(page_list->prev, struct page,
1685 lru);
1686 list_del(&page->lru);
1688 /* BB removeme - replace with zero of page? */
1689 page_cache_release(page);
1691 break;
1693 if (smb_read_data) {
1694 cifs_buf_release(smb_read_data);
1695 smb_read_data = NULL;
1697 bytes_read = 0;
1700 pagevec_lru_add(&lru_pvec);
1702 /* need to free smb_read_data buf before exit */
1703 if (smb_read_data) {
1704 cifs_buf_release(smb_read_data);
1705 smb_read_data = NULL;
1708 FreeXid(xid);
1709 return rc;
1712 static int cifs_readpage_worker(struct file *file, struct page *page,
1713 loff_t *poffset)
1715 char *read_data;
1716 int rc;
1718 page_cache_get(page);
1719 read_data = kmap(page);
1720 /* for reads over a certain size could initiate async read ahead */
1722 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1724 if (rc < 0)
1725 goto io_error;
1726 else
1727 cFYI(1, ("Bytes read %d ",rc));
1729 file->f_dentry->d_inode->i_atime =
1730 current_fs_time(file->f_dentry->d_inode->i_sb);
1732 if (PAGE_CACHE_SIZE > rc)
1733 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1735 flush_dcache_page(page);
1736 SetPageUptodate(page);
1737 rc = 0;
1739 io_error:
1740 kunmap(page);
1741 page_cache_release(page);
1742 return rc;
1745 static int cifs_readpage(struct file *file, struct page *page)
1747 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1748 int rc = -EACCES;
1749 int xid;
1751 xid = GetXid();
1753 if (file->private_data == NULL) {
1754 FreeXid(xid);
1755 return -EBADF;
1758 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1759 page, (int)offset, (int)offset));
1761 rc = cifs_readpage_worker(file, page, &offset);
1763 unlock_page(page);
1765 FreeXid(xid);
1766 return rc;
1769 /* We do not want to update the file size from server for inodes
1770 open for write - to avoid races with writepage extending
1771 the file - in the future we could consider allowing
1772 refreshing the inode only on increases in the file size
1773 but this is tricky to do without racing with writebehind
1774 page caching in the current Linux kernel design */
1775 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1777 if (cifsInode && find_writable_file(cifsInode))
1778 return 0;
1779 else
1780 return 1;
1783 static int cifs_prepare_write(struct file *file, struct page *page,
1784 unsigned from, unsigned to)
1786 int rc = 0;
1787 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1788 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1789 if (!PageUptodate(page)) {
1790 /* if (to - from != PAGE_CACHE_SIZE) {
1791 void *kaddr = kmap_atomic(page, KM_USER0);
1792 memset(kaddr, 0, from);
1793 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1794 flush_dcache_page(page);
1795 kunmap_atomic(kaddr, KM_USER0);
1796 } */
1797 /* If we are writing a full page it will be up to date,
1798 no need to read from the server */
1799 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1800 SetPageUptodate(page);
1802 /* might as well read a page, it is fast enough */
1803 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1804 rc = cifs_readpage_worker(file, page, &offset);
1805 } else {
1806 /* should we try using another file handle if there is one -
1807 how would we lock it to prevent close of that handle
1808 racing with this read?
1809 In any case this will be written out by commit_write */
1813 /* BB should we pass any errors back?
1814 e.g. if we do not have read access to the file */
1815 return 0;
1818 struct address_space_operations cifs_addr_ops = {
1819 .readpage = cifs_readpage,
1820 .readpages = cifs_readpages,
1821 .writepage = cifs_writepage,
1822 #ifdef CONFIG_CIFS_EXPERIMENTAL
1823 .writepages = cifs_writepages,
1824 #endif
1825 .prepare_write = cifs_prepare_write,
1826 .commit_write = cifs_commit_write,
1827 .set_page_dirty = __set_page_dirty_nobuffers,
1828 /* .sync_page = cifs_sync_page, */
1829 /* .direct_IO = */