/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
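
/*
 * Map the POSIX access mode from the VFS open flags onto the NT-style
 * desired access mask requested in an SMB open. Note that O_RDWR
 * deliberately asks for GENERIC_READ | GENERIC_WRITE rather than
 * GENERIC_ALL, which can trigger spurious access-denied errors on create.
 */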
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
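
/*
 * Open a file via the CIFS Unix Extensions' POSIX create call. On success
 * the returned FILE_UNIX_BASIC_INFO is used to instantiate or refresh the
 * inode when the caller passed *pinode.
 */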
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
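
/*
 * Return true if any open fid on this inode currently has byte-range
 * locks recorded; takes lock_sem for reading.
 */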
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}
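
/*
 * Allocate the per-open cifsFileInfo, link it into the inode's and the
 * tcon's open-file lists, and record the negotiated oplock level on the
 * fid via the server's set_fid op.
 */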
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
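
/*
 * Walk the tree connection's open-file list and reopen any invalidated
 * persistent handles, e.g. after a reconnect; handles that still fail to
 * reopen leave need_reopen_files set so the walk is retried later.
 */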
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
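
/*
 * Allocate and initialize a cifsLockInfo record for a byte-range lock
 * owned by the current thread group.
 */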
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}
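
/*
 * Push all cached mandatory byte-range locks for this fid to the server,
 * batching them into LOCKING_ANDX_RANGE arrays sized to the negotiated
 * maxBuf and sending each batch with cifs_lockv().
 */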
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
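
/*
 * fl_owner_t is a kernel pointer, so rather than being sent to the server
 * as-is it is hashed together with cifs_lock_secret (a random per-boot
 * value defined elsewhere in the client) to produce a wire lock owner id.
 */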
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->fl_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
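
/* Move every entry on @source to the tail of @dest. */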
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}
void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapped locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX && !rc)
		rc = locks_lock_file_wait(file, flock);
	return rc;
}
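
/*
 * VFS ->lock entry point for byte-range locking: decode the request with
 * cifs_read_flock(), then service F_GETLK via cifs_getlk() and set/unset
 * requests via cifs_setlk().
 */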
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_FILE_SB(file);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
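
/*
 * Synchronously write out a buffer, looping over wp_retry_size() sized
 * chunks and reopening an invalidated handle as needed; updates the cached
 * server EOF and i_size under i_lock as the data goes out.
 */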
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
					&io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
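
/*
 * Find a readable, still-valid open handle for the inode, optionally
 * restricted to the current fsuid on multiuser mounts. Returns a
 * referenced cifsFileInfo or NULL.
 */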
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}
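
/*
 * Write the byte range [from, to) of a cached page back to the server
 * through any writable handle on the inode, clamping the range so the
 * write never extends the file.
 */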
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
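
/*
 * Allocate a cifs_writedata and grab up to @tofind dirty pages from the
 * mapping starting at *index, for use by cifs_writepages() below.
 */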
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	*found_pages = find_get_pages_range_tag(mapping, index, end,
				PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
	return wdata;
}
1978 static unsigned int
1979 wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
1980 struct address_space *mapping,
1981 struct writeback_control *wbc,
1982 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
1984 unsigned int nr_pages = 0, i;
1985 struct page *page;
1987 for (i = 0; i < found_pages; i++) {
1988 page = wdata->pages[i];
1990 * At this point we hold neither mapping->tree_lock nor
1991 * lock on the page itself: the page may be truncated or
1992 * invalidated (changing page->mapping to NULL), or even
1993 * swizzled back from swapper_space to tmpfs file
1994 * mapping
1997 if (nr_pages == 0)
1998 lock_page(page);
1999 else if (!trylock_page(page))
2000 break;
2002 if (unlikely(page->mapping != mapping)) {
2003 unlock_page(page);
2004 break;
2007 if (!wbc->range_cyclic && page->index > end) {
2008 *done = true;
2009 unlock_page(page);
2010 break;
2013 if (*next && (page->index != *next)) {
2014 /* Not next consecutive page */
2015 unlock_page(page);
2016 break;
2019 if (wbc->sync_mode != WB_SYNC_NONE)
2020 wait_on_page_writeback(page);
2022 if (PageWriteback(page) ||
2023 !clear_page_dirty_for_io(page)) {
2024 unlock_page(page);
2025 break;
2028 /*
2029 * This actually clears the dirty bit in the radix tree.
2030 * See cifs_writepage() for more commentary.
2031 */
2032 set_page_writeback(page);
2033 if (page_offset(page) >= i_size_read(mapping->host)) {
2034 *done = true;
2035 unlock_page(page);
2036 end_page_writeback(page);
2037 break;
2040 wdata->pages[i] = page;
2041 *next = page->index + 1;
2042 ++nr_pages;
2045 /* reset index to refind any pages skipped */
2046 if (nr_pages == 0)
2047 *index = wdata->pages[0]->index + 1;
2049 /* put any pages we aren't going to use */
2050 for (i = nr_pages; i < found_pages; i++) {
2051 put_page(wdata->pages[i]);
2052 wdata->pages[i] = NULL;
2055 return nr_pages;
2058 static int
2059 wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2060 struct address_space *mapping, struct writeback_control *wbc)
2062 int rc = 0;
2063 struct TCP_Server_Info *server;
2064 unsigned int i;
2066 wdata->sync_mode = wbc->sync_mode;
2067 wdata->nr_pages = nr_pages;
2068 wdata->offset = page_offset(wdata->pages[0]);
2069 wdata->pagesz = PAGE_SIZE;
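/* the final page may extend past i_size; only the portion below EOF (the "tail") is sent */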
2070 wdata->tailsz = min(i_size_read(mapping->host) -
2071 page_offset(wdata->pages[nr_pages - 1]),
2072 (loff_t)PAGE_SIZE);
2073 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
2075 if (wdata->cfile != NULL)
2076 cifsFileInfo_put(wdata->cfile);
2077 wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
2078 if (!wdata->cfile) {
2079 cifs_dbg(VFS, "No writable handles for inode\n");
2080 rc = -EBADF;
2081 } else {
2082 wdata->pid = wdata->cfile->pid;
2083 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2084 rc = server->ops->async_writev(wdata, cifs_writedata_release);
2087 for (i = 0; i < nr_pages; ++i)
2088 unlock_page(wdata->pages[i]);
2090 return rc;
2093 static int cifs_writepages(struct address_space *mapping,
2094 struct writeback_control *wbc)
2096 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
2097 struct TCP_Server_Info *server;
2098 bool done = false, scanned = false, range_whole = false;
2099 pgoff_t end, index;
2100 struct cifs_writedata *wdata;
2101 int rc = 0;
2103 /*
2104 * If wsize is smaller than the page cache size, default to writing
2105 * one page at a time via cifs_writepage
2106 */
2107 if (cifs_sb->wsize < PAGE_SIZE)
2108 return generic_writepages(mapping, wbc);
2110 if (wbc->range_cyclic) {
2111 index = mapping->writeback_index; /* Start from prev offset */
2112 end = -1;
2113 } else {
2114 index = wbc->range_start >> PAGE_SHIFT;
2115 end = wbc->range_end >> PAGE_SHIFT;
2116 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2117 range_whole = true;
2118 scanned = true;
2120 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
2121 retry:
2122 while (!done && index <= end) {
2123 unsigned int i, nr_pages, found_pages, wsize, credits;
2124 pgoff_t next = 0, tofind, saved_index = index;
2126 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2127 &wsize, &credits);
2128 if (rc)
2129 break;
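/* look for enough dirty pages to fill wsize, clamped to the end of the range, but always at least one */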
2131 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2133 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2134 &found_pages);
2135 if (!wdata) {
2136 rc = -ENOMEM;
2137 add_credits_and_wake_if(server, credits, 0);
2138 break;
2141 if (found_pages == 0) {
2142 kref_put(&wdata->refcount, cifs_writedata_release);
2143 add_credits_and_wake_if(server, credits, 0);
2144 break;
2147 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2148 end, &index, &next, &done);
2150 /* nothing to write? */
2151 if (nr_pages == 0) {
2152 kref_put(&wdata->refcount, cifs_writedata_release);
2153 add_credits_and_wake_if(server, credits, 0);
2154 continue;
2157 wdata->credits = credits;
2159 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
2161 /* send failure -- clean up the mess */
2162 if (rc != 0) {
2163 add_credits_and_wake_if(server, wdata->credits, 0);
2164 for (i = 0; i < nr_pages; ++i) {
2165 if (rc == -EAGAIN)
2166 redirty_page_for_writepage(wbc,
2167 wdata->pages[i]);
2168 else
2169 SetPageError(wdata->pages[i]);
2170 end_page_writeback(wdata->pages[i]);
2171 put_page(wdata->pages[i]);
2173 if (rc != -EAGAIN)
2174 mapping_set_error(mapping, rc);
2176 kref_put(&wdata->refcount, cifs_writedata_release);
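/* for data-integrity writeback, retry the same range after a retryable send failure */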
2178 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2179 index = saved_index;
2180 continue;
2183 wbc->nr_to_write -= nr_pages;
2184 if (wbc->nr_to_write <= 0)
2185 done = true;
2187 index = next;
2190 if (!scanned && !done) {
2191 /*
2192 * We hit the last page and there is more work to be done: wrap
2193 * back to the start of the file
2194 */
2195 scanned = true;
2196 index = 0;
2197 goto retry;
2200 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2201 mapping->writeback_index = index;
2203 return rc;
2206 static int
2207 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2209 int rc;
2210 unsigned int xid;
2212 xid = get_xid();
2213 /* BB add check for wbc flags */
2214 get_page(page);
2215 if (!PageUptodate(page))
2216 cifs_dbg(FYI, "ppw - page not up to date\n");
2218 /*
2219 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2220 *
2221 * A writepage() implementation always needs to do either this,
2222 * or re-dirty the page with "redirty_page_for_writepage()" in
2223 * the case of a failure.
2224 *
2225 * Just unlocking the page will cause the radix tree tag-bits
2226 * to fail to update with the state of the page correctly.
2227 */
2228 set_page_writeback(page);
2229 retry_write:
2230 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2231 if (rc == -EAGAIN) {
2232 if (wbc->sync_mode == WB_SYNC_ALL)
2233 goto retry_write;
2234 redirty_page_for_writepage(wbc, page);
2235 } else if (rc != 0) {
2236 SetPageError(page);
2237 mapping_set_error(page->mapping, rc);
2238 } else {
2239 SetPageUptodate(page);
2241 end_page_writeback(page);
2242 put_page(page);
2243 free_xid(xid);
2244 return rc;
2247 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2249 int rc = cifs_writepage_locked(page, wbc);
2250 unlock_page(page);
2251 return rc;
2254 static int cifs_write_end(struct file *file, struct address_space *mapping,
2255 loff_t pos, unsigned len, unsigned copied,
2256 struct page *page, void *fsdata)
2258 int rc;
2259 struct inode *inode = mapping->host;
2260 struct cifsFileInfo *cfile = file->private_data;
2261 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2262 __u32 pid;
2264 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2265 pid = cfile->pid;
2266 else
2267 pid = current->tgid;
2269 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
2270 page, pos, copied);
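/* PageChecked was set by write_begin after it zeroed the parts of the page not being written, so a full-length copy completes the page */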
2272 if (PageChecked(page)) {
2273 if (copied == len)
2274 SetPageUptodate(page);
2275 ClearPageChecked(page);
2276 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
2277 SetPageUptodate(page);
2279 if (!PageUptodate(page)) {
2280 char *page_data;
2281 unsigned offset = pos & (PAGE_SIZE - 1);
2282 unsigned int xid;
2284 xid = get_xid();
2285 /* this is probably better than directly calling
2286 partialpage_write since in this function the file handle is
2287 known, which we might as well leverage */
2288 /* BB check if anything else is missing out of ppw,
2289 such as updating the last write time */
2290 page_data = kmap(page);
2291 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
2292 /* if (rc < 0) should we set writebehind rc? */
2293 kunmap(page);
2295 free_xid(xid);
2296 } else {
2297 rc = copied;
2298 pos += copied;
2299 set_page_dirty(page);
2302 if (rc > 0) {
2303 spin_lock(&inode->i_lock);
2304 if (pos > inode->i_size)
2305 i_size_write(inode, pos);
2306 spin_unlock(&inode->i_lock);
2309 unlock_page(page);
2310 put_page(page);
2312 return rc;
2315 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2316 int datasync)
2318 unsigned int xid;
2319 int rc = 0;
2320 struct cifs_tcon *tcon;
2321 struct TCP_Server_Info *server;
2322 struct cifsFileInfo *smbfile = file->private_data;
2323 struct inode *inode = file_inode(file);
2324 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2326 rc = file_write_and_wait_range(file, start, end);
2327 if (rc)
2328 return rc;
2329 inode_lock(inode);
2331 xid = get_xid();
2333 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2334 file, datasync);
2336 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2337 rc = cifs_zap_mapping(inode);
2338 if (rc) {
2339 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2340 rc = 0; /* don't care about it in fsync */
2344 tcon = tlink_tcon(smbfile->tlink);
2345 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2346 server = tcon->ses->server;
2347 if (server->ops->flush)
2348 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2349 else
2350 rc = -ENOSYS;
2353 free_xid(xid);
2354 inode_unlock(inode);
2355 return rc;
2358 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2360 unsigned int xid;
2361 int rc = 0;
2362 struct cifs_tcon *tcon;
2363 struct TCP_Server_Info *server;
2364 struct cifsFileInfo *smbfile = file->private_data;
2365 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2366 struct inode *inode = file->f_mapping->host;
2368 rc = file_write_and_wait_range(file, start, end);
2369 if (rc)
2370 return rc;
2371 inode_lock(inode);
2373 xid = get_xid();
2375 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2376 file, datasync);
2378 tcon = tlink_tcon(smbfile->tlink);
2379 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2380 server = tcon->ses->server;
2381 if (server->ops->flush)
2382 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2383 else
2384 rc = -ENOSYS;
2387 free_xid(xid);
2388 inode_unlock(inode);
2389 return rc;
2393 * As file closes, flush all cached write data for this inode checking
2394 * for write behind errors.
2396 int cifs_flush(struct file *file, fl_owner_t id)
2398 struct inode *inode = file_inode(file);
2399 int rc = 0;
2401 if (file->f_mode & FMODE_WRITE)
2402 rc = filemap_write_and_wait(inode->i_mapping);
2404 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2406 return rc;
2409 static int
2410 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2412 int rc = 0;
2413 unsigned long i;
2415 for (i = 0; i < num_pages; i++) {
2416 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2417 if (!pages[i]) {
2418 /*
2419 * save number of pages we have already allocated and
2420 * return with ENOMEM error
2421 */
2422 num_pages = i;
2423 rc = -ENOMEM;
2424 break;
2428 if (rc) {
2429 for (i = 0; i < num_pages; i++)
2430 put_page(pages[i]);
2432 return rc;
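/* how many pages are needed for min(len, wsize) bytes; the clamped length is also returned via cur_len */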
2435 static inline
2436 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2438 size_t num_pages;
2439 size_t clen;
2441 clen = min_t(const size_t, len, wsize);
2442 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2444 if (cur_len)
2445 *cur_len = clen;
2447 return num_pages;
2450 static void
2451 cifs_uncached_writedata_release(struct kref *refcount)
2453 int i;
2454 struct cifs_writedata *wdata = container_of(refcount,
2455 struct cifs_writedata, refcount);
2457 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
2458 for (i = 0; i < wdata->nr_pages; i++)
2459 put_page(wdata->pages[i]);
2460 cifs_writedata_release(refcount);
2463 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2465 static void
2466 cifs_uncached_writev_complete(struct work_struct *work)
2468 struct cifs_writedata *wdata = container_of(work,
2469 struct cifs_writedata, work);
2470 struct inode *inode = d_inode(wdata->cfile->dentry);
2471 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2473 spin_lock(&inode->i_lock);
2474 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2475 if (cifsi->server_eof > inode->i_size)
2476 i_size_write(inode, cifsi->server_eof);
2477 spin_unlock(&inode->i_lock);
2479 complete(&wdata->done);
2480 collect_uncached_write_data(wdata->ctx);
2481 /* the below call can possibly free the last ref to aio ctx */
2482 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2485 static int
2486 wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2487 size_t *len, unsigned long *num_pages)
2489 size_t save_len, copied, bytes, cur_len = *len;
2490 unsigned long i, nr_pages = *num_pages;
2492 save_len = cur_len;
2493 for (i = 0; i < nr_pages; i++) {
2494 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2495 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2496 cur_len -= copied;
2497 /*
2498 * If we didn't copy as much as we expected, then that
2499 * may mean we trod into an unmapped area. Stop copying
2500 * at that point. On the next pass through the big
2501 * loop, we'll likely end up getting a zero-length
2502 * write and bailing out of it.
2503 */
2504 if (copied < bytes)
2505 break;
2507 cur_len = save_len - cur_len;
2508 *len = cur_len;
2510 /*
2511 * If we have no data to send, then that probably means that
2512 * the copy above failed altogether. That's most likely because
2513 * the address in the iovec was bogus. Return -EFAULT and let
2514 * the caller free anything we allocated and bail out.
2515 */
2516 if (!cur_len)
2517 return -EFAULT;
2519 /*
2520 * i + 1 now represents the number of pages we actually used in
2521 * the copy phase above.
2522 */
2523 *num_pages = i + 1;
2524 return 0;
2527 static int
2528 cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2529 struct cifsFileInfo *open_file,
2530 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
2531 struct cifs_aio_ctx *ctx)
2533 int rc = 0;
2534 size_t cur_len;
2535 unsigned long nr_pages, num_pages, i;
2536 struct cifs_writedata *wdata;
2537 struct iov_iter saved_from = *from;
2538 loff_t saved_offset = offset;
2539 pid_t pid;
2540 struct TCP_Server_Info *server;
2542 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2543 pid = open_file->pid;
2544 else
2545 pid = current->tgid;
2547 server = tlink_tcon(open_file->tlink)->ses->server;
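/* carve the remaining data into wsize-sized chunks: each pass reserves send credits, copies user data into freshly allocated pages, and issues one async write */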
2549 do {
2550 unsigned int wsize, credits;
2552 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2553 &wsize, &credits);
2554 if (rc)
2555 break;
2557 nr_pages = get_numpages(wsize, len, &cur_len);
2558 wdata = cifs_writedata_alloc(nr_pages,
2559 cifs_uncached_writev_complete);
2560 if (!wdata) {
2561 rc = -ENOMEM;
2562 add_credits_and_wake_if(server, credits, 0);
2563 break;
2566 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2567 if (rc) {
2568 kfree(wdata);
2569 add_credits_and_wake_if(server, credits, 0);
2570 break;
2573 num_pages = nr_pages;
2574 rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
2575 if (rc) {
2576 for (i = 0; i < nr_pages; i++)
2577 put_page(wdata->pages[i]);
2578 kfree(wdata);
2579 add_credits_and_wake_if(server, credits, 0);
2580 break;
2583 /*
2584 * Bring nr_pages down to the number of pages we actually used,
2585 * and free any pages that we didn't use.
2586 */
2587 for ( ; nr_pages > num_pages; nr_pages--)
2588 put_page(wdata->pages[nr_pages - 1]);
2590 wdata->sync_mode = WB_SYNC_ALL;
2591 wdata->nr_pages = nr_pages;
2592 wdata->offset = (__u64)offset;
2593 wdata->cfile = cifsFileInfo_get(open_file);
2594 wdata->pid = pid;
2595 wdata->bytes = cur_len;
2596 wdata->pagesz = PAGE_SIZE;
2597 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2598 wdata->credits = credits;
2599 wdata->ctx = ctx;
2600 kref_get(&ctx->refcount);
2602 if (!wdata->cfile->invalidHandle ||
2603 !(rc = cifs_reopen_file(wdata->cfile, false)))
2604 rc = server->ops->async_writev(wdata,
2605 cifs_uncached_writedata_release);
2606 if (rc) {
2607 add_credits_and_wake_if(server, wdata->credits, 0);
2608 kref_put(&wdata->refcount,
2609 cifs_uncached_writedata_release);
2610 if (rc == -EAGAIN) {
2611 *from = saved_from;
2612 iov_iter_advance(from, offset - saved_offset);
2613 continue;
2615 break;
2618 list_add_tail(&wdata->list, wdata_list);
2619 offset += cur_len;
2620 len -= cur_len;
2621 } while (len > 0);
2623 return rc;
2626 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
2628 struct cifs_writedata *wdata, *tmp;
2629 struct cifs_tcon *tcon;
2630 struct cifs_sb_info *cifs_sb;
2631 struct dentry *dentry = ctx->cfile->dentry;
2632 unsigned int i;
2633 int rc;
2635 tcon = tlink_tcon(ctx->cfile->tlink);
2636 cifs_sb = CIFS_SB(dentry->d_sb);
2638 mutex_lock(&ctx->aio_mutex);
2640 if (list_empty(&ctx->list)) {
2641 mutex_unlock(&ctx->aio_mutex);
2642 return;
2645 rc = ctx->rc;
2646 /*
2647 * Wait for and collect replies for any successful sends in order of
2648 * increasing offset. Once an error is hit, then return without waiting
2649 * for any more replies.
2650 */
2651 restart_loop:
2652 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
2653 if (!rc) {
2654 if (!try_wait_for_completion(&wdata->done)) {
2655 mutex_unlock(&ctx->aio_mutex);
2656 return;
2659 if (wdata->result)
2660 rc = wdata->result;
2661 else
2662 ctx->total_len += wdata->bytes;
2664 /* resend call if it's a retryable error */
2665 if (rc == -EAGAIN) {
2666 struct list_head tmp_list;
2667 struct iov_iter tmp_from = ctx->iter;
2669 INIT_LIST_HEAD(&tmp_list);
2670 list_del_init(&wdata->list);
2672 iov_iter_advance(&tmp_from,
2673 wdata->offset - ctx->pos);
2675 rc = cifs_write_from_iter(wdata->offset,
2676 wdata->bytes, &tmp_from,
2677 ctx->cfile, cifs_sb, &tmp_list,
2678 ctx);
2680 list_splice(&tmp_list, &ctx->list);
2682 kref_put(&wdata->refcount,
2683 cifs_uncached_writedata_release);
2684 goto restart_loop;
2687 list_del_init(&wdata->list);
2688 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2691 for (i = 0; i < ctx->npages; i++)
2692 put_page(ctx->bv[i].bv_page);
2694 cifs_stats_bytes_written(tcon, ctx->total_len);
2695 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
2697 ctx->rc = (rc == 0) ? ctx->total_len : rc;
2699 mutex_unlock(&ctx->aio_mutex);
2701 if (ctx->iocb && ctx->iocb->ki_complete)
2702 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
2703 else
2704 complete(&ctx->done);
2707 ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
2709 struct file *file = iocb->ki_filp;
2710 ssize_t total_written = 0;
2711 struct cifsFileInfo *cfile;
2712 struct cifs_tcon *tcon;
2713 struct cifs_sb_info *cifs_sb;
2714 struct cifs_aio_ctx *ctx;
2715 struct iov_iter saved_from = *from;
2716 int rc;
2718 /*
2719 * BB - optimize the way when signing is disabled. We can drop this
2720 * extra memory-to-memory copying and use iovec buffers for constructing
2721 * the write request.
2722 */
2724 rc = generic_write_checks(iocb, from);
2725 if (rc <= 0)
2726 return rc;
2728 cifs_sb = CIFS_FILE_SB(file);
2729 cfile = file->private_data;
2730 tcon = tlink_tcon(cfile->tlink);
2732 if (!tcon->ses->server->ops->async_writev)
2733 return -ENOSYS;
2735 ctx = cifs_aio_ctx_alloc();
2736 if (!ctx)
2737 return -ENOMEM;
2739 ctx->cfile = cifsFileInfo_get(cfile);
2741 if (!is_sync_kiocb(iocb))
2742 ctx->iocb = iocb;
2744 ctx->pos = iocb->ki_pos;
2746 rc = setup_aio_ctx_iter(ctx, from, WRITE);
2747 if (rc) {
2748 kref_put(&ctx->refcount, cifs_aio_ctx_release);
2749 return rc;
2752 /* grab a lock here because response handlers can access ctx */
2753 mutex_lock(&ctx->aio_mutex);
2755 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
2756 cfile, cifs_sb, &ctx->list, ctx);
2758 /*
2759 * If at least one write was successfully sent, then discard any rc
2760 * value from the later writes. If the other write succeeds, then
2761 * we'll end up returning whatever was written. If it fails, then
2762 * we'll get a new rc value from that.
2763 */
2764 if (!list_empty(&ctx->list))
2765 rc = 0;
2767 mutex_unlock(&ctx->aio_mutex);
2769 if (rc) {
2770 kref_put(&ctx->refcount, cifs_aio_ctx_release);
2771 return rc;
2774 if (!is_sync_kiocb(iocb)) {
2775 kref_put(&ctx->refcount, cifs_aio_ctx_release);
2776 return -EIOCBQUEUED;
2779 rc = wait_for_completion_killable(&ctx->done);
2780 if (rc) {
2781 mutex_lock(&ctx->aio_mutex);
2782 ctx->rc = rc = -EINTR;
2783 total_written = ctx->total_len;
2784 mutex_unlock(&ctx->aio_mutex);
2785 } else {
2786 rc = ctx->rc;
2787 total_written = ctx->total_len;
2790 kref_put(&ctx->refcount, cifs_aio_ctx_release);
2792 if (unlikely(!total_written))
2793 return rc;
2795 iocb->ki_pos += total_written;
2796 return total_written;
2799 static ssize_t
2800 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2802 struct file *file = iocb->ki_filp;
2803 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2804 struct inode *inode = file->f_mapping->host;
2805 struct cifsInodeInfo *cinode = CIFS_I(inode);
2806 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2807 ssize_t rc;
2809 inode_lock(inode);
2810 /*
2811 * We need to hold the sem to be sure nobody modifies lock list
2812 * with a brlock that prevents writing.
2813 */
2814 down_read(&cinode->lock_sem);
2816 rc = generic_write_checks(iocb, from);
2817 if (rc <= 0)
2818 goto out;
2820 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2821 server->vals->exclusive_lock_type, NULL,
2822 CIFS_WRITE_OP))
2823 rc = __generic_file_write_iter(iocb, from);
2824 else
2825 rc = -EACCES;
2826 out:
2827 up_read(&cinode->lock_sem);
2828 inode_unlock(inode);
2830 if (rc > 0)
2831 rc = generic_write_sync(iocb, rc);
2832 return rc;
2835 ssize_t
2836 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2838 struct inode *inode = file_inode(iocb->ki_filp);
2839 struct cifsInodeInfo *cinode = CIFS_I(inode);
2840 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2841 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2842 iocb->ki_filp->private_data;
2843 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2844 ssize_t written;
2846 written = cifs_get_writer(cinode);
2847 if (written)
2848 return written;
2850 if (CIFS_CACHE_WRITE(cinode)) {
2851 if (cap_unix(tcon->ses) &&
2852 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2853 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2854 written = generic_file_write_iter(iocb, from);
2855 goto out;
2857 written = cifs_writev(iocb, from);
2858 goto out;
2860 /*
2861 * For non-oplocked files in strict cache mode we need to write the data
2862 * to the server exactly from the pos to pos+len-1 rather than flush all
2863 * affected pages because it may cause an error with mandatory locks on
2864 * these pages but not on the region from pos to pos+len-1.
2865 */
2866 written = cifs_user_writev(iocb, from);
2867 if (written > 0 && CIFS_CACHE_READ(cinode)) {
2868 /*
2869 * Windows 7 server can delay breaking level2 oplock if a write
2870 * request comes - break it on the client to prevent reading
2871 * stale data.
2872 */
2873 cifs_zap_mapping(inode);
2874 cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
2875 inode);
2876 cinode->oplock = 0;
2878 out:
2879 cifs_put_writer(cinode);
2880 return written;
2883 static struct cifs_readdata *
2884 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
2886 struct cifs_readdata *rdata;
2888 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2889 GFP_KERNEL);
2890 if (rdata != NULL) {
2891 kref_init(&rdata->refcount);
2892 INIT_LIST_HEAD(&rdata->list);
2893 init_completion(&rdata->done);
2894 INIT_WORK(&rdata->work, complete);
2897 return rdata;
2900 void
2901 cifs_readdata_release(struct kref *refcount)
2903 struct cifs_readdata *rdata = container_of(refcount,
2904 struct cifs_readdata, refcount);
2905 #ifdef CONFIG_CIFS_SMB_DIRECT
2906 if (rdata->mr) {
2907 smbd_deregister_mr(rdata->mr);
2908 rdata->mr = NULL;
2910 #endif
2911 if (rdata->cfile)
2912 cifsFileInfo_put(rdata->cfile);
2914 kfree(rdata);
2917 static int
2918 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
2920 int rc = 0;
2921 struct page *page;
2922 unsigned int i;
2924 for (i = 0; i < nr_pages; i++) {
2925 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2926 if (!page) {
2927 rc = -ENOMEM;
2928 break;
2930 rdata->pages[i] = page;
2933 if (rc) {
2934 for (i = 0; i < nr_pages; i++) {
2935 put_page(rdata->pages[i]);
2936 rdata->pages[i] = NULL;
2939 return rc;
2942 static void
2943 cifs_uncached_readdata_release(struct kref *refcount)
2945 struct cifs_readdata *rdata = container_of(refcount,
2946 struct cifs_readdata, refcount);
2947 unsigned int i;
2949 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
2950 for (i = 0; i < rdata->nr_pages; i++) {
2951 put_page(rdata->pages[i]);
2952 rdata->pages[i] = NULL;
2954 cifs_readdata_release(refcount);
2957 /**
2958 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2959 * @rdata: the readdata response with list of pages holding data
2960 * @iter: destination for our data
2961 *
2962 * This function copies data from a list of pages in a readdata response into
2963 * an array of iovecs. It will first calculate where the data should go
2964 * based on the info in the readdata and then copy the data into that spot.
2965 */
2966 static int
2967 cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
2969 size_t remaining = rdata->got_bytes;
2970 unsigned int i;
2972 for (i = 0; i < rdata->nr_pages; i++) {
2973 struct page *page = rdata->pages[i];
2974 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
2975 size_t written;
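/* for pipe-backed iterators, copy the bytes out instead of handing the page itself over to the pipe */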
2977 if (unlikely(iter->type & ITER_PIPE)) {
2978 void *addr = kmap_atomic(page);
2980 written = copy_to_iter(addr, copy, iter);
2981 kunmap_atomic(addr);
2982 } else
2983 written = copy_page_to_iter(page, 0, copy, iter);
2984 remaining -= written;
2985 if (written < copy && iov_iter_count(iter) > 0)
2986 break;
2988 return remaining ? -EFAULT : 0;
2991 static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
2993 static void
2994 cifs_uncached_readv_complete(struct work_struct *work)
2996 struct cifs_readdata *rdata = container_of(work,
2997 struct cifs_readdata, work);
2999 complete(&rdata->done);
3000 collect_uncached_read_data(rdata->ctx);
3001 /* the below call can possibly free the last ref to aio ctx */
3002 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3005 static int
3006 uncached_fill_pages(struct TCP_Server_Info *server,
3007 struct cifs_readdata *rdata, struct iov_iter *iter,
3008 unsigned int len)
3010 int result = 0;
3011 unsigned int i;
3012 unsigned int nr_pages = rdata->nr_pages;
3014 rdata->got_bytes = 0;
3015 rdata->tailsz = PAGE_SIZE;
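/* consume len bytes into the pages: zero the tail of a partial final page and release any pages beyond the received data */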
3016 for (i = 0; i < nr_pages; i++) {
3017 struct page *page = rdata->pages[i];
3018 size_t n;
3020 if (len <= 0) {
3021 /* no need to hold page hostage */
3022 rdata->pages[i] = NULL;
3023 rdata->nr_pages--;
3024 put_page(page);
3025 continue;
3027 n = len;
3028 if (len >= PAGE_SIZE) {
3029 /* enough data to fill the page */
3030 n = PAGE_SIZE;
3031 len -= n;
3032 } else {
3033 zero_user(page, len, PAGE_SIZE - len);
3034 rdata->tailsz = len;
3035 len = 0;
3037 if (iter)
3038 result = copy_page_from_iter(page, 0, n, iter);
3039 #ifdef CONFIG_CIFS_SMB_DIRECT
3040 else if (rdata->mr)
3041 result = n;
3042 #endif
3043 else
3044 result = cifs_read_page_from_socket(server, page, n);
3045 if (result < 0)
3046 break;
3048 rdata->got_bytes += result;
3051 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3052 rdata->got_bytes : result;
3055 static int
3056 cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3057 struct cifs_readdata *rdata, unsigned int len)
3059 return uncached_fill_pages(server, rdata, NULL, len);
3062 static int
3063 cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3064 struct cifs_readdata *rdata,
3065 struct iov_iter *iter)
3067 return uncached_fill_pages(server, rdata, iter, iter->count);
3070 static int
3071 cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3072 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3073 struct cifs_aio_ctx *ctx)
3075 struct cifs_readdata *rdata;
3076 unsigned int npages, rsize, credits;
3077 size_t cur_len;
3078 int rc;
3079 pid_t pid;
3080 struct TCP_Server_Info *server;
3082 server = tlink_tcon(open_file->tlink)->ses->server;
3084 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3085 pid = open_file->pid;
3086 else
3087 pid = current->tgid;
3089 do {
3090 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3091 &rsize, &credits);
3092 if (rc)
3093 break;
3095 cur_len = min_t(const size_t, len, rsize);
3096 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3098 /* allocate a readdata struct */
3099 rdata = cifs_readdata_alloc(npages,
3100 cifs_uncached_readv_complete);
3101 if (!rdata) {
3102 add_credits_and_wake_if(server, credits, 0);
3103 rc = -ENOMEM;
3104 break;
3107 rc = cifs_read_allocate_pages(rdata, npages);
3108 if (rc)
3109 goto error;
3111 rdata->cfile = cifsFileInfo_get(open_file);
3112 rdata->nr_pages = npages;
3113 rdata->offset = offset;
3114 rdata->bytes = cur_len;
3115 rdata->pid = pid;
3116 rdata->pagesz = PAGE_SIZE;
3117 rdata->read_into_pages = cifs_uncached_read_into_pages;
3118 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
3119 rdata->credits = credits;
3120 rdata->ctx = ctx;
3121 kref_get(&ctx->refcount);
3123 if (!rdata->cfile->invalidHandle ||
3124 !(rc = cifs_reopen_file(rdata->cfile, true)))
3125 rc = server->ops->async_readv(rdata);
3126 error:
3127 if (rc) {
3128 add_credits_and_wake_if(server, rdata->credits, 0);
3129 kref_put(&rdata->refcount,
3130 cifs_uncached_readdata_release);
3131 if (rc == -EAGAIN)
3132 continue;
3133 break;
3136 list_add_tail(&rdata->list, rdata_list);
3137 offset += cur_len;
3138 len -= cur_len;
3139 } while (len > 0);
3141 return rc;
3144 static void
3145 collect_uncached_read_data(struct cifs_aio_ctx *ctx)
3147 struct cifs_readdata *rdata, *tmp;
3148 struct iov_iter *to = &ctx->iter;
3149 struct cifs_sb_info *cifs_sb;
3150 struct cifs_tcon *tcon;
3151 unsigned int i;
3152 int rc;
3154 tcon = tlink_tcon(ctx->cfile->tlink);
3155 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
3157 mutex_lock(&ctx->aio_mutex);
3159 if (list_empty(&ctx->list)) {
3160 mutex_unlock(&ctx->aio_mutex);
3161 return;
3164 rc = ctx->rc;
3165 /* the loop below should proceed in the order of increasing offsets */
3166 again:
3167 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
3168 if (!rc) {
3169 if (!try_wait_for_completion(&rdata->done)) {
3170 mutex_unlock(&ctx->aio_mutex);
3171 return;
3174 if (rdata->result == -EAGAIN) {
3175 /* resend call if it's a retryable error */
3176 struct list_head tmp_list;
3177 unsigned int got_bytes = rdata->got_bytes;
3179 list_del_init(&rdata->list);
3180 INIT_LIST_HEAD(&tmp_list);
3182 /*
3183 * Got part of the data and then a
3184 * reconnect happened -- fill the buffer and
3185 * continue reading.
3186 */
3187 if (got_bytes && got_bytes < rdata->bytes) {
3188 rc = cifs_readdata_to_iov(rdata, to);
3189 if (rc) {
3190 kref_put(&rdata->refcount,
3191 cifs_uncached_readdata_release);
3192 continue;
3196 rc = cifs_send_async_read(
3197 rdata->offset + got_bytes,
3198 rdata->bytes - got_bytes,
3199 rdata->cfile, cifs_sb,
3200 &tmp_list, ctx);
3202 list_splice(&tmp_list, &ctx->list);
3204 kref_put(&rdata->refcount,
3205 cifs_uncached_readdata_release);
3206 goto again;
3207 } else if (rdata->result)
3208 rc = rdata->result;
3209 else
3210 rc = cifs_readdata_to_iov(rdata, to);
3212 /* if there was a short read -- discard anything left */
3213 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3214 rc = -ENODATA;
3216 list_del_init(&rdata->list);
3217 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3220 for (i = 0; i < ctx->npages; i++) {
3221 if (ctx->should_dirty)
3222 set_page_dirty(ctx->bv[i].bv_page);
3223 put_page(ctx->bv[i].bv_page);
3226 ctx->total_len = ctx->len - iov_iter_count(to);
3228 cifs_stats_bytes_read(tcon, ctx->total_len);
3230 /* mask nodata case */
3231 if (rc == -ENODATA)
3232 rc = 0;
3234 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3236 mutex_unlock(&ctx->aio_mutex);
3238 if (ctx->iocb && ctx->iocb->ki_complete)
3239 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3240 else
3241 complete(&ctx->done);
3244 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3246 struct file *file = iocb->ki_filp;
3247 ssize_t rc;
3248 size_t len;
3249 ssize_t total_read = 0;
3250 loff_t offset = iocb->ki_pos;
3251 struct cifs_sb_info *cifs_sb;
3252 struct cifs_tcon *tcon;
3253 struct cifsFileInfo *cfile;
3254 struct cifs_aio_ctx *ctx;
3256 len = iov_iter_count(to);
3257 if (!len)
3258 return 0;
3260 cifs_sb = CIFS_FILE_SB(file);
3261 cfile = file->private_data;
3262 tcon = tlink_tcon(cfile->tlink);
3264 if (!tcon->ses->server->ops->async_readv)
3265 return -ENOSYS;
3267 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3268 cifs_dbg(FYI, "attempting read on write only file instance\n");
3270 ctx = cifs_aio_ctx_alloc();
3271 if (!ctx)
3272 return -ENOMEM;
3274 ctx->cfile = cifsFileInfo_get(cfile);
3276 if (!is_sync_kiocb(iocb))
3277 ctx->iocb = iocb;
3279 if (to->type == ITER_IOVEC)
3280 ctx->should_dirty = true;
3282 rc = setup_aio_ctx_iter(ctx, to, READ);
3283 if (rc) {
3284 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3285 return rc;
3288 len = ctx->len;
3290 /* grab a lock here because read response handlers can access ctx */
3291 mutex_lock(&ctx->aio_mutex);
3293 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3295 /* if at least one read request was sent successfully, reset rc */
3296 if (!list_empty(&ctx->list))
3297 rc = 0;
3299 mutex_unlock(&ctx->aio_mutex);
3301 if (rc) {
3302 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3303 return rc;
3306 if (!is_sync_kiocb(iocb)) {
3307 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3308 return -EIOCBQUEUED;
3311 rc = wait_for_completion_killable(&ctx->done);
3312 if (rc) {
3313 mutex_lock(&ctx->aio_mutex);
3314 ctx->rc = rc = -EINTR;
3315 total_read = ctx->total_len;
3316 mutex_unlock(&ctx->aio_mutex);
3317 } else {
3318 rc = ctx->rc;
3319 total_read = ctx->total_len;
3322 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3324 if (total_read) {
3325 iocb->ki_pos += total_read;
3326 return total_read;
3328 return rc;
3331 ssize_t
3332 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
3334 struct inode *inode = file_inode(iocb->ki_filp);
3335 struct cifsInodeInfo *cinode = CIFS_I(inode);
3336 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3337 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3338 iocb->ki_filp->private_data;
3339 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3340 int rc = -EACCES;
3342 /*
3343 * In strict cache mode we need to read from the server all the time
3344 * if we don't have level II oplock because the server can delay mtime
3345 * change - so we can't make a decision about inode invalidating.
3346 * And we can also fail with page reading if there are mandatory locks
3347 * on pages affected by this read but not on the region from pos to
3348 * pos+len-1.
3349 */
3350 if (!CIFS_CACHE_READ(cinode))
3351 return cifs_user_readv(iocb, to);
3353 if (cap_unix(tcon->ses) &&
3354 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3355 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
3356 return generic_file_read_iter(iocb, to);
3358 /*
3359 * We need to hold the sem to be sure nobody modifies lock list
3360 * with a brlock that prevents reading.
3361 */
3362 down_read(&cinode->lock_sem);
3363 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
3364 tcon->ses->server->vals->shared_lock_type,
3365 NULL, CIFS_READ_OP))
3366 rc = generic_file_read_iter(iocb, to);
3367 up_read(&cinode->lock_sem);
3368 return rc;
3371 static ssize_t
3372 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
3374 int rc = -EACCES;
3375 unsigned int bytes_read = 0;
3376 unsigned int total_read;
3377 unsigned int current_read_size;
3378 unsigned int rsize;
3379 struct cifs_sb_info *cifs_sb;
3380 struct cifs_tcon *tcon;
3381 struct TCP_Server_Info *server;
3382 unsigned int xid;
3383 char *cur_offset;
3384 struct cifsFileInfo *open_file;
3385 struct cifs_io_parms io_parms;
3386 int buf_type = CIFS_NO_BUFFER;
3387 __u32 pid;
3389 xid = get_xid();
3390 cifs_sb = CIFS_FILE_SB(file);
3392 /* FIXME: set up handlers for larger reads and/or convert to async */
3393 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3395 if (file->private_data == NULL) {
3396 rc = -EBADF;
3397 free_xid(xid);
3398 return rc;
3400 open_file = file->private_data;
3401 tcon = tlink_tcon(open_file->tlink);
3402 server = tcon->ses->server;
3404 if (!server->ops->sync_read) {
3405 free_xid(xid);
3406 return -ENOSYS;
3409 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3410 pid = open_file->pid;
3411 else
3412 pid = current->tgid;
3414 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3415 cifs_dbg(FYI, "attempting read on write only file instance\n");
3417 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3418 total_read += bytes_read, cur_offset += bytes_read) {
3419 do {
3420 current_read_size = min_t(uint, read_size - total_read,
3421 rsize);
3422 /*
3423 * For Windows ME and 9x we do not want to request more
3424 * than it negotiated since it will refuse the read
3425 * then.
3426 */
3427 if ((tcon->ses) && !(tcon->ses->capabilities &
3428 tcon->ses->server->vals->cap_large_files)) {
3429 current_read_size = min_t(uint,
3430 current_read_size, CIFSMaxBufSize);
3432 if (open_file->invalidHandle) {
3433 rc = cifs_reopen_file(open_file, true);
3434 if (rc != 0)
3435 break;
3437 io_parms.pid = pid;
3438 io_parms.tcon = tcon;
3439 io_parms.offset = *offset;
3440 io_parms.length = current_read_size;
3441 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
3442 &bytes_read, &cur_offset,
3443 &buf_type);
3444 } while (rc == -EAGAIN);
3446 if (rc || (bytes_read == 0)) {
3447 if (total_read) {
3448 break;
3449 } else {
3450 free_xid(xid);
3451 return rc;
3453 } else {
3454 cifs_stats_bytes_read(tcon, total_read);
3455 *offset += bytes_read;
3458 free_xid(xid);
3459 return total_read;
3462 /*
3463 * If the page is mmap'ed into a process' page tables, then we need to make
3464 * sure that it doesn't change while being written back.
3465 */
3466 static int
3467 cifs_page_mkwrite(struct vm_fault *vmf)
3469 struct page *page = vmf->page;
3471 lock_page(page);
3472 return VM_FAULT_LOCKED;
3475 static const struct vm_operations_struct cifs_file_vm_ops = {
3476 .fault = filemap_fault,
3477 .map_pages = filemap_map_pages,
3478 .page_mkwrite = cifs_page_mkwrite,
3481 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3483 int xid, rc = 0;
3484 struct inode *inode = file_inode(file);
3486 xid = get_xid();
3488 if (!CIFS_CACHE_READ(CIFS_I(inode)))
3489 rc = cifs_zap_mapping(inode);
3490 if (!rc)
3491 rc = generic_file_mmap(file, vma);
3492 if (!rc)
3493 vma->vm_ops = &cifs_file_vm_ops;
3495 free_xid(xid);
3496 return rc;
3499 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3501 int rc, xid;
3503 xid = get_xid();
3505 rc = cifs_revalidate_file(file);
3506 if (rc)
3507 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3508 rc);
3509 if (!rc)
3510 rc = generic_file_mmap(file, vma);
3511 if (!rc)
3512 vma->vm_ops = &cifs_file_vm_ops;
3514 free_xid(xid);
3515 return rc;
3518 static void
3519 cifs_readv_complete(struct work_struct *work)
3521 unsigned int i, got_bytes;
3522 struct cifs_readdata *rdata = container_of(work,
3523 struct cifs_readdata, work);
3525 got_bytes = rdata->got_bytes;
3526 for (i = 0; i < rdata->nr_pages; i++) {
3527 struct page *page = rdata->pages[i];
3529 lru_cache_add_file(page);
3531 if (rdata->result == 0 ||
3532 (rdata->result == -EAGAIN && got_bytes)) {
3533 flush_dcache_page(page);
3534 SetPageUptodate(page);
3537 unlock_page(page);
3539 if (rdata->result == 0 ||
3540 (rdata->result == -EAGAIN && got_bytes))
3541 cifs_readpage_to_fscache(rdata->mapping->host, page);
3543 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
3545 put_page(page);
3546 rdata->pages[i] = NULL;
3548 kref_put(&rdata->refcount, cifs_readdata_release);
3551 static int
3552 readpages_fill_pages(struct TCP_Server_Info *server,
3553 struct cifs_readdata *rdata, struct iov_iter *iter,
3554 unsigned int len)
3556 int result = 0;
3557 unsigned int i;
3558 u64 eof;
3559 pgoff_t eof_index;
3560 unsigned int nr_pages = rdata->nr_pages;
3562 /* determine the eof that the server (probably) has */
3563 eof = CIFS_I(rdata->mapping->host)->server_eof;
3564 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
3565 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
3567 rdata->got_bytes = 0;
3568 rdata->tailsz = PAGE_SIZE;
3569 for (i = 0; i < nr_pages; i++) {
3570 struct page *page = rdata->pages[i];
3571 size_t n = PAGE_SIZE;
3573 if (len >= PAGE_SIZE) {
3574 len -= PAGE_SIZE;
3575 } else if (len > 0) {
3576 /* enough for partial page, fill and zero the rest */
3577 zero_user(page, len, PAGE_SIZE - len);
3578 n = rdata->tailsz = len;
3579 len = 0;
3580 } else if (page->index > eof_index) {
3581 /*
3582 * The VFS will not try to do readahead past the
3583 * i_size, but it's possible that we have outstanding
3584 * writes with gaps in the middle and the i_size hasn't
3585 * caught up yet. Populate those with zeroed out pages
3586 * to prevent the VFS from repeatedly attempting to
3587 * fill them until the writes are flushed.
3588 */
3589 zero_user(page, 0, PAGE_SIZE);
3590 lru_cache_add_file(page);
3591 flush_dcache_page(page);
3592 SetPageUptodate(page);
3593 unlock_page(page);
3594 put_page(page);
3595 rdata->pages[i] = NULL;
3596 rdata->nr_pages--;
3597 continue;
3598 } else {
3599 /* no need to hold page hostage */
3600 lru_cache_add_file(page);
3601 unlock_page(page);
3602 put_page(page);
3603 rdata->pages[i] = NULL;
3604 rdata->nr_pages--;
3605 continue;
3608 if (iter)
3609 result = copy_page_from_iter(page, 0, n, iter);
3610 #ifdef CONFIG_CIFS_SMB_DIRECT
3611 else if (rdata->mr)
3612 result = n;
3613 #endif
3614 else
3615 result = cifs_read_page_from_socket(server, page, n);
3616 if (result < 0)
3617 break;
3619 rdata->got_bytes += result;
3622 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3623 rdata->got_bytes : result;
3626 static int
3627 cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3628 struct cifs_readdata *rdata, unsigned int len)
3630 return readpages_fill_pages(server, rdata, NULL, len);
3633 static int
3634 cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
3635 struct cifs_readdata *rdata,
3636 struct iov_iter *iter)
3638 return readpages_fill_pages(server, rdata, iter, iter->count);
3641 static int
3642 readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3643 unsigned int rsize, struct list_head *tmplist,
3644 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3646 struct page *page, *tpage;
3647 unsigned int expected_index;
3648 int rc;
3649 gfp_t gfp = readahead_gfp_mask(mapping);
3651 INIT_LIST_HEAD(tmplist);
3653 page = list_entry(page_list->prev, struct page, lru);
3655 /*
3656 * Lock the page and put it in the cache. Since no one else
3657 * should have access to this page, we're safe to simply set
3658 * PG_locked without checking it first.
3659 */
3660 __SetPageLocked(page);
3661 rc = add_to_page_cache_locked(page, mapping,
3662 page->index, gfp);
3664 /* give up if we can't stick it in the cache */
3665 if (rc) {
3666 __ClearPageLocked(page);
3667 return rc;
3670 /* move first page to the tmplist */
3671 *offset = (loff_t)page->index << PAGE_SHIFT;
3672 *bytes = PAGE_SIZE;
3673 *nr_pages = 1;
3674 list_move_tail(&page->lru, tmplist);
3676 /* now try and add more pages onto the request */
3677 expected_index = page->index + 1;
3678 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3679 /* discontinuity ? */
3680 if (page->index != expected_index)
3681 break;
3683 /* would this page push the read over the rsize? */
3684 if (*bytes + PAGE_SIZE > rsize)
3685 break;
3687 __SetPageLocked(page);
3688 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
3689 __ClearPageLocked(page);
3690 break;
3692 list_move_tail(&page->lru, tmplist);
3693 (*bytes) += PAGE_SIZE;
3694 expected_index++;
3695 (*nr_pages)++;
3697 return rc;
3700 static int cifs_readpages(struct file *file, struct address_space *mapping,
3701 struct list_head *page_list, unsigned num_pages)
3703 int rc;
3704 struct list_head tmplist;
3705 struct cifsFileInfo *open_file = file->private_data;
3706 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
3707 struct TCP_Server_Info *server;
3708 pid_t pid;
3710 /*
3711 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3712 * immediately if the cookie is negative
3713 *
3714 * After this point, every page in the list might have PG_fscache set,
3715 * so we will need to clean that up off of every page we don't use.
3716 */
3717 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3718 &num_pages);
3719 if (rc == 0)
3720 return rc;
3722 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3723 pid = open_file->pid;
3724 else
3725 pid = current->tgid;
3727 rc = 0;
3728 server = tlink_tcon(open_file->tlink)->ses->server;
3730 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3731 __func__, file, mapping, num_pages);
3733 /*
3734 * Start with the page at end of list and move it to private
3735 * list. Do the same with any following pages until we hit
3736 * the rsize limit, hit an index discontinuity, or run out of
3737 * pages. Issue the async read and then start the loop again
3738 * until the list is empty.
3739 *
3740 * Note that list order is important. The page_list is in
3741 * the order of declining indexes. When we put the pages in
3742 * the rdata->pages, then we want them in increasing order.
3743 */
3744 while (!list_empty(page_list)) {
3745 unsigned int i, nr_pages, bytes, rsize;
3746 loff_t offset;
3747 struct page *page, *tpage;
3748 struct cifs_readdata *rdata;
3749 unsigned credits;
3751 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3752 &rsize, &credits);
3753 if (rc)
3754 break;
3756 /*
3757 * Give up immediately if rsize is too small to read an entire
3758 * page. The VFS will fall back to readpage. We should never
3759 * reach this point however since we set ra_pages to 0 when the
3760 * rsize is smaller than a cache page.
3761 */
3762 if (unlikely(rsize < PAGE_SIZE)) {
3763 add_credits_and_wake_if(server, credits, 0);
3764 return 0;
3767 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3768 &nr_pages, &offset, &bytes);
3769 if (rc) {
3770 add_credits_and_wake_if(server, credits, 0);
3771 break;
3774 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
3775 if (!rdata) {
3776 /* best to give up if we're out of mem */
3777 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3778 list_del(&page->lru);
3779 lru_cache_add_file(page);
3780 unlock_page(page);
3781 put_page(page);
3783 rc = -ENOMEM;
3784 add_credits_and_wake_if(server, credits, 0);
3785 break;
3788 rdata->cfile = cifsFileInfo_get(open_file);
3789 rdata->mapping = mapping;
3790 rdata->offset = offset;
3791 rdata->bytes = bytes;
3792 rdata->pid = pid;
3793 rdata->pagesz = PAGE_SIZE;
3794 rdata->read_into_pages = cifs_readpages_read_into_pages;
3795 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
3796 rdata->credits = credits;
3798 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3799 list_del(&page->lru);
3800 rdata->pages[rdata->nr_pages++] = page;
3803 if (!rdata->cfile->invalidHandle ||
3804 !(rc = cifs_reopen_file(rdata->cfile, true)))
3805 rc = server->ops->async_readv(rdata);
3806 if (rc) {
3807 add_credits_and_wake_if(server, rdata->credits, 0);
3808 for (i = 0; i < rdata->nr_pages; i++) {
3809 page = rdata->pages[i];
3810 lru_cache_add_file(page);
3811 unlock_page(page);
3812 put_page(page);
3814 /* Fallback to the readpage in error/reconnect cases */
3815 kref_put(&rdata->refcount, cifs_readdata_release);
3816 break;
3819 kref_put(&rdata->refcount, cifs_readdata_release);
3822 /* Any pages that have been shown to fscache but didn't get added to
3823 * the pagecache must be uncached before they get returned to the
3824 * allocator.
3825 */
3826 cifs_fscache_readpages_cancel(mapping->host, page_list);
3827 return rc;
3830 /*
3831 * cifs_readpage_worker must be called with the page pinned
3832 */
3833 static int cifs_readpage_worker(struct file *file, struct page *page,
3834 loff_t *poffset)
3836 char *read_data;
3837 int rc;
3839 /* Is the page cached? */
3840 rc = cifs_readpage_from_fscache(file_inode(file), page);
3841 if (rc == 0)
3842 goto read_complete;
3844 read_data = kmap(page);
3845 /* for reads over a certain size could initiate async read ahead */
3847 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
3849 if (rc < 0)
3850 goto io_error;
3851 else
3852 cifs_dbg(FYI, "Bytes read %d\n", rc);
3854 file_inode(file)->i_atime =
3855 current_time(file_inode(file));
3857 if (PAGE_SIZE > rc)
3858 memset(read_data + rc, 0, PAGE_SIZE - rc);
3860 flush_dcache_page(page);
3861 SetPageUptodate(page);
3863 /* send this page to the cache */
3864 cifs_readpage_to_fscache(file_inode(file), page);
3866 rc = 0;
3868 io_error:
3869 kunmap(page);
3870 unlock_page(page);
3872 read_complete:
3873 return rc;
3876 static int cifs_readpage(struct file *file, struct page *page)
3878 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
3879 int rc = -EACCES;
3880 unsigned int xid;
3882 xid = get_xid();
3884 if (file->private_data == NULL) {
3885 rc = -EBADF;
3886 free_xid(xid);
3887 return rc;
3890 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
3891 page, (int)offset, (int)offset);
3893 rc = cifs_readpage_worker(file, page, &offset);
3895 free_xid(xid);
3896 return rc;
3899 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3901 struct cifsFileInfo *open_file;
3902 struct cifs_tcon *tcon =
3903 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
3905 spin_lock(&tcon->open_file_lock);
3906 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3907 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3908 spin_unlock(&tcon->open_file_lock);
3909 return 1;
3912 spin_unlock(&tcon->open_file_lock);
3913 return 0;
3916 /* We do not want to update the file size from server for inodes
3917 open for write - to avoid races with writepage extending
3918 the file - in the future we could consider allowing
3919 refreshing the inode only on increases in the file size
3920 but this is tricky to do without racing with writebehind
3921 page caching in the current Linux kernel design */
3922 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
3924 if (!cifsInode)
3925 return true;
3927 if (is_inode_writable(cifsInode)) {
3928 /* This inode is open for write at least once */
3929 struct cifs_sb_info *cifs_sb;
3931 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
3932 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3933 /* since no page cache to corrupt on directio
3934 we can change size safely */
3935 return true;
3938 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
3939 return true;
3941 return false;
3942 } else
3943 return true;
3946 static int cifs_write_begin(struct file *file, struct address_space *mapping,
3947 loff_t pos, unsigned len, unsigned flags,
3948 struct page **pagep, void **fsdata)
3950 int oncethru = 0;
3951 pgoff_t index = pos >> PAGE_SHIFT;
3952 loff_t offset = pos & (PAGE_SIZE - 1);
3953 loff_t page_start = pos & PAGE_MASK;
3954 loff_t i_size;
3955 struct page *page;
3956 int rc = 0;
3958 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
3960 start:
3961 page = grab_cache_page_write_begin(mapping, index, flags);
3962 if (!page) {
3963 rc = -ENOMEM;
3964 goto out;
3967 if (PageUptodate(page))
3968 goto out;
3970 /*
3971 * If we write a full page it will be up to date, no need to read from
3972 * the server. If the write is short, we'll end up doing a sync write
3973 * instead.
3974 */
3975 if (len == PAGE_SIZE)
3976 goto out;
3978 /*
3979 * optimize away the read when we have an oplock, and we're not
3980 * expecting to use any of the data we'd be reading in. That
3981 * is, when the page lies beyond the EOF, or straddles the EOF
3982 * and the write will cover all of the existing data.
3983 */
3984 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
3985 i_size = i_size_read(mapping->host);
3986 if (page_start >= i_size ||
3987 (offset == 0 && (pos + len) >= i_size)) {
3988 zero_user_segments(page, 0, offset,
3989 offset + len,
3990 PAGE_SIZE);
3991 /*
3992 * PageChecked means that the parts of the page
3993 * to which we're not writing are considered up
3994 * to date. Once the data is copied to the
3995 * page, it can be set uptodate.
3996 */
3997 SetPageChecked(page);
3998 goto out;
4002 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
4003 /*
4004 * might as well read a page, it is fast enough. If we get
4005 * an error, we don't need to return it. cifs_write_end will
4006 * do a sync write instead since PG_uptodate isn't set.
4007 */
4008 cifs_readpage_worker(file, page, &page_start);
4009 put_page(page);
4010 oncethru = 1;
4011 goto start;
4012 } else {
4013 /* we could try using another file handle if there is one -
4014 but how would we lock it to prevent close of that handle
4015 racing with this read? In any case
4016 this will be written out by write_end so is fine */
4018 out:
4019 *pagep = page;
4020 return rc;
4023 static int cifs_release_page(struct page *page, gfp_t gfp)
4025 if (PagePrivate(page))
4026 return 0;
4028 return cifs_fscache_release_page(page, gfp);
4031 static void cifs_invalidate_page(struct page *page, unsigned int offset,
4032 unsigned int length)
4034 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4036 if (offset == 0 && length == PAGE_SIZE)
4037 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4040 static int cifs_launder_page(struct page *page)
4042 int rc = 0;
4043 loff_t range_start = page_offset(page);
4044 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
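/* a writeback_control that targets exactly this one page */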
4045 struct writeback_control wbc = {
4046 .sync_mode = WB_SYNC_ALL,
4047 .nr_to_write = 0,
4048 .range_start = range_start,
4049 .range_end = range_end,
4052 cifs_dbg(FYI, "Launder page: %p\n", page);
4054 if (clear_page_dirty_for_io(page))
4055 rc = cifs_writepage_locked(page, &wbc);
4057 cifs_fscache_invalidate_page(page, page->mapping->host);
4058 return rc;
4061 void cifs_oplock_break(struct work_struct *work)
4063 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4064 oplock_break);
4065 struct inode *inode = d_inode(cfile->dentry);
4066 struct cifsInodeInfo *cinode = CIFS_I(inode);
4067 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
4068 struct TCP_Server_Info *server = tcon->ses->server;
4069 int rc = 0;
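/* let any in-flight writers finish before downgrading the oplock */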
4071 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
4072 TASK_UNINTERRUPTIBLE);
4074 server->ops->downgrade_oplock(server, cinode,
4075 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
4077 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
4078 cifs_has_mand_locks(cinode)) {
4079 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4080 inode);
4081 cinode->oplock = 0;
4084 if (inode && S_ISREG(inode->i_mode)) {
4085 if (CIFS_CACHE_READ(cinode))
4086 break_lease(inode, O_RDONLY);
4087 else
4088 break_lease(inode, O_WRONLY);
4089 rc = filemap_fdatawrite(inode->i_mapping);
4090 if (!CIFS_CACHE_READ(cinode)) {
4091 rc = filemap_fdatawait(inode->i_mapping);
4092 mapping_set_error(inode->i_mapping, rc);
4093 cifs_zap_mapping(inode);
4095 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
4098 rc = cifs_push_locks(cfile);
4099 if (rc)
4100 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
4102 /*
4103 * Releasing a stale oplock after a recent reconnect of the SMB session,
4104 * using a now-incorrect file handle, is not a data integrity issue; but
4105 * do not bother sending an oplock release if the session to the server
4106 * is still disconnected, since the server has already released the oplock.
4107 */
4108 if (!cfile->oplock_break_cancelled) {
4109 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4110 cinode);
4111 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
4113 cifs_done_oplock_break(cinode);
4116 /*
4117 * The presence of cifs_direct_io() in the address space ops vector
4118 * allows open() O_DIRECT flags which would have failed otherwise.
4119 *
4120 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4121 * so this method should never be called.
4122 *
4123 * Direct IO is not yet supported in the cached mode.
4124 */
4125 static ssize_t
4126 cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
4128 /*
4129 * FIXME
4130 * Eventually need to support direct IO for non-forcedirectio mounts
4131 */
4132 return -EINVAL;
4136 const struct address_space_operations cifs_addr_ops = {
4137 .readpage = cifs_readpage,
4138 .readpages = cifs_readpages,
4139 .writepage = cifs_writepage,
4140 .writepages = cifs_writepages,
4141 .write_begin = cifs_write_begin,
4142 .write_end = cifs_write_end,
4143 .set_page_dirty = __set_page_dirty_nobuffers,
4144 .releasepage = cifs_release_page,
4145 .direct_IO = cifs_direct_io,
4146 .invalidatepage = cifs_invalidate_page,
4147 .launder_page = cifs_launder_page,
4150 /*
4151 * cifs_readpages requires the server to support a buffer large enough to
4152 * contain the header plus one complete page of data. Otherwise, we need
4153 * to leave cifs_readpages out of the address space operations.
4154 */
4155 const struct address_space_operations cifs_addr_ops_smallbuf = {
4156 .readpage = cifs_readpage,
4157 .writepage = cifs_writepage,
4158 .writepages = cifs_writepages,
4159 .write_begin = cifs_write_begin,
4160 .write_end = cifs_write_end,
4161 .set_page_dirty = __set_page_dirty_nobuffers,
4162 .releasepage = cifs_release_page,
4163 .invalidatepage = cifs_invalidate_page,
4164 .launder_page = cifs_launder_page,