// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>
static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
}
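/*
 * Added commentary (not in the original file): wait_mtu_credits() both
 * reserves credits and caps stream->sreq_max_len, so the netfs layer never
 * builds a subrequest larger than the server will accept; the trace point
 * then records the reservation against the request's debug id so it can be
 * cross-checked when the credits are released in cifs_free_subrequest().
 */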
/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc, false);
	goto out;
}
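/*
 * Added commentary: every failure path above hands its credits back via
 * add_credits_and_wake_if() before terminating the subrequest; otherwise
 * the server's credit window would leak and later writes could stall
 * waiting for credits that are never returned.
 */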
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}
/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = req->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
						     cifs_sb->ctx);

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
	return 0;
}
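/*
 * Added commentary: rsize is negotiated lazily here, on the first read
 * after mount if the user did not specify one, and the result is cached
 * in cifs_sb->ctx->rsize so subsequent reads skip the negotiation call.
 */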
/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = req->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	if (subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	netfs_read_subreq_terminated(subreq, rc, false);
}
/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}
/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; // Ummm...  This may be a workqueue

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}
/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime, it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime) < 0)
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}
static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
			if (rc == 0)
				rdata->result = 0;
		}
#endif
	}

	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}
const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};
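/*
 * Added commentary: this ops table is how the generic netfs library calls
 * back into cifs.  Reads flow prepare_read -> issue_read -> done, buffered
 * writes flow begin_writeback -> prepare_write -> issue_write, and the two
 * pools let netfs allocate the cifs_io_request/cifs_io_subrequest wrappers
 * directly instead of bare netfs structures.
 */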
/*
 * Mark as invalid, all open files on tree connections since they
 * were closed when session to server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
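/*
 * Added example: open(path, O_WRONLY) normally maps to GENERIC_WRITE, but
 * with rdwr_for_fscache == 1 (fscache active) it maps to
 * GENERIC_READ | GENERIC_WRITE so the cache can read back the surrounding
 * data when filling in around a partial write.
 */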
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
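/*
 * Added example: O_CREAT|O_TRUNC maps to FILE_OVERWRITE_IF (create or
 * truncate), while O_TRUNC alone maps to FILE_OVERWRITE, which fails if
 * the file does not exist - matching the open(2) semantics listed in the
 * mapping table in cifs_nt_open() below.
 */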
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}
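/*
 * Added commentary (an assumption about intent, not stated in the
 * original): spinning on down_write_trylock() with a short msleep()
 * rather than blocking in down_write() keeps the task responsive while
 * readers hold lock_sem for long-running lock pushes during reconnect.
 */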
static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}
static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);

	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);

	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
	);

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}
/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_for_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	not offloaded on close and oplock breaks
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close  because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// if serverclose has been offloaded to wq (on failure), it will
	// handle offloading put as well. If serverclose not offloaded,
	// we need to handle offloading put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		}
		_cifsFileInfo_put(cfile, true, false);
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
*work
)
1329 struct cifsFileInfo
*cfile
= container_of(work
,
1330 struct cifsFileInfo
, deferred
.work
);
1332 spin_lock(&CIFS_I(d_inode(cfile
->dentry
))->deferred_lock
);
1333 cifs_del_deferred_close(cfile
);
1334 cfile
->deferred_close_scheduled
= false;
1335 spin_unlock(&CIFS_I(d_inode(cfile
->dentry
))->deferred_lock
);
1336 _cifsFileInfo_put(cfile
, true, false);
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}
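/*
 * Added commentary: a close is deferred only when all of the following
 * hold - the mount set a closetimeo, the inode holds a lease, the cached
 * handle has RH or RHW caching, and CIFS_INO_CLOSE_ON_LOCK is not set.
 * Deferring lets a quickly re-opened file reuse the server handle.
 */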
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, Increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						      &cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						   &cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->c.flc_type;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
		flock->c.flc_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) <0, if the error occurs while setting the lock;
 * 2) 0, if we set the lock and don't need to request to the server;
 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
static inline __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
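/*
 * Added commentary: fl_owner_t is a kernel pointer, but the SMB POSIX lock
 * call wants a numeric owner.  Hashing the pointer with the random
 * cifs_lock_secret gives a stable 32-bit id without leaking kernel
 * addresses to the server.
 */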
struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = locks_inode_context(inode);
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	for_each_file_lock(flock, &flctx->flc_posix) {
		unsigned char ftype = flock->c.flc_type;

		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = cifs_flock_len(flock);
		if (ftype == F_RDLCK || ftype == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->c.flc_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->c.flc_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->c.flc_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->c.flc_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->c.flc_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->c.flc_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->c.flc_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
			 flock->c.flc_flags);

	*type = server->vals->large_lock_type;
	if (lock_is_write(flock)) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (lock_is_unlock(flock)) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (lock_is_read(flock)) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->c.flc_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->c.flc_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type | server->vals->shared_lock_type, 0, 1, false);
		flock->c.flc_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->c.flc_type = F_WRLCK;

	return 0;
}
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}
void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = cifs_flock_len(flock);
	LIST_HEAD(tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
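/*
 * Service an F_SETLK/F_SETLKW request: set or clear a byte-range lock.
 * POSIX-style locks go to the server via CIFSSMBPosixLock(); mandatory
 * locks go through server->ops->mand_lock/mand_unlock_range.  For
 * FL_POSIX/FL_FLOCK requests the result is then mirrored into the local
 * VFS lock table via locks_lock_file_wait().
 */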
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->c.flc_flags);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * page reading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->c.flc_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
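/* Entry point for flock(2); only FL_FLOCK-style requests are accepted. */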
int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	xid = get_xid();

	if (!(fl->c.flc_flags & FL_FLOCK)) {
		rc = -ENOLCK;
		free_xid(xid);
		return rc;
	}

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
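/* Entry point for fcntl(2) byte-range locks (F_GETLK/F_SETLK/F_SETLKW). */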
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	xid = get_xid();

	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
		 flock->c.flc_flags, flock->c.flc_type,
		 (long long)flock->fl_start,
		 (long long)flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);
	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we cannot accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
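/*
 * Completion handler for a write subrequest: on success, advance the netfs
 * zero_point and cached remote file size before handing the subrequest
 * back to the netfs library.
 */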
void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
				      bool was_async)
{
	struct netfs_io_request *wreq = wdata->rreq;
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	loff_t wrend;

	if (result > 0) {
		wrend = wdata->subreq.start + wdata->subreq.transferred + result;

		if (wrend > ictx->zero_point &&
		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
		     wdata->rreq->origin == NETFS_DIO_WRITE))
			ictx->zero_point = wrend;
		if (wrend > ictx->remote_i_size)
			netfs_resize_file(ictx, wrend, true);
	}

	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
}
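/*
 * Find a readable, valid handle for the inode, taking a reference on it so
 * it cannot be closed underneath the caller.  Returns NULL if none exists.
 */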
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if ((!open_file->invalidHandle)) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}
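/*
 * The search below prefers a valid writable handle opened by the calling
 * process (same tgid), then any valid writable handle; as a last resort an
 * invalidated handle is reopened, retrying up to MAX_REOPEN_ATT times.
 */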
/* Return -EBADF if no handle is found and general rc otherwise */
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;
	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen but we had reports of an oops (due
	 * to it being zero) during stress testcases so we need to check for it
	 */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
{
	struct cifsFileInfo *cfile;
	int rc;

	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
	if (rc)
		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);

	return cfile;
}
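/*
 * Path-based variants: walk the tcon's open file list, match on the full
 * path built from each handle's dentry, and delegate to the inode-based
 * lookups above.
 */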
int
cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
		       int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		const char *full_path = build_path_from_dentry(cfile->dentry, page);
		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		return cifs_get_writable_file(cinode, flags, ret_file);
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
int
cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		const char *full_path = build_path_from_dentry(cfile->dentry, page);
		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		*ret_file = find_readable_file(cinode, 0);
		return *ret_file ? 0 : -ENOENT;
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
/*
 * Flush data on a strict file.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(inode->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto strict_fsync_exit;
		}

		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

strict_fsync_exit:
	free_xid(xid);
	return rc;
}
/*
 * Flush data on a non-strict file.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto fsync_exit;
		}

		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

fsync_exit:
	free_xid(xid);
	return rc;
}
/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
	if (rc) {
		/* get more nuanced writeback errors */
		rc = filemap_check_wb_err(file->f_mapping, 0);
		trace_cifs_flush_err(inode->i_ino, rc);
	}
	return rc;
}
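/*
 * Buffered write with mandatory byte-range lock checking: lock_sem is held
 * across the write so no brlock that would conflict with the range can be
 * added while the write is in progress.
 */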
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	ssize_t rc;

	rc = netfs_start_io_write(inode);
	if (rc < 0)
		return rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))) {
		rc = -EACCES;
		goto out;
	}

	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);

out:
	up_read(&cinode->lock_sem);
	netfs_end_io_write(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
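/*
 * Write path for strict cache mode: write through the cache only while we
 * hold a write oplock/lease; otherwise send the data straight to the
 * server and invalidate any read-cached pages it makes stale.
 */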
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = netfs_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause an error with mandatory locks on
	 * these pages but not on the region from pos to pos+len-1.
	 */
	written = netfs_file_write_iter(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
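/*
 * Read path for loose cache mode: revalidate the mapping against the
 * server before reading through the page cache.
 */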
ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_DIRECT)
		return netfs_unbuffered_read_iter(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return netfs_file_read_iter(iocb, iter);
}
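/*
 * Default write path (loose cache mode): O_DIRECT writes bypass the cache;
 * buffered writes are flushed immediately if we hold no write oplock.
 */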
ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = netfs_unbuffered_write_iter(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = netfs_file_write_iter(iocb, from);

	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
		rc = filemap_fdatawrite(inode->i_mapping);
		if (rc)
			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
				 rc, inode);
	}

	cifs_put_writer(cinode);
	return written;
}
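/*
 * Read path for strict cache mode: read through the cache only while we
 * hold a read oplock/lease, and honour mandatory byte-range locks under
 * lock_sem before touching the range.
 */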
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with page reading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return netfs_unbuffered_read_iter(iocb, to);

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			return netfs_unbuffered_read_iter(iocb, to);
		return netfs_buffered_read_iter(iocb, to);
	}

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	if (iocb->ki_flags & IOCB_DIRECT) {
		rc = netfs_start_io_direct(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = netfs_unbuffered_read_iter_locked(iocb, to);
		up_read(&cinode->lock_sem);
		netfs_end_io_direct(inode);
	} else {
		rc = netfs_start_io_read(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = filemap_read(iocb, to, 0);
		up_read(&cinode->lock_sem);
		netfs_end_io_read(inode);
	}
out:
	return rc;
}
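/*
 * mmap support: faults are serviced from the page cache; page_mkwrite is
 * routed through netfs so dirty state is tracked for writeback.
 */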
static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);
}

static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int xid, rc = 0;
	struct inode *inode = file_inode(file);

	xid = get_xid();

	if (!CIFS_CACHE_READ(CIFS_I(inode)))
		rc = cifs_zap_mapping(inode);
	if (!rc)
		rc = generic_file_mmap(file, vma);
	if (!rc)
		vma->vm_ops = &cifs_file_vm_ops;

	free_xid(xid);
	return rc;
}
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();

	rc = cifs_revalidate_file(file);
	if (rc)
		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
			 rc);
	if (!rc)
		rc = generic_file_mmap(file, vma);
	if (!rc)
		vma->vm_ops = &cifs_file_vm_ops;

	free_xid(xid);
	return rc;
}
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_inode->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return 0;
}
/*
 * We do not want to update the file size from server for inodes
 * open for write - to avoid races with writepage extending
 * the file - in the future we could consider allowing
 * refreshing the inode only on increases in the file size
 * but this is tricky to do without racing with writebehind
 * page caching in the current Linux kernel design
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
			    bool from_readdir)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode) ||
		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
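/*
 * Worker for an oplock/lease break from the server: downgrade the cached
 * oplock level, write back and possibly zap cached data, push cached
 * byte-range locks to the server, then acknowledge the break unless the
 * handle has already been closed.
 */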
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct tcon_link *tlink;
	int rc = 0;
	bool purge_cache = false, oplock_break_cancelled;
	__u64 persistent_fid, volatile_fid;
	__u16 net_fid;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
				      cfile->oplock_epoch, &purge_cache);

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (CIFS_CACHE_WRITE(cinode))
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When oplock break is received and there are no active
	 * file handles but cached, then schedule deferred close immediately.
	 * So, new open will not use cached handle.
	 */

	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
		cifs_close_deferred_file(cinode);

	persistent_fid = cfile->fid.persistent_fid;
	volatile_fid = cfile->fid.volatile_fid;
	net_fid = cfile->fid.netfid;
	oplock_break_cancelled = cfile->oplock_break_cancelled;

	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	/*
	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
	 * an acknowledgment to be sent when the file has already been closed.
	 */
	spin_lock(&cinode->open_file_lock);
	/* check list empty since can race with kill_sb calling tree disconnect */
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid, cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	} else
		spin_unlock(&cinode->open_file_lock);

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
}
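/*
 * Swap-over-SMB3 hooks (experimental): the swapfile must have no holes,
 * and swap I/O is performed unbuffered, much like direct I/O.
 */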
static int cifs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	struct cifsFileInfo *cfile = swap_file->private_data;
	struct inode *inode = swap_file->f_mapping->host;
	unsigned long blocks;
	long long isize;

	cifs_dbg(FYI, "swap activate\n");

	if (!swap_file->f_mapping->a_ops->swap_rw)
		/* Cannot support swap */
		return -EINVAL;

	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	if (blocks*512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}
	*span = sis->pages;

	pr_warn_once("Swap support over SMB3 is experimental\n");

	/*
	 * TODO: consider adding ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
	 */


	/* TODO: add sk_set_memalloc(inet) or similar */

	if (cfile)
		cfile->swapfile = true;
	/*
	 * TODO: Since file already open, we can't open with DENY_ALL here
	 * but we could add call to grab a byte range lock to prevent others
	 * from reading or writing the file
	 */

	sis->flags |= SWP_FS_OPS;
	return add_swap_extent(sis, 0, sis->max, 0);
}
static void cifs_swap_deactivate(struct file *file)
{
	struct cifsFileInfo *cfile = file->private_data;

	cifs_dbg(FYI, "swap deactivate\n");

	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */

	if (cfile)
		cfile->swapfile = false;

	/* do we need to unpin (or unlock) the file */
}
/**
 * cifs_swap_rw - SMB3 address space operation for swap I/O
 * @iocb: target I/O control block
 * @iter: iterator describing the data to read or write
 *
 * Perform IO to the swap-file.  This is much like direct IO.
 */
static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;

	if (iov_iter_rw(iter) == READ)
		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
	else
		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
	if (ret < 0)
		return ret;
	return 0;
}
const struct address_space_operations cifs_addr_ops = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.writepages = netfs_writepages,
	.dirty_folio = netfs_dirty_folio,
	.release_folio = netfs_release_folio,
	.direct_IO = noop_direct_IO,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio = filemap_migrate_folio,
	/*
	 * TODO: investigate and if useful we could add an is_dirty_writeback
	 * helper if needed
	 */
	.swap_activate = cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
	.swap_rw = cifs_swap_rw,
};
/*
 * cifs_readahead requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readahead out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.read_folio = netfs_read_folio,
	.writepages = netfs_writepages,
	.dirty_folio = netfs_dirty_folio,
	.release_folio = netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio = filemap_migrate_folio