/*
 *  linux/fs/9p/vfs_file.c
 *
 * This file contains vfs file ops for 9P2000.
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <linux/idr.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"
static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
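
/*
 * Two vm_operations variants are defined below: v9fs_file_vm_ops for the
 * cached modes, and v9fs_mmap_file_vm_ops for cache=mmap, which additionally
 * writes dirty pages back to the server when the VMA is closed.
 */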
/**
 * v9fs_file_open - open a file (or directory)
 * @inode: inode to be opened
 * @file: file being opened
 *
 */

int v9fs_file_open(struct inode *inode, struct file *file)
{
        int err;
        struct v9fs_inode *v9inode;
        struct v9fs_session_info *v9ses;
        struct p9_fid *fid;
        int omode;

        p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
        v9inode = V9FS_I(inode);
        v9ses = v9fs_inode2v9ses(inode);
        if (v9fs_proto_dotl(v9ses))
                omode = v9fs_open_to_dotl_flags(file->f_flags);
        else
                omode = v9fs_uflags2omode(file->f_flags,
                                          v9fs_proto_dotu(v9ses));
        fid = file->private_data;
        if (!fid) {
                fid = v9fs_fid_clone(file->f_path.dentry);
                if (IS_ERR(fid))
                        return PTR_ERR(fid);

                err = p9_client_open(fid, omode);
                if (err < 0) {
                        p9_client_clunk(fid);
                        return err;
                }
                if ((file->f_flags & O_APPEND) &&
                    (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
                        generic_file_llseek(file, 0, SEEK_END);
        }

        file->private_data = fid;
        mutex_lock(&v9inode->v_mutex);
        if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
            !v9inode->writeback_fid &&
            ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
                /*
                 * clone a fid and add it to writeback_fid
                 * we do it during open time instead of
                 * page dirty time via write_begin/page_mkwrite
                 * because we want write after unlink usecase
                 * to work.
                 */
                fid = v9fs_writeback_fid(file->f_path.dentry);
                if (IS_ERR(fid)) {
                        err = PTR_ERR(fid);
                        mutex_unlock(&v9inode->v_mutex);
                        goto out_error;
                }
                v9inode->writeback_fid = (void *) fid;
        }
        mutex_unlock(&v9inode->v_mutex);
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
                v9fs_cache_inode_set_cookie(inode, file);
        return 0;
out_error:
        p9_client_clunk(file->private_data);
        file->private_data = NULL;
        return err;
}
/**
 * v9fs_file_lock - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * Bugs: this looks like a local only lock, we should extend into 9P
 *       by using open exclusive
 */

static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
        int res = 0;
        struct inode *inode = file_inode(filp);

        p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);

        /* No mandatory locks */
        if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
                return -ENOLCK;

        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
        }

        return res;
}
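
/*
 * v9fs_file_do_lock - set or clear a POSIX byte-range lock over 9P2000.L
 *
 * The lock is first recorded locally via posix_lock_file_wait() and then
 * translated into a TLOCK request for the server.  For blocking requests
 * (F_SETLKW) the client keeps retrying while the server answers
 * P9_LOCK_BLOCKED.
 */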
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
        struct p9_flock flock;
        struct p9_fid *fid;
        uint8_t status;
        int res = 0;
        unsigned char fl_type;

        fid = filp->private_data;
        BUG_ON(fid == NULL);

        if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
                BUG();

        res = posix_lock_file_wait(filp, fl);
        if (res < 0)
                goto out;

        /* convert posix lock to p9 tlock args */
        memset(&flock, 0, sizeof(flock));
        /* map the lock type */
        switch (fl->fl_type) {
        case F_RDLCK:
                flock.type = P9_LOCK_TYPE_RDLCK;
                break;
        case F_WRLCK:
                flock.type = P9_LOCK_TYPE_WRLCK;
                break;
        case F_UNLCK:
                flock.type = P9_LOCK_TYPE_UNLCK;
                break;
        }
        flock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                flock.length = 0;
        else
                flock.length = fl->fl_end - fl->fl_start + 1;
        flock.proc_id = fl->fl_pid;
        flock.client_id = fid->clnt->name;
        if (IS_SETLKW(cmd))
                flock.flags = P9_LOCK_FLAGS_BLOCK;

        /*
         * if it's a blocked request and we get P9_LOCK_BLOCKED as the status
         * for the lock request, keep on trying
         */
        for (;;) {
                res = p9_client_lock_dotl(fid, &flock, &status);
                if (res < 0)
                        goto out_unlock;

                if (status != P9_LOCK_BLOCKED)
                        break;
                if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
                        break;
                if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
                        break;
        }

        /* map 9p status to VFS status */
        switch (status) {
        case P9_LOCK_SUCCESS:
                res = 0;
                break;
        case P9_LOCK_BLOCKED:
                res = -EAGAIN;
                break;
        default:
                res = -ENOLCK;
                break;
        }

out_unlock:
        /*
         * In case the server returned an error for the lock request,
         * revert it locally.
         */
        if (res < 0 && fl->fl_type != F_UNLCK) {
                fl_type = fl->fl_type;
                fl->fl_type = F_UNLCK;
                res = posix_lock_file_wait(filp, fl);
                fl->fl_type = fl_type;
        }
out:
        return res;
}
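
/*
 * v9fs_file_getlock - test for a conflicting lock (F_GETLK)
 *
 * The local lock table is consulted first; a TGETLOCK request is sent to
 * the server only when no local conflict is found.
 */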
static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
        struct p9_getlock glock;
        struct p9_fid *fid;
        int res = 0;

        fid = filp->private_data;
        BUG_ON(fid == NULL);

        posix_test_lock(filp, fl);
        /*
         * if we have a conflicting lock locally, no need to validate
         * with the server
         */
        if (fl->fl_type != F_UNLCK)
                return res;

        /* convert posix lock to p9 tgetlock args */
        memset(&glock, 0, sizeof(glock));
        glock.type  = P9_LOCK_TYPE_UNLCK;
        glock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                glock.length = 0;
        else
                glock.length = fl->fl_end - fl->fl_start + 1;
        glock.proc_id = fl->fl_pid;
        glock.client_id = fid->clnt->name;

        res = p9_client_getlock_dotl(fid, &glock);
        if (res < 0)
                return res;
        /* map 9p lock type to os lock type */
        switch (glock.type) {
        case P9_LOCK_TYPE_RDLCK:
                fl->fl_type = F_RDLCK;
                break;
        case P9_LOCK_TYPE_WRLCK:
                fl->fl_type = F_WRLCK;
                break;
        case P9_LOCK_TYPE_UNLCK:
                fl->fl_type = F_UNLCK;
                break;
        }
        if (glock.type != P9_LOCK_TYPE_UNLCK) {
                fl->fl_start = glock.start;
                if (glock.length == 0)
                        fl->fl_end = OFFSET_MAX;
                else
                        fl->fl_end = glock.start + glock.length - 1;
                fl->fl_pid = glock.proc_id;
        }
        return res;
}
/**
 * v9fs_file_lock_dotl - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(filp);
        int ret = -ENOLCK;

        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
                 filp, cmd, fl, filp->f_path.dentry->d_name.name);

        /* No mandatory locks */
        if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
                goto out_err;

        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
        }

        if (IS_SETLK(cmd) || IS_SETLKW(cmd))
                ret = v9fs_file_do_lock(filp, cmd, fl);
        else if (IS_GETLK(cmd))
                ret = v9fs_file_getlock(filp, fl);
        else
                ret = -EINVAL;
out_err:
        return ret;
}
/**
 * v9fs_file_flock_dotl - lock a file
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_flock_dotl(struct file *filp, int cmd,
                                struct file_lock *fl)
{
        struct inode *inode = file_inode(filp);
        int ret = -ENOLCK;

        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
                 filp, cmd, fl, filp->f_path.dentry->d_name.name);

        /* No mandatory locks */
        if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
                goto out_err;

        if (!(fl->fl_flags & FL_FLOCK))
                goto out_err;

        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
        }
        /* Convert flock to posix lock */
        fl->fl_owner = (fl_owner_t)filp;
        fl->fl_start = 0;
        fl->fl_end = OFFSET_MAX;
        fl->fl_flags |= FL_POSIX;
        fl->fl_flags ^= FL_FLOCK;

        if (IS_SETLK(cmd) || IS_SETLKW(cmd))
                ret = v9fs_file_do_lock(filp, cmd, fl);
        else
                ret = -EINVAL;
out_err:
        return ret;
}
/**
 * v9fs_fid_readn - read from a fid
 * @fid: fid to read from
 * @data: data buffer to read data into
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
ssize_t
v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
               u64 offset)
{
        int n, total, size;

        p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
                 fid->fid, (long long unsigned)offset, count);
        n = 0;
        total = 0;
        /* read in chunks of at most iounit (or msize - header) bytes */
        size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
        do {
                n = p9_client_read(fid, data, udata, offset, count);
                if (n <= 0)
                        break;

                if (data)
                        data += n;
                if (udata)
                        udata += n;

                offset += n;
                count -= n;
                total += n;
        } while (count > 0 && n == size);

        if (n < 0)
                total = n;

        return total;
}
/**
 * v9fs_file_readn - read from a file
 * @filp: file pointer to read
 * @data: data buffer to read data into
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
ssize_t
v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
                u64 offset)
{
        return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
}
/**
 * v9fs_file_read - read from a file
 * @filp: file pointer to read
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
static ssize_t
v9fs_file_read(struct file *filp, char __user *udata, size_t count,
               loff_t *offset)
{
        int ret;
        struct p9_fid *fid;
        size_t size;

        p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
        fid = filp->private_data;

        size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
        if (count > size)
                ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
        else
                ret = p9_client_read(fid, NULL, udata, *offset, count);

        if (ret > 0)
                *offset += ret;

        return ret;
}
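
/*
 * v9fs_file_write_internal - write data to a fid, looping until the whole
 * request has been sent.  When @invalidate is set, the affected page-cache
 * range is invalidated and the inode size is updated if the write extended
 * the file.
 */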
ssize_t
v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
                         const char __user *data, size_t count,
                         loff_t *offset, int invalidate)
{
        int n;
        loff_t i_size;
        size_t total = 0;
        loff_t origin = *offset;
        unsigned long pg_start, pg_end;

        p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
                 data, (int)count, (int)*offset);

        do {
                n = p9_client_write(fid, NULL, data+total, origin+total, count);
                if (n <= 0)
                        break;
                count -= n;
                total += n;
        } while (count > 0);

        if (invalidate && (total > 0)) {
                pg_start = origin >> PAGE_CACHE_SHIFT;
                pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
                if (inode->i_mapping && inode->i_mapping->nrpages)
                        invalidate_inode_pages2_range(inode->i_mapping,
                                                      pg_start, pg_end);
                *offset += total;
                i_size = i_size_read(inode);
                if (*offset > i_size) {
                        inode_add_bytes(inode, *offset - i_size);
                        i_size_write(inode, *offset);
                }
        }
        if (n < 0)
                return n;

        return total;
}
/**
 * v9fs_file_write - write to a file
 * @filp: file pointer to write
 * @data: data buffer to write data from
 * @count: size of buffer
 * @offset: offset at which to write data
 *
 */
static ssize_t
v9fs_file_write(struct file *filp, const char __user *data,
                size_t count, loff_t *offset)
{
        ssize_t retval = 0;
        loff_t origin = *offset;

        retval = generic_write_checks(filp, &origin, &count, 0);
        if (retval)
                goto out;

        retval = -EINVAL;
        if ((ssize_t) count < 0)
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = v9fs_file_write_internal(file_inode(filp),
                                          filp->private_data,
                                          data, count, &origin, 1);
        /* update offset on successful write */
        if (retval > 0)
                *offset = origin;
out:
        return retval;
}
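
/*
 * v9fs_file_fsync - fsync for legacy 9P2000/9P2000.u.  These protocol
 * versions have no dedicated fsync request, so after writing back the page
 * cache a blank wstat is sent, which servers interpret as a request to
 * commit the file contents to stable storage.
 */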
static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
                           int datasync)
{
        struct p9_fid *fid;
        struct inode *inode = filp->f_mapping->host;
        struct p9_wstat wstat;
        int retval;

        retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (retval)
                return retval;

        mutex_lock(&inode->i_mutex);
        p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

        fid = filp->private_data;
        v9fs_blank_wstat(&wstat);

        retval = p9_client_wstat(fid, &wstat);
        mutex_unlock(&inode->i_mutex);

        return retval;
}
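
/*
 * v9fs_file_fsync_dotl - fsync for 9P2000.L, which has a native TFSYNC
 * request carrying the datasync flag.
 */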
int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
                         int datasync)
{
        struct p9_fid *fid;
        struct inode *inode = filp->f_mapping->host;
        int retval;

        retval = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (retval)
                return retval;

        mutex_lock(&inode->i_mutex);
        p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

        fid = filp->private_data;

        retval = p9_client_fsync(fid, datasync);
        mutex_unlock(&inode->i_mutex);

        return retval;
}
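
/*
 * v9fs_file_mmap - mmap for the cached modes: generic page-cache based
 * mmap with v9fs_vm_page_mkwrite hooked in for write faults.
 */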
static int
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int retval;

        retval = generic_file_mmap(filp, vma);
        if (!retval)
                vma->vm_ops = &v9fs_file_vm_ops;

        return retval;
}
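
/*
 * v9fs_mmap_file_mmap - mmap for cache=mmap; like v9fs_file_mmap but also
 * sets up a writeback fid for writable mappings and installs the vm_ops
 * variant that flushes dirty pages on VMA close.
 */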
static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int retval;
        struct inode *inode;
        struct v9fs_inode *v9inode;
        struct p9_fid *fid;

        inode = file_inode(filp);
        v9inode = V9FS_I(inode);
        mutex_lock(&v9inode->v_mutex);
        if (!v9inode->writeback_fid &&
            (vma->vm_flags & VM_WRITE)) {
                /*
                 * clone a fid and add it to writeback_fid
                 * we do it during mmap instead of
                 * page dirty time via write_begin/page_mkwrite
                 * because we want write after unlink usecase
                 * to work.
                 */
                fid = v9fs_writeback_fid(filp->f_path.dentry);
                if (IS_ERR(fid)) {
                        retval = PTR_ERR(fid);
                        mutex_unlock(&v9inode->v_mutex);
                        return retval;
                }
                v9inode->writeback_fid = (void *) fid;
        }
        mutex_unlock(&v9inode->v_mutex);

        retval = generic_file_mmap(filp, vma);
        if (!retval)
                vma->vm_ops = &v9fs_mmap_file_vm_ops;

        return retval;
}
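
/*
 * v9fs_vm_page_mkwrite - make a page writable.  The page is locked and kept
 * locked (VM_FAULT_LOCKED) so it stays stable while being dirtied; later
 * writeback to the server goes through the writeback fid.
 */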
static int
v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct v9fs_inode *v9inode;
        struct page *page = vmf->page;
        struct file *filp = vma->vm_file;
        struct inode *inode = file_inode(filp);

        p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
                 page, (unsigned long)filp->private_data);

        /* Update file times before taking page lock */
        file_update_time(filp);

        v9inode = V9FS_I(inode);
        /* make sure the cache has finished storing the page */
        v9fs_fscache_wait_on_page_write(inode, page);
        BUG_ON(!v9inode->writeback_fid);
        lock_page(page);
        if (page->mapping != inode->i_mapping)
                goto out_unlock;
        wait_for_stable_page(page);

        return VM_FAULT_LOCKED;
out_unlock:
        unlock_page(page);
        return VM_FAULT_NOPAGE;
}
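
/*
 * v9fs_direct_read - O_DIRECT read path: flush any cached dirty data for the
 * requested range back to the server first, then read synchronously via
 * v9fs_file_read().
 */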
static ssize_t
v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
                 loff_t *offsetp)
{
        loff_t size, offset;
        struct inode *inode;
        struct address_space *mapping;

        offset = *offsetp;
        mapping = filp->f_mapping;
        inode = mapping->host;
        if (!count)
                return 0;
        size = i_size_read(inode);
        if (offset < size)
                filemap_write_and_wait_range(mapping, offset,
                                             offset + count - 1);

        return v9fs_file_read(filp, udata, count, offsetp);
}
/**
 * v9fs_cached_file_read - read from a file
 * @filp: file pointer to read
 * @data: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
static ssize_t
v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
                      loff_t *offset)
{
        if (filp->f_flags & O_DIRECT)
                return v9fs_direct_read(filp, data, count, offset);
        return do_sync_read(filp, data, count, offset);
}
/**
 * v9fs_mmap_file_read - read from a file
 * @filp: file pointer to read
 * @data: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
static ssize_t
v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count,
                    loff_t *offset)
{
        /* TODO: Check if there are dirty pages */
        return v9fs_file_read(filp, data, count, offset);
}
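
/*
 * v9fs_direct_write - O_DIRECT write path: write back and invalidate the
 * affected page-cache range before writing through to the server; if the
 * cache cannot be invalidated, fall back to the buffered write path.
 */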
static ssize_t
v9fs_direct_write(struct file *filp, const char __user *data,
                  size_t count, loff_t *offsetp)
{
        loff_t offset;
        ssize_t retval;
        struct inode *inode;
        struct address_space *mapping;

        offset = *offsetp;
        mapping = filp->f_mapping;
        inode = mapping->host;
        if (!count)
                return 0;

        mutex_lock(&inode->i_mutex);
        retval = filemap_write_and_wait_range(mapping, offset,
                                              offset + count - 1);
        if (retval)
                goto err_out;
        /*
         * After a write we want buffered reads to be sure to go to disk to
         * get the new data.  We invalidate clean cached pages from the
         * region we're about to write.  We do this *before* the write so
         * that if we fail here we fall back to buffered write.
         */
        if (mapping->nrpages) {
                pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
                pgoff_t pg_end   = (offset + count - 1) >> PAGE_CACHE_SHIFT;

                retval = invalidate_inode_pages2_range(mapping,
                                                       pg_start, pg_end);
                /*
                 * If a page can not be invalidated, fall back
                 * to buffered write.
                 */
                if (retval) {
                        if (retval == -EBUSY)
                                goto buff_write;
                        goto err_out;
                }
        }
        retval = v9fs_file_write(filp, data, count, offsetp);
err_out:
        mutex_unlock(&inode->i_mutex);
        return retval;

buff_write:
        mutex_unlock(&inode->i_mutex);
        return do_sync_write(filp, data, count, offsetp);
}
/**
 * v9fs_cached_file_write - write to a file
 * @filp: file pointer to write
 * @data: data buffer to write data from
 * @count: size of buffer
 * @offset: offset at which to write data
 *
 */
static ssize_t
v9fs_cached_file_write(struct file *filp, const char __user *data,
                       size_t count, loff_t *offset)
{
        if (filp->f_flags & O_DIRECT)
                return v9fs_direct_write(filp, data, count, offset);
        return do_sync_write(filp, data, count, offset);
}
/**
 * v9fs_mmap_file_write - write to a file
 * @filp: file pointer to write
 * @data: data buffer to write data from
 * @count: size of buffer
 * @offset: offset at which to write data
 *
 */
static ssize_t
v9fs_mmap_file_write(struct file *filp, const char __user *data,
                     size_t count, loff_t *offset)
{
        /*
         * TODO: invalidate mmaps on filp's inode between
         * offset and offset+count
         */
        return v9fs_file_write(filp, data, count, offset);
}
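
/*
 * v9fs_mmap_vm_close - flush dirty pages covered by the VMA back to the
 * server when the mapping goes away (cache=mmap only).
 */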
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
        struct inode *inode;

        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = WB_SYNC_ALL,
                .range_start = vma->vm_pgoff * PAGE_SIZE,
                 /* absolute end, byte at end included */
                .range_end = vma->vm_pgoff * PAGE_SIZE +
                        (vma->vm_end - vma->vm_start - 1),
        };

        p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);

        inode = file_inode(vma->vm_file);

        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        sync_inode(inode, &wbc);
}
static const struct vm_operations_struct v9fs_file_vm_ops = {
        .fault = filemap_fault,
        .page_mkwrite = v9fs_vm_page_mkwrite,
        .remap_pages = generic_file_remap_pages,
};

static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
        .close = v9fs_mmap_vm_close,
        .fault = filemap_fault,
        .page_mkwrite = v9fs_vm_page_mkwrite,
        .remap_pages = generic_file_remap_pages,
};
const struct file_operations v9fs_cached_file_operations = {
        .llseek = generic_file_llseek,
        .read = v9fs_cached_file_read,
        .write = v9fs_cached_file_write,
        .aio_read = generic_file_aio_read,
        .aio_write = generic_file_aio_write,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = v9fs_file_mmap,
        .fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_cached_file_operations_dotl = {
        .llseek = generic_file_llseek,
        .read = v9fs_cached_file_read,
        .write = v9fs_cached_file_write,
        .aio_read = generic_file_aio_read,
        .aio_write = generic_file_aio_write,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
        .flock = v9fs_file_flock_dotl,
        .mmap = v9fs_file_mmap,
        .fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_file_operations = {
        .llseek = generic_file_llseek,
        .read = v9fs_file_read,
        .write = v9fs_file_write,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = generic_file_readonly_mmap,
        .fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_file_operations_dotl = {
        .llseek = generic_file_llseek,
        .read = v9fs_file_read,
        .write = v9fs_file_write,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
        .flock = v9fs_file_flock_dotl,
        .mmap = generic_file_readonly_mmap,
        .fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_mmap_file_operations = {
        .llseek = generic_file_llseek,
        .read = v9fs_mmap_file_read,
        .write = v9fs_mmap_file_write,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = v9fs_mmap_file_mmap,
        .fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_mmap_file_operations_dotl = {
        .llseek = generic_file_llseek,
        .read = v9fs_mmap_file_read,
        .write = v9fs_mmap_file_write,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
        .flock = v9fs_file_flock_dotl,
        .mmap = v9fs_mmap_file_mmap,
        .fsync = v9fs_file_fsync_dotl,
};