/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset to seek to
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */
static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}
/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}
/*
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int i, error;
	u32 gfsflags, fsflags = 0;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	gfsflags = ip->i_diskflags;
	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;
	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;

	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}
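/*
 * The i_eattr/is_sxid() check above lets inodes with no extended attributes
 * and no setuid/setgid bits be marked S_NOSEC, so the VFS can skip the
 * "strip privileges on write" work in the common write path.
 */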
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA | \
			     GFS2_DIF_IMMUTABLE | \
			     GFS2_DIF_APPENDONLY | \
			     GFS2_DIF_NOATIME | \
			     GFS2_DIF_SYNC | \
			     GFS2_DIF_TOPDIR | \
			     GFS2_DIF_INHERIT_JDATA)
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 * Returns: errno
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode->i_ctime = current_time(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags = 0;
	u32 mask;
	int i;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(filp, gfsflags, mask);
}
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}
	return -ENOTTY;
}
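/*
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS are the generic inode-flags ioctls used by
 * chattr(1)/lsattr(1).  A minimal userspace sketch (illustrative only, not
 * part of this file):
 *
 *	unsigned int fsflags;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &fsflags);
 *	fsflags |= FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &fsflags);
 */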
/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_res.rs_sizehint))
		atomic_set(&ip->i_res.rs_sizehint, hint);
}
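/*
 * Worked example (illustrative numbers): with a 4096-byte block size
 * (sb_bsize_shift == 12), a 1 MiB write gives
 * blks = (1048576 + 4095) >> 12 = 256, so rs_sizehint is raised to 256
 * blocks unless a larger hint is already recorded.
 */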
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_SIZE;
	u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while (size > 0);
	return 0;
}
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page->index << PAGE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}
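/*
 * block_page_mkwrite_return() converts the errno in "ret" into the VM_FAULT_*
 * code expected by the fault handler: 0 becomes VM_FAULT_LOCKED (the page is
 * returned still locked and dirty), -EAGAIN and -EFAULT become
 * VM_FAULT_NOPAGE (retry the fault), -ENOMEM becomes VM_FAULT_OOM, and other
 * errors become VM_FAULT_SIGBUS.
 */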
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};
/**
 * gfs2_mmap - set up a memory mapping for a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0 or errno
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}
/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not, depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size up to date for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rsqa_delete(ip, &inode->i_writecount);
	return 0;
}
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY_ALL;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}
/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_write_iter(iocb, from);
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}
/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip: The inode in question.
 * @len: Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks: Compute and return the number of indirect blocks needed
 * @max_blocks: The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
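/*
 * Illustrative example (assumed on-disk geometry, not taken from this file):
 * with a 4 KiB block size an indirect block holds roughly 500 pointers, so if
 * max_data started at 10000 the loop above would peel off about 20 blocks for
 * the first indirect level (10000/500), leaving ~9980 data blocks, repeating
 * until tmp no longer exceeds sd_diptrs.
 */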
static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks = UINT_MAX;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE / sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
		i_size_write(inode, pos + count);
		file_update_time(file);
		mark_inode_dirty(inode);
	}

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
				       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}
static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = __gfs2_punch_hole(file, offset, len);
	} else {
		ret = gfs2_rsqa_alloc(ip);
		if (ret)
			goto out_putw;

		ret = __gfs2_fallocate(file, mode, offset, len);

		if (ret)
			gfs2_rs_deltree(&ip->i_res);
	}

out_putw:
	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}
static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	int error;
	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);

	error = gfs2_rsqa_alloc(ip);
	if (error)
		return (ssize_t)error;

	gfs2_size_hint(out, *ppos, len);

	return iter_file_splice_write(pipe, out, ppos, len, flags);
}
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		if (fl_gh->gh_state == state)
			goto out;
		locks_lock_file_wait(file,
				     &(struct file_lock) {
					     .fl_type = F_UNLCK,
					     .fl_flags = FL_FLOCK
				     });
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}
/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
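/*
 * For reference, this is how flock(2) requests map onto the glock modes used
 * above: LOCK_SH takes the flock glock in LM_ST_SHARED, LOCK_EX takes it in
 * LM_ST_EXCLUSIVE, and a non-blocking request (LOCK_NB, i.e. not IS_SETLKW)
 * is issued with LM_FLAG_TRY_1CB so it fails with -EAGAIN instead of waiting.
 * A hypothetical caller sketch:
 *
 *	flock(fd, LOCK_EX | LOCK_NB);	// cluster-wide exclusive, no wait
 */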
const struct file_operations gfs2_file_fops = {
	.llseek = gfs2_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = gfs2_file_write_iter,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_release,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = gfs2_file_splice_write,
	.setlease = simple_nosetlease,
	.fallocate = gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops = {
	.iterate_shared = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_release,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
	.llseek = default_llseek,
};
#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
const struct file_operations gfs2_file_fops_nolock = {
	.llseek = gfs2_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = gfs2_file_write_iter,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_release,
	.fsync = gfs2_fsync,
	.splice_read = generic_file_splice_read,
	.splice_write = gfs2_file_splice_write,
	.setlease = generic_setlease,
	.fallocate = gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_release,
	.fsync = gfs2_fsync,
	.llseek = default_llseek,
};