/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#if defined(XFS_RW_TRACE)
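/*
 * Tracing helpers, built only when XFS_RW_TRACE is defined.  Each call
 * logs an entry into the per-inode ktrace buffer (ip->i_rwtrace); 64-bit
 * quantities such as file sizes and offsets are split into high/low
 * 32-bit halves so each slot fits in a void pointer on 32-bit kernels.
 */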
        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(unsigned long)tag,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)segs),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)ioflags),
                (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
xfs_inval_cached_trace(
        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(__psint_t)XFS_INVAL_CACHED,
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)((len >> 32) & 0xffffffff)),
                (void *)((unsigned long)(len & 0xffffffff)),
                (void *)((unsigned long)((first >> 32) & 0xffffffff)),
                (void *)((unsigned long)(first & 0xffffffff)),
                (void *)((unsigned long)((last >> 32) & 0xffffffff)),
                (void *)((unsigned long)(last & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
/*
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
        struct xfs_inode        *ip,    /* inode */
        loff_t                  pos,    /* offset in file */
        size_t                  count)  /* size of data to zero */
{
        struct address_space    *mapping;

        mapping = ip->i_vnode->i_mapping;

        unsigned offset, bytes;

        offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
        bytes = PAGE_CACHE_SIZE - offset;

        status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                AOP_FLAG_UNINTERRUPTIBLE,
                                &page, &fsdata);

        zero_user(page, offset, bytes);

        status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                page, fsdata);
        WARN_ON(status <= 0); /* can't return less than zero! */
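/*
 * The XFS read path (xfs_read): validate the user iovec, enforce the
 * direct I/O alignment restrictions, send a DMAPI read event when one is
 * enabled, and then hand off to generic_file_aio_read() with the inode
 * iolock held shared.
 */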
ssize_t                         /* bytes read, or (-) error */
        const struct iovec      *iovp,

        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        xfs_mount_t             *mp = ip->i_mount;

        XFS_STATS_INC(xs_read_calls);

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return XFS_ERROR(-EINVAL);
        }
        /* END copy & waste from filemap.c */
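/*
 * Direct I/O must be naturally aligned: both the file offset and the
 * request size have to be multiples of the sector size of the target
 * device (the realtime device for realtime inodes, otherwise the data
 * device), which is what the bt_smask tests below check.
 */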
        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((*offset & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (*offset == ip->i_size) {
                                return 0;
                        }
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (size == 0))
                return 0;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_lock(&inode->i_mutex);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
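/*
 * Send a DMAPI read event before performing the read when the inode has
 * DMAPI events enabled and the caller has not asked for an invisible
 * (IO_INVIS) read.  The &iolock argument lets the event code drop and
 * retake the inode iolock while the event is outstanding.
 */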
        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
                int iolock = XFS_IOLOCK_SHARED;

                ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
                                        dmflags, &iolock);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        if (unlikely(ioflags & IO_ISDIRECT))
                                mutex_unlock(&inode->i_mutex);
                        return ret;
                }
        }

        if (unlikely(ioflags & IO_ISDIRECT)) {
                if (inode->i_mapping->nrpages)
                        ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
                                                   -1, FI_REMAPF_LOCKED);
                mutex_unlock(&inode->i_mutex);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return ret;
                }
        }

        xfs_rw_enter_trace(XFS_READ_ENTER, ip,
                           (void *)iovp, segs, *offset, ioflags);

        iocb->ki_pos = *offset;
        ret = generic_file_aio_read(iocb, iovp, segs, *offset);
        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);

        XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
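/*
 * Splice data from the file into a pipe.  The inode iolock is held
 * shared across the DMAPI read event (if any) and the call to
 * generic_file_splice_read().
 */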
        struct pipe_inode_info  *pipe,

        xfs_mount_t             *mp = ip->i_mount;

        XFS_STATS_INC(xs_read_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_SHARED;

                error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
                                        FILP_DELAY_FLAG(infilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }

        xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);

        XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
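/*
 * Splice data from a pipe into the file.  The iolock is held exclusive;
 * if the splice may extend the file, i_new_size is set up front so that
 * writeback knows about the pending size change, and the in-core and
 * on-disk sizes are reconciled once the splice has completed.
 */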
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,

        xfs_mount_t             *mp = ip->i_mount;
        struct inode            *inode = outfilp->f_mapping->host;
        xfs_fsize_t             isize, new_size;

        XFS_STATS_INC(xs_write_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
                int iolock = XFS_IOLOCK_EXCL;

                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
                                        FILP_DELAY_FLAG(outfilp), &iolock);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return -error;
                }
        }

        new_size = *ppos + count;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (new_size > ip->i_size)
                ip->i_new_size = new_size;
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);

        XFS_STATS_ADD(xs_write_bytes, ret);

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
                *ppos = isize;

        if (*ppos > ip->i_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                if (*ppos > ip->i_size)
                        ip->i_size = *ppos;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

        if (ip->i_new_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                if (ip->i_d.di_size > ip->i_size)
                        ip->i_d.di_size = ip->i_size;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int                              /* error (positive) */
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp = ip->i_mount;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

        last_fsb = XFS_B_TO_FSBT(mp, isize);
        error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL, NULL);

        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK) {
                return 0;
        }
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        error = xfs_iozero(ip, isize, zero_len);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */

int                                     /* error (positive) */
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize)          /* current inode size */
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_fileoff_t   zero_off;
        xfs_fsize_t     zero_len;
        xfs_bmbt_irec_t imap;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, offset, isize);
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL, NULL);
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * This loop handles initializing pages that were
                         * partially initialized by the code below this
                         * loop. It basically zeroes the part of the page
                         * that sits on a hole and sets the page as P_HOLE
                         * and calls remapf if it is a mapped file.
                         */
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                xfs_ilock(ip, XFS_ILOCK_EXCL);
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
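/*
 * The XFS write path (xfs_write): run the generic write checks, send a
 * DMAPI write event when one is enabled, zero any region between the old
 * EOF and a new write offset, and then hand off to
 * generic_file_direct_write() or generic_file_buffered_write() under the
 * appropriate iolock.
 */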
ssize_t                         /* bytes written, or (-) error */
        struct xfs_inode        *xip,
        const struct iovec      *iovp,

        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        unsigned long           segs = nsegs;
        ssize_t                 ret = 0, error = 0;
        xfs_fsize_t             isize, new_size;
        size_t                  ocount = 0, count;

        XFS_STATS_INC(xs_write_calls);

        error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
        if (error)
                return error;

        xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (ioflags & IO_ISDIRECT) {
                iolock = XFS_IOLOCK_SHARED;
        } else {
                iolock = XFS_IOLOCK_EXCL;
                mutex_lock(&inode->i_mutex);
        }

        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
        error = -generic_write_checks(file, &pos, &count,
                                        S_ISBLK(inode->i_mode));
        if (error) {
                xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                goto out_unlock_mutex;
        }

        if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
            !(ioflags & IO_INVIS) && !eventsent)) {
                int             dmflags = FILP_DELAY_FLAG(file);

                if (need_i_mutex)
                        dmflags |= DM_FLAGS_IMUX;

                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
                                      pos, count, dmflags, &iolock);
                if (error)
                        goto out_unlock_internal;
                xfs_ilock(xip, XFS_ILOCK_EXCL);

                /*
                 * The iolock was dropped and reacquired in XFS_SEND_DATA
                 * so we have to recheck the size when appending.
                 * We will only "goto start;" once, since having sent the
                 * event prevents another call to XFS_SEND_DATA, which is
                 * what allows the size to change in the first place.
                 */
                if ((file->f_flags & O_APPEND) && pos != xip->i_size)
                        goto start;
        }
        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(xip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;

                if ((pos & target->bt_smask) || (count & target->bt_smask)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        return XFS_ERROR(-EINVAL);
                }

                if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        iolock = XFS_IOLOCK_EXCL;
                        mutex_lock(&inode->i_mutex);
                        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
                        goto start;
                }
        }

        new_size = pos + count;
        if (new_size > xip->i_size)
                xip->i_new_size = new_size;

        /*
         * We're not supposed to change timestamps in readonly-mounted
         * filesystems.  Throw it away if anyone asks us.
         */
        if (likely(!(ioflags & IO_INVIS) &&
                   !mnt_want_write(file->f_path.mnt))) {
                file_update_time(file);
                xfs_ichgtime_fast(xip, inode,
                                  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
                mnt_drop_write(file->f_path.mnt);
        }

        /*
         * If the offset is beyond the size of the file, we have a couple
         * of things to do. First, if there is already space allocated
         * we need to either create holes or zero the disk or ...
         *
         * If there is a page where the previous size lands, we need
         * to zero it out up to the new size.
         */
        if (pos > xip->i_size) {
                error = xfs_zero_eof(xip, pos, xip->i_size);
                if (error) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL);
                        goto out_unlock_internal;
                }
        }
        xfs_iunlock(xip, XFS_ILOCK_EXCL);

        /*
         * If we're writing the file then make sure to clear the
         * setuid and setgid bits if the process is not being run
         * by root.  This keeps people from modifying setuid and
         * setgid binaries.
         */
        if (((xip->i_d.di_mode & S_ISUID) ||
            ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
                (S_ISGID | S_IXGRP))) &&
             !capable(CAP_FSETID)) {
                error = xfs_write_clear_setuid(xip);
                if (likely(!error))
                        error = -remove_suid(file->f_path.dentry);
                if (unlikely(error)) {
                        goto out_unlock_internal;
                }
        }

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;
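/*
 * For direct I/O the page cache must not hold stale data over the range
 * being written, so any cached pages are flushed and invalidated first.
 * Once the cache is clean, the exclusive iolock (and i_mutex) can be
 * demoted to shared for the actual direct write.
 */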
        if ((ioflags & IO_ISDIRECT)) {
                if (mapping->nrpages) {
                        WARN_ON(need_i_mutex == 0);
                        xfs_inval_cached_trace(xip, pos, -1,
                                        (pos & PAGE_CACHE_MASK), -1);
                        error = xfs_flushinval_pages(xip,
                                        (pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                        if (error)
                                goto out_unlock_internal;
                }

                /* demote the lock now the cached pages are gone */
                xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
                mutex_unlock(&inode->i_mutex);

                iolock = XFS_IOLOCK_SHARED;

                xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_direct_write(iocb, iovp,
                                &segs, pos, offset, count, ocount);

                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
                if (ret >= 0 && ret != count) {
                        XFS_STATS_ADD(xs_write_bytes, ret);

                        ioflags &= ~IO_ISDIRECT;
                        xfs_iunlock(xip, iolock);
                }
        } else {
                xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_buffered_write(iocb, iovp, segs,
                                pos, offset, count, ret);
        }

        current->backing_dev_info = NULL;

        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);
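/*
 * On ENOSPC, give the DMAPI application a chance to free up space and
 * then retry the write.  All locks are dropped around the
 * DM_EVENT_NOSPACE event and retaken before trying again.
 */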
        if (ret == -ENOSPC &&
            DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
                                DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
                                0, 0, 0); /* Delay flag intentionally unused */
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);
                if (error)
                        goto out_unlock_internal;
                goto start;
        }

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
                *offset = isize;

        if (*offset > xip->i_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                if (*offset > xip->i_size)
                        xip->i_size = *offset;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }

        if (ret <= 0)
                goto out_unlock_internal;

        XFS_STATS_ADD(xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
                int error2;

                xfs_iunlock(xip, iolock);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error2 = sync_page_range(inode, mapping, pos, ret);
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_ilock(xip, iolock);
                error2 = xfs_write_sync_logforce(mp, xip);
        }

 out_unlock_internal:
        if (xip->i_new_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                /*
                 * If this was a direct or synchronous I/O that failed (such
                 * as ENOSPC) then part of the I/O may have been written to
                 * disk before the error occurred.  In this case the on-disk
                 * file size may have been adjusted beyond the in-memory file
                 * size and now needs to be truncated back.
                 */
                if (xip->i_d.di_size > xip->i_size)
                        xip->i_d.di_size = xip->i_size;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(xip, iolock);
 out_unlock_mutex:
        if (need_i_mutex)
                mutex_unlock(&inode->i_mutex);
/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shut down the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
        xfs_mount_t     *mp;

        mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);
                return 0;
        } else {
                xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
                /*
                 * Metadata write that didn't get logged but
                 * written delayed anyway. These aren't associated
                 * with a transaction, and can be ignored.
                 */
                if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
                    (XFS_BUF_ISREAD(bp)) == 0)
                        return (xfs_bioerror_relse(bp));
                else
                        return (xfs_bioerror(bp));
        }
}
/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
        struct xfs_mount        *mp,

        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);
                return;
        }

        xfs_buftrace("XFSBDSTRAT IOERROR", bp);
        xfs_bioerror_relse(bp);
/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
xfs_dev_is_read_only(
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                        "XFS: %s required on read-only device.", message);
                        "XFS: write access unavailable, cannot proceed.");
                return EROFS;
        }
        return 0;