/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/writeback.h>
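
/*
 * This file holds the Linux read/write path glue for XFS: the read, splice
 * and write entry points, the EOF-zeroing helpers they rely on, and the
 * buffer-strategy wrappers used to fail I/O once the filesystem has been
 * forcibly shut down.
 */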

#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int			tag,
	xfs_inode_t		*ip,
	void			*data,
	size_t			segs,
	loff_t			offset,
	int			ioflags)
{
	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_inval_cached_trace(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif	/* XFS_RW_TRACE */

/*
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));	/* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

ssize_t			/* bytes read, or (-) error */
xfs_read(
	xfs_inode_t		*ip,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	xfs_mount_t		*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
		int iolock = XFS_IOLOCK_SHARED;

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
					dmflags, &iolock);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (inode->i_mapping->nrpages)
			ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
						    -1, FI_REMAPF_LOCKED);
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, ip,
				(void *)iovp, segs, *offset, ioflags);

	iocb->ki_pos = *offset;
	ret = generic_file_aio_read(iocb, iovp, segs, *offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
ssize_t
xfs_splice_read(
	xfs_inode_t		*ip,
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	int			flags,
	int			ioflags)
{
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_SHARED;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
					FILP_DELAY_FLAG(infilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}

	xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
ssize_t
xfs_splice_write(
	xfs_inode_t		*ip,
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	int			flags,
	int			ioflags)
{
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;
	struct inode		*inode = outfilp->f_mapping->host;
	xfs_fsize_t		isize, new_size;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_EXCL;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
					FILP_DELAY_FLAG(outfilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error)
		return error;
	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */
int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			goto out_lock;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
ssize_t				/* bytes written, or (-) error */
xfs_write(
	struct xfs_inode	*xip,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	int			iolock;
	int			eventsent = 0;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_i_mutex;

	XFS_STATS_INC(xs_write_calls);

	error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
	if (error)
		return error;

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	mp = xip->i_mount;

	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

relock:
	if (ioflags & IO_ISDIRECT) {
		iolock = XFS_IOLOCK_SHARED;
		need_i_mutex = 0;
	} else {
		iolock = XFS_IOLOCK_EXCL;
		need_i_mutex = 1;
		mutex_lock(&inode->i_mutex);
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
				      pos, count, dmflags, &iolock);
		if (error)
			goto out_unlock_internal;
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && pos != xip->i_size)
			goto start;
	}

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(xip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			return XFS_ERROR(-EINVAL);
		}

		if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			iolock = XFS_IOLOCK_EXCL;
			need_i_mutex = 1;
			mutex_lock(&inode->i_mutex);
			xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
			goto start;
		}
	}

	new_size = pos + count;
	if (new_size > xip->i_size)
		xip->i_new_size = new_size;

	if (likely(!(ioflags & IO_INVIS)))
		file_update_time(file);

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */
	if (pos > xip->i_size) {
		error = xfs_zero_eof(xip, pos, xip->i_size);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL);
			goto out_unlock_internal;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */
	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -file_remove_suid(file);
		if (unlikely(error))
			goto out_unlock_internal;
	}

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (mapping->nrpages) {
			WARN_ON(need_i_mutex == 0);
			xfs_inval_cached_trace(xip, pos, -1,
					(pos & PAGE_CACHE_MASK), -1);
			error = xfs_flushinval_pages(xip,
					(pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (error)
				goto out_unlock_internal;
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			need_i_mutex = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		int enospc = 0;
		ssize_t ret2 = 0;

write_retry:
		xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
				*offset, ioflags);
		ret2 = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
		/*
		 * if we just got an ENOSPC, flush the inode now we
		 * aren't holding any page locks and retry *once*
		 */
		if (ret2 == -ENOSPC && !enospc) {
			error = xfs_flush_pages(xip, 0, -1, 0, FI_NONE);
			if (error)
				goto out_unlock_internal;
			enospc = 1;
			goto write_retry;
		}
		ret = ret2;
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
		*offset = isize;

	if (*offset > xip->i_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_size)
			xip->i_size = *offset;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	if (ret == -ENOSPC &&
	    DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
		xfs_iunlock(xip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
				DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(xip, iolock);
		if (error)
			goto out_unlock_internal;
		goto start;
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error2;

		xfs_iunlock(xip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);

		error2 = filemap_write_and_wait_range(mapping, pos, end);
		if (!error)
			error = error2;
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(xip, iolock);

		error2 = xfs_fsync(xip);
		if (!error)
			error = error2;
	}

out_unlock_internal:
	if (xip->i_new_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		xip->i_new_size = 0;
		/*
		 * If this was a direct or synchronous I/O that failed (such
		 * as ENOSPC) then part of the I/O may have been written to
		 * disk before the error occurred.  In this case the on-disk
		 * file size may have been adjusted beyond the in-memory file
		 * size and now needs to be truncated back.
		 */
		if (xip->i_d.di_size > xip->i_size)
			xip->i_d.di_size = xip->i_size;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(xip, iolock);
out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
	return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_iorequest(bp);
		return;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	xfs_bioerror_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}