/*
 * (c) 1996  Hans-Joachim Widmaier - Rewritten
 * (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 * (C) 1992  Eric Youngdale - Modified for ISO 9660 filesystem.
 * (C) 1991  Linus Torvalds - minix filesystem
 * affs regular file handling primitives
 */
#include "affs.h"

#if PAGE_SIZE < 4096
#error PAGE_SIZE must be at least 4096
#endif
static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);
const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= affs_file_fsync,
	.splice_read	= generic_file_splice_read,
};
const struct inode_operations affs_file_inode_operations = {
	.setattr	= affs_notify_change,
};
static int
affs_file_open(struct inode *inode, struct file *filp)
{
	pr_debug("AFFS: open(%lu,%d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
	atomic_inc(&AFFS_I(inode)->i_opencnt);
	return 0;
}
static int
affs_file_release(struct inode *inode, struct file *filp)
{
	pr_debug("AFFS: release(%lu, %d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

	if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
		mutex_lock(&inode->i_mutex);
		if (inode->i_size != AFFS_I(inode)->mmu_private)
			affs_truncate(inode);
		affs_free_prealloc(inode);
		mutex_unlock(&inode->i_mutex);
	}

	return 0;
}
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		/* follow the extension chain for one cache stride */
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	return -EIO;
}
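/*
 * Cache layout note: both extent caches share the single page allocated
 * above.  i_lc is the "linear" half, where slot n holds the on-disk key
 * of extent block n << i_lc_shift; i_ac, starting at AFFS_CACHE_SIZE / 2
 * into the page, is a small associative cache of recently resolved
 * (ext, key) pairs.  When the file outgrows AFFS_LC_SIZE << i_lc_shift
 * extent blocks, the stride (i_lc_shift) is increased and the existing
 * entries are rescaled so that a slot keeps its meaning under the new
 * shift.
 */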
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	/* hook the new extent block into the previous one */
	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}
static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		get_bh(bh);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}
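/*
 * Lookup strategy in affs_get_extblock_slow() below, in order: the block
 * immediately following the cached one (common for sequential I/O), the
 * file header itself for ext == 0, allocation of a brand new extent block
 * when ext is one past i_extcnt, an exact hit in the linear cache when
 * ext is a multiple of the stride, a hit in the associative cache, and
 * finally a forward walk along the on-disk ->extension chain from the
 * nearest cached ancestor.
 */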
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
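/*
 * Worked example (illustrative numbers only): with i_lc_shift == 2, a
 * lookup of ext == 11 gives lc_idx == 2 and lc_off == 3.  If neither the
 * associative cache nor the backward scan finds a closer entry, the walk
 * starts at i_lc[2] (the key of extent block 8) and follows the
 * ->extension links through blocks 8, 9 and 10 to obtain the key of
 * block 11, which is then remembered in i_ac[(11 - 2 - 1) & AFFS_AC_MASK]
 * before the block itself is read.
 */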
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*ext_bh;
	u32			 ext;

	pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);

	BUG_ON(block > (sector_t)0x7fffffffUL);

	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	affs_lock_ext(inode);

	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block", "block already set (%x)", (u32)bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %d", (int)block);
	return -EIO;
err_ext:
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	affs_unlock_ext(inode);
	return -ENOSPC;
}
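/*
 * Mapping arithmetic: a logical file block is split into an extent block
 * index (block / s_hashsize) and a slot within that extent block's table
 * (block % s_hashsize); AFFS_BLOCK(sb, ext_bh, slot) then gives the
 * physical block number.  For example, assuming the common 512-byte
 * block size (s_hashsize == 72), logical block 100 is slot 28 of extent
 * block 1.
 */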
static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, affs_get_block, wbc);
}
static int affs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, affs_get_block);
}
static void affs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, to, inode->i_size);
		affs_truncate(inode);
	}
}
static int affs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				affs_get_block,
				&AFFS_I(mapping->host)->mmu_private);
	if (unlikely(ret))
		affs_write_failed(mapping, pos + len);

	return ret;
}
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}
const struct address_space_operations affs_aops = {
	.readpage = affs_readpage,
	.writepage = affs_writepage,
	.write_begin = affs_write_begin,
	.write_end = generic_write_end,
	.bmap = _affs_bmap
};
static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, create);
	if (!err) {
		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}
static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}
static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}
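/*
 * The three *_ino() helpers above map a file-relative block number to a
 * device buffer: a throwaway buffer_head is handed to affs_get_block()
 * only to receive the mapped b_blocknr (and the BH_New hint in b_state),
 * and the real buffer is then obtained with affs_bread(),
 * affs_getzeroblk() or affs_getemptyblk() respectively.
 */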
static int
affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	char *data;
	u32 bidx, boff, bsize;
	u32 tmp;

	pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	BUG_ON(from > to || to > PAGE_CACHE_SIZE);
	kmap(page);
	data = page_address(page);
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (from < to) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - from);
		BUG_ON(from + tmp > to || tmp > bsize);
		memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
		affs_brelse(bh);
		bidx++;
		from += tmp;
		boff = 0;
	}
	flush_dcache_page(page);
	kunmap(page);
	return 0;
}
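/*
 * OFS note: on "old" filesystems each data block begins with a small
 * header (type, key, sequence number, payload size, next-block pointer,
 * checksum), so only s_data_blksize bytes of each block carry file data
 * -- 488 bytes, assuming the usual 512-byte block size.  That is why the
 * _ofs paths copy data with memcpy() against AFFS_DATA(bh) instead of
 * mapping pages directly onto disk blocks.
 */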
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;
	if (boff) {
		/* zero-fill the remainder of the last partial data block */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
			if (tmp)
				affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}
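/*
 * Note: affs_extent_file_ofs() above grows an OFS file to 'newsize' by
 * first zero-filling the tail of the last partial data block and then
 * chaining freshly zeroed blocks through AFFS_DATA_HEAD()->next, keeping
 * i_size and mmu_private up to date even when a block cannot be obtained.
 */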
static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
	to = PAGE_CACHE_SIZE;
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
		to = inode->i_size & ~PAGE_CACHE_MASK;
		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
	}

	err = affs_do_readpage_ofs(file, page, 0, to);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index;
	int err = 0;

	pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_CACHE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (PageUptodate(page))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_readpage_ofs(file, page, 0, PAGE_CACHE_SIZE);
	if (err) {
		unlock_page(page);
		page_cache_release(page);
	}
	return err;
}
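/*
 * Note: unlike the FFS path, OFS data blocks cannot simply be mapped
 * under the page, so write_begin brings the whole page up to date first
 * (see the XXX above) and affs_write_end_ofs() copies the new data into
 * the individual data blocks by hand.
 */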
static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the page should always be uptodate here,
	 * due to write_begin.
	 */

	pr_debug("AFFS: write_end(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		/* partial head fragment: complete the block the write starts in */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}
	while (from + bsize <= to) {
		/* whole data blocks: no need to read them first */
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}
	if (from < to) {
		/* partial tail fragment */
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

	unlock_page(page);
	page_cache_release(page);

	return written;

out:
	if (!written)
		written = PTR_ERR(bh);
	bh = prev_bh;
	goto done;
}
const struct address_space_operations affs_aops_ofs = {
	.readpage = affs_readpage_ofs,
	//.writepage = affs_writepage_ofs,
	.write_begin = affs_write_begin_ofs,
	.write_end = affs_write_end_ofs
};
/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);

	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}
/* Truncate (or enlarge) a file to the requested size. */

void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
		 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		/* enlarge: let the write path extend the file */
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		u32 size = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (AFFS_SB(sb)->s_flags & SF_OFS) {
			/* terminate the OFS data block chain at the new last block */
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
					     last_blk, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;

	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}
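/*
 * Note: once the last in-use extent block has been trimmed above, the
 * remaining ->extension chain is walked to free every data block it
 * still references and then the extent blocks themselves, finishing
 * with any blocks that were preallocated for the file.
 */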
int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int ret, err;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	ret = write_inode_now(inode, 0);
	err = sync_blockdev(inode->i_sb->s_bdev);
	if (!ret)
		ret = err;
	mutex_unlock(&inode->i_mutex);
	return ret;
}