/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"
#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif
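/*
 * Metapage locking: META_locked is a bit in mp->flag, set with
 * test_and_set_bit() and waited on through the mp->wait queue.
 * mp->page must be locked before the metapage lock is taken.
 */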
#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit(META_locked, &(mp)->flag)
static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit(META_locked, &mp->flag);
	wake_up(&mp->wait);
}
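/*
 * Slow path: sleep on mp->wait until META_locked is released.  The
 * page lock is dropped while sleeping, since the current lock holder
 * may need it to make progress, and re-taken before retrying.
 */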
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}
/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}
#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;
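/*
 * A metapage covers PSIZE bytes, so when PSIZE is smaller than the VM
 * page size a single page holds several metapages.  In that case a
 * meta_anchor hung off page_private() tracks them and their in-flight
 * I/O; otherwise page_private() points at the lone metapage directly.
 */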
#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))
static inline struct metapage *page_to_mp(struct page *page, uint offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}
static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}
static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}
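/*
 * io_count counts bios in flight against the page; the handler passed
 * to dec_io() runs only when the last of them completes.
 */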
static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}
#else
static inline struct metapage *page_to_mp(struct page *page, uint offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif
static void init_once(struct kmem_cache *cachep, void *foo)
{
	struct metapage *mp = (struct metapage *)foo;

	mp->lid = 0;
	mp->lsn = 0;
	mp->flag = 0;
	mp->data = NULL;
	mp->clsn = 0;
	mp->log = NULL;
	set_bit(META_free, &mp->flag);
	init_waitqueue_head(&mp->wait);
}
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	return mempool_alloc(metapage_mempool, gfp_mask);
}

static inline void free_metapage(struct metapage *mp)
{
	mp->flag = 0;
	set_bit(META_free, &mp->flag);

	mempool_free(mp, metapage_mempool);
}
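/*
 * Backing the slab cache with a mempool keeps a reserve of
 * METAPOOL_MIN_PAGES metapages, so allocations can still make
 * progress when the system is under memory pressure.
 */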
int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, init_once);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}
void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}
/*
 * Metapage address space operations
 */

static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    unsigned int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}
static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}
static void metapage_read_end_io(struct bio *bio, int err)
{
	struct page *page = bio->bi_private;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}
static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race.  Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}
static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}
static void metapage_write_end_io(struct bio *bio, int err)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}
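/*
 * Write back every dirty metapage in the page, merging metapages that
 * are contiguous both in memory and on disk into a single bio.
 * Metapages pinned by the journal (nohomeok) are skipped and the page
 * is redirtied instead.
 */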
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	unsigned int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	unsigned int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	unsigned int len;
	unsigned int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	unsigned int offset;

	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				set_bit(META_io, &mp->flag);
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_size)
				goto dump_bio;
			submit_bio(WRITE, bio);
			bio = NULL;
		} else {
			set_page_writeback(page);
			inc_io(page);
		}
		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			/* Need better error handling */
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			dec_io(page, last_write_complete);
			continue;
		}
		set_bit(META_io, &mp->flag);
		len = min(xlen, (uint) JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_size)
			goto dump_bio;

		submit_bio(WRITE, bio);
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);

	return -EIO;
}
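/*
 * Fill the page by issuing one bio per contiguous on-disk extent;
 * unmapped blocks are simply skipped.
 */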
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	unsigned int block_offset;
	unsigned int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	unsigned int xlen;
	unsigned int len;
	unsigned int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(READ, bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio->bi_bdev = inode->i_sb->s_bdev;
			bio->bi_sector = pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(READ, bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}
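/*
 * Called by the VM when it wants to reclaim the page: refuses if any
 * metapage in the page is still referenced, pinned by the journal, or
 * dirty.
 */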
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	unsigned int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}
static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
	BUG_ON(offset);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}
const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.sync_page	= block_sync_page,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};
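/*
 * Look up, and optionally create ('new'), the metapage covering
 * lblock.  Absolute addresses go through the block device's
 * direct_inode mapping; fileset addresses use the inode's own mapping.
 * Returns the metapage locked, with its reference count bumped.
 */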
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_CACHE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "__get_metapage: mp->logical_size != size");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "__get_metapage: using a "
					  "discarded metapage");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		mp->page = page;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}
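/*
 * Lifetime helpers: grab_metapage() takes an extra reference and the
 * metapage lock; hold_metapage() and put_metapage() bracket a short
 * access under the page lock; release_metapage() drops a reference,
 * writing back or freeing the metapage once the count reaches zero.
 */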
void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	page_cache_get(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}
void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	page_cache_get(page);
	lock_page(page);
	set_page_dirty(page);
	write_one_page(page, 1);
	clear_bit(META_forcewrite, &mp->flag);
	page_cache_release(page);
}
void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}
void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	page_cache_get(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		page_cache_release(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			write_one_page(page, 1);
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	page_cache_release(page);
}
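/*
 * Mark every metapage covering [addr, addr + len) in the block device
 * mapping as discarded, so stale metadata is never written back once
 * the underlying blocks have been freed or reused.
 */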
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}
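/* /proc read handler reporting the counters collected in mpStat */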
#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_read(char *buffer, char **start, off_t offset, int length,
		    int *eof, void *data)
{
	int len = 0;
	off_t begin;

	len += sprintf(buffer,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif
;