/*
 * fs/logfs/dev_bdev.c	- Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}
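/*
 * Perform a single-page read or write synchronously.  The bio, its one
 * bio_vec and the completion all live on the stack; the request is
 * submitted, the queue unplugged, and the caller sleeps until
 * request_complete() signals the completion.
 */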
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	generic_unplug_device(bdev_get_queue(bdev));
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
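/*
 * Read one page from the device.  Used both as the ->readpage device
 * operation and as the filler for read_cache_page() further down.
 */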
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}
static DECLARE_WAIT_QUEUE_HEAD(wq);
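/*
 * Completion handler for segment writes.  Walks the bio_vec array
 * backwards, ending writeback and dropping the page cache reference on
 * each page, then wakes any bdev_sync() waiter once the last pending
 * write has completed.
 */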
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
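/*
 * Write nr_pages consecutive pages, taken from the page cache of the
 * mapping inode, to the device at byte offset ofs.  Because the block
 * layer cannot split bios for us, a full bio is submitted and a fresh
 * one allocated whenever max_pages is reached.
 */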
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
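/*
 * ->writeseg device operation: round the region down to a page-aligned
 * start and up to a page-aligned length, hand it to __bdev_writeseg()
 * and unplug the queue so the writes get going.
 */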
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
	generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
}
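/*
 * Completion handler for erase bios.  No pages to release here, since
 * every bio_vec points at the shared s_erase_page; just drop the bio
 * and wake any bdev_sync() waiter.
 */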
static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
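/*
 * Simulate an erase by writing the shared, pre-initialized
 * s_erase_page over every page in the region.  Structured like
 * __bdev_writeseg(), including the manual bio splitting once
 * max_pages is reached.
 */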
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
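/*
 * ->erase device operation.  Both offset and length must be page
 * aligned.  The overwrite only happens when the caller insists on it;
 * see the comment below on why the journal needs that.
 */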
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}
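/*
 * Wait for all in-flight segment writes and erases to complete.  The
 * counter is incremented before each submit_bio() and decremented in
 * the end_io handlers, which wake this queue when it reaches zero.
 */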
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}
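/*
 * The first superblock lives at offset 0; read it through the page
 * cache of the mapping inode, using bdev_readpage() as the filler.
 */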
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}
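/*
 * The last superblock copy sits in the final complete 4KiB block of
 * the device; derive its position from the block device inode's size.
 */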
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}
static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}
static void bdev_put_device(struct super_block *sb)
{
	close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
}
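/*
 * A block device can overwrite a partially written page in place, so
 * buffered writes never need to be refused.
 */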
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}
static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};
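/*
 * Mount helper: open the device by name and hand it to the generic
 * code.  MTD block devices are detected by major number and redirected
 * to the raw MTD backend instead.
 */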
int logfs_get_sb_bdev(struct file_system_type *type, int flags,
		const char *devname, struct vfsmount *mnt)
{
	struct block_device *bdev;

	bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
		return logfs_get_sb_mtd(type, flags, mtdnr, mnt);
	}

	return logfs_get_sb_device(type, flags, NULL, bdev, &bd_devops, mnt);
}