/*
 * fs/logfs/dev_bdev.c	- Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
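/* Synchronously read or write a single page at page->index using a
 * one-segment bio built on the stack. */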
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
	bio.bi_iter.bi_size = PAGE_SIZE;

	return submit_bio_wait(rw, &bio);
}
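/* Page-cache filler for the logfs mapping inode: read one page from the
 * block device and set its uptodate/error state accordingly. */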
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}
static DECLARE_WAIT_QUEUE_HEAD(wq);
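/* Completion handler for segment writes: end writeback on every page in
 * the bio, drop the page references and wake up waiters in bdev_sync()
 * once the last pending write has finished. */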
static void writeseg_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */

	bio_for_each_segment_all(bvec, bio, i) {
		end_page_writeback(bvec->bv_page);
		page_cache_release(bvec->bv_page);
	}
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
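/* Write nr_pages pages of the mapping inode, starting at device offset
 * ofs, splitting the request into several bios when it exceeds
 * BIO_MAX_PAGES. */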
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	unsigned int max_pages;
	int i;

	max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_iter.bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_iter.bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_iter.bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
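/* Round the byte range [ofs, ofs+len) out to page boundaries before
 * handing it to __bdev_writeseg(). */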
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}
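/* Completion handler for erase bios; mirrors writeseg_end_io(), but the
 * segments all point at the shared erase page, so no per-page cleanup is
 * needed. */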
static void erase_end_io(struct bio *bio)
{
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
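/* "Erase" a device range by overwriting it with the contents of the
 * preallocated s_erase_page, using the same bio-splitting scheme as
 * __bdev_writeseg(). */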
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	unsigned int max_pages;
	int i;

	max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_iter.bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_iter.bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_iter.bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
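/* Device-level erase entry point; only performs real work when the caller
 * insists (ensure_write), since block devices need no erase cycle. */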
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}
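/* Wait until every write submitted through writeseg/erase has completed. */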
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}
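/* Return the page containing the first superblock, located at offset 0. */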
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}
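/* Return the page containing the last superblock, located in the last
 * 4KiB-aligned block of the device. */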
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}
static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}
static void bdev_put_device(struct logfs_super *s)
{
	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
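/* Block devices can always be rewritten in place, so writing out the
 * write buffer at ofs is always possible (0 = success). */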
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}
static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};
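/* Open the backing device by path; if it is really an mtdblock device,
 * drop it again and use the MTD backend instead. */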
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
		const char *devname)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		return logfs_get_sb_mtd(p, mtdnr);
	}

	p->s_bdev = bdev;
	p->s_mtd = NULL;
	p->s_devops = &bd_devops;
	return 0;
}