/*
 * fs/logfs/dev_bdev.c	- Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
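
/*
 * Perform a single-page read or write synchronously, using a bio and
 * bio_vec built on the stack.  Used by bdev_readpage() and bdev_write_sb()
 * below, where blocking until completion is fine.
 */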
static int sync_request(struct page *page, struct block_device *bdev, int op)
{
	struct bio bio;
	struct bio_vec bio_vec;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
	bio.bi_iter.bi_size = PAGE_SIZE;
	bio_set_op_attrs(&bio, op, 0);

	return submit_bio_wait(&bio);
}
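
/*
 * Filler for read_cache_page(): read @page from the device and set its
 * uptodate/error bits to match the result.
 */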
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}
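
/* Woken whenever s_pending_writes drops to zero; see bdev_sync(). */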
static DECLARE_WAIT_QUEUE_HEAD(wq);
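
/*
 * Completion handler for segment writes: end writeback on each page in the
 * bio, drop the page references taken in __bdev_writeseg() and wake up any
 * sync waiters once the last pending write has finished.
 */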
static void writeseg_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */

	bio_for_each_segment_all(bvec, bio, i) {
		end_page_writeback(bvec->bv_page);
		put_page(bvec->bv_page);
	}
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
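
/*
 * Write nr_pages pages of the mapping inode to the device, starting at
 * byte offset ofs.  Since the block layer cannot split bios on our behalf,
 * a full bio is submitted and a fresh one started whenever max_pages pages
 * have been gathered.
 */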
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	unsigned int max_pages;
	int i;

	max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_iter.bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_iter.bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			atomic_inc(&super->s_pending_writes);
			submit_bio(bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_iter.bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	atomic_inc(&super->s_pending_writes);
	submit_bio(bio);
	return 0;
}
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed. */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}
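
/*
 * Completion handler for erase writes; like writeseg_end_io(), minus the
 * page writeback handling.
 */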
static void erase_end_io(struct bio *bio)
{
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	unsigned int max_pages;
	int i;

	max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_iter.bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_iter.bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			atomic_inc(&super->s_pending_writes);
			submit_bio(bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_iter.bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	atomic_inc(&super->s_pending_writes);
	submit_bio(bio);
	return 0;
}
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}
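
/* Wait for every write submitted so far to complete. */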
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}
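
/*
 * The two superblock anchors live in the first page of the device and in
 * its last complete 4KiB block.  Both are read through the page cache,
 * with bdev_readpage() as the filler.
 */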
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}

static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct logfs_super *s)
{
	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}
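
/* Device operations handed to the generic logfs code for block devices. */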
static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};
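
/*
 * Open the named block device for logfs.  mtdblock devices are passed to
 * the MTD backend via logfs_get_sb_mtd() instead.
 */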
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
		const char *devname)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		return logfs_get_sb_mtd(p, mtdnr);
	}

	p->s_devops = &bd_devops;
	p->s_bdev = bdev;
	return 0;
}