// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include "page.h"
#include "segbuf.h"
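/*
 * nilfs_write_info gathers the transient state used while a log is
 * turned into block-layer writes: the target nilfs object, the bio
 * currently being assembled, and counters describing which part of the
 * log (blocks [start, end)) has already been packed into that bio.
 */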
struct nilfs_write_info {
        struct the_nilfs        *nilfs;
        struct bio              *bio;
        int                     start, end; /* The region to be submitted */
        int                     rest_blocks;
        int                     max_pages;
        int                     nr_vecs;
        sector_t                blocknr;
};
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
                              struct the_nilfs *nilfs);
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
        struct nilfs_segment_buffer *segbuf;

        segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
        if (unlikely(!segbuf))
                return NULL;

        segbuf->sb_super = sb;
        INIT_LIST_HEAD(&segbuf->sb_list);
        INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
        INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
        segbuf->sb_super_root = NULL;

        init_completion(&segbuf->sb_bio_event);
        atomic_set(&segbuf->sb_err, 0);
        segbuf->sb_nbio = 0;

        return segbuf;
}
void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
        kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}
void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
                      unsigned long offset, struct the_nilfs *nilfs)
{
        segbuf->sb_segnum = segnum;
        nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
                                &segbuf->sb_fseg_end);

        segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
        segbuf->sb_rest_blocks =
                segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
/**
 * nilfs_segbuf_map_cont - map a new log behind a given log
 * @segbuf: new segment buffer
 * @prev: segment buffer containing a log to be continued
 */
void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
                           struct nilfs_segment_buffer *prev)
{
        segbuf->sb_segnum = prev->sb_segnum;
        segbuf->sb_fseg_start = prev->sb_fseg_start;
        segbuf->sb_fseg_end = prev->sb_fseg_end;
        segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
        segbuf->sb_rest_blocks =
                segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
                                  __u64 nextnum, struct the_nilfs *nilfs)
{
        segbuf->sb_nextnum = nextnum;
        segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}
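/*
 * The extend helpers below grab the next free block of the log with
 * sb_getblk() and queue it on the segment buffer; summary blocks are
 * zero-filled when not up to date, presumably so that stale block
 * contents never reach the media.
 */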
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
        struct buffer_head *bh;

        bh = sb_getblk(segbuf->sb_super,
                       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
        if (unlikely(!bh))
                return -ENOMEM;

        lock_buffer(bh);
        if (!buffer_uptodate(bh)) {
                memset(bh->b_data, 0, bh->b_size);
                set_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        nilfs_segbuf_add_segsum_buffer(segbuf, bh);
        return 0;
}
int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
                                struct buffer_head **bhp)
{
        struct buffer_head *bh;

        bh = sb_getblk(segbuf->sb_super,
                       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
        if (unlikely(!bh))
                return -ENOMEM;

        nilfs_segbuf_add_payload_buffer(segbuf, bh);
        *bhp = bh;
        return 0;
}
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags,
                       time64_t ctime, __u64 cno)
{
        int err;

        segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
        err = nilfs_segbuf_extend_segsum(segbuf);
        if (unlikely(err))
                return err;

        segbuf->sb_sum.flags = flags;
        segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
        segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
        segbuf->sb_sum.ctime = ctime;
        segbuf->sb_sum.cno = cno;
        return 0;
}
/*
 * Setup segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
        struct nilfs_segment_summary *raw_sum;
        struct buffer_head *bh_sum;

        bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
                            struct buffer_head, b_assoc_buffers);
        raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

        raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
        raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
        raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
        raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
        raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
        raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
        raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
        raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
        raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
        raw_sum->ss_pad      = 0;
        raw_sum->ss_cno      = cpu_to_le64(segbuf->sb_sum.cno);
}
/*
 * CRC calculation routines
 */
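/*
 * Checksum coverage, as implied by the offsets used below: ss_sumsum
 * protects the summary area starting just past the ss_datasum and
 * ss_sumsum fields, while ss_datasum protects everything after
 * ss_datasum itself, i.e. the rest of the summary plus all payload
 * blocks of the log.
 */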
static void
nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed)
{
        struct buffer_head *bh;
        struct nilfs_segment_summary *raw_sum;
        unsigned long size, bytes = segbuf->sb_sum.sumbytes;
        u32 crc;

        bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
                        b_assoc_buffers);

        raw_sum = (struct nilfs_segment_summary *)bh->b_data;
        size = min_t(unsigned long, bytes, bh->b_size);
        crc = crc32_le(seed,
                       (unsigned char *)raw_sum +
                       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
                       size - (sizeof(raw_sum->ss_datasum) +
                               sizeof(raw_sum->ss_sumsum)));

        list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
                                     b_assoc_buffers) {
                bytes -= size;
                size = min_t(unsigned long, bytes, bh->b_size);
                crc = crc32_le(crc, bh->b_data, size);
        }
        raw_sum->ss_sumsum = cpu_to_le32(crc);
}
static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
                                          u32 seed)
{
        struct buffer_head *bh;
        struct nilfs_segment_summary *raw_sum;
        u32 crc;

        bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
                        b_assoc_buffers);
        raw_sum = (struct nilfs_segment_summary *)bh->b_data;
        crc = crc32_le(seed,
                       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
                       bh->b_size - sizeof(raw_sum->ss_datasum));

        list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
                                     b_assoc_buffers) {
                crc = crc32_le(crc, bh->b_data, bh->b_size);
        }
        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                size_t offset = offset_in_folio(bh->b_folio, bh->b_data);
                unsigned char *from;

                /* Do not support block sizes larger than PAGE_SIZE */
                from = kmap_local_folio(bh->b_folio, offset);
                crc = crc32_le(crc, from, bh->b_size);
                kunmap_local(from);
        }
        raw_sum->ss_datasum = cpu_to_le32(crc);
}
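/*
 * The super root block, when present, carries its own checksum:
 * sr_sum covers NILFS_SR_BYTES(inode size) bytes of the block minus
 * the checksum field itself.
 */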
static void
nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf,
                                    u32 seed)
{
        struct nilfs_super_root *raw_sr;
        struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info;
        unsigned int srsize;
        u32 crc;

        raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
        srsize = NILFS_SR_BYTES(nilfs->ns_inode_size);
        crc = crc32_le(seed,
                       (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
                       srsize - sizeof(raw_sr->sr_sum));
        raw_sr->sr_sum = cpu_to_le32(crc);
}
static void nilfs_release_buffers(struct list_head *list)
{
        struct buffer_head *bh, *n;

        list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
                list_del_init(&bh->b_assoc_buffers);
                brelse(bh);
        }
}
static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
        nilfs_release_buffers(&segbuf->sb_segsum_buffers);
        nilfs_release_buffers(&segbuf->sb_payload_buffers);
        segbuf->sb_super_root = NULL;
}
/*
 * Iterators for segment buffers
 */
void nilfs_clear_logs(struct list_head *logs)
{
        struct nilfs_segment_buffer *segbuf;

        list_for_each_entry(segbuf, logs, sb_list)
                nilfs_segbuf_clear(segbuf);
}
void nilfs_truncate_logs(struct list_head *logs,
                         struct nilfs_segment_buffer *last)
{
        struct nilfs_segment_buffer *n, *segbuf;

        segbuf = list_prepare_entry(last, logs, sb_list);
        list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
                list_del_init(&segbuf->sb_list);
                nilfs_segbuf_clear(segbuf);
                nilfs_segbuf_free(segbuf);
        }
}
int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
{
        struct nilfs_segment_buffer *segbuf;
        int ret = 0;

        list_for_each_entry(segbuf, logs, sb_list) {
                ret = nilfs_segbuf_write(segbuf, nilfs);
                if (ret)
                        break;
        }
        return ret;
}
int nilfs_wait_on_logs(struct list_head *logs)
{
        struct nilfs_segment_buffer *segbuf;
        int err, ret = 0;

        list_for_each_entry(segbuf, logs, sb_list) {
                err = nilfs_segbuf_wait(segbuf);
                if (err && !ret)
                        ret = err;
        }
        return ret;
}
/**
 * nilfs_add_checksums_on_logs - add checksums on the logs
 * @logs: list of segment buffers storing target logs
 * @seed: checksum seed value
 */
void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed)
{
        struct nilfs_segment_buffer *segbuf;

        list_for_each_entry(segbuf, logs, sb_list) {
                if (segbuf->sb_super_root)
                        nilfs_segbuf_fill_in_super_root_crc(segbuf, seed);
                nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
                nilfs_segbuf_fill_in_data_crc(segbuf, seed);
        }
}
/*
 * BIO operations
 */
static void nilfs_end_bio_write(struct bio *bio)
{
        struct nilfs_segment_buffer *segbuf = bio->bi_private;

        if (bio->bi_status)
                atomic_inc(&segbuf->sb_err);

        bio_put(bio);
        complete(&segbuf->sb_bio_event);
}
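/*
 * Completion scheme: every submitted bio completes sb_bio_event exactly
 * once and bumps sb_err on failure; nilfs_segbuf_wait() below waits
 * sb_nbio times on the same completion and reports -EIO if any bio
 * failed.
 */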
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
                                   struct nilfs_write_info *wi)
{
        struct bio *bio = wi->bio;

        bio->bi_end_io = nilfs_end_bio_write;
        bio->bi_private = segbuf;
        submit_bio(bio);
        segbuf->sb_nbio++;

        wi->bio = NULL;
        wi->rest_blocks -= wi->end - wi->start;
        wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
        wi->start = wi->end;
        return 0;
}
static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
                                       struct nilfs_write_info *wi)
{
        wi->bio = NULL;
        wi->rest_blocks = segbuf->sb_sum.nblocks;
        wi->max_pages = BIO_MAX_VECS;
        wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
        wi->start = wi->end = 0;
        wi->blocknr = segbuf->sb_pseg_start;
}
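/*
 * nilfs_segbuf_submit_bh() packs one buffer into the bio being built;
 * when the bio is full it is handed off via nilfs_segbuf_submit_bio()
 * and the same buffer is retried with a fresh bio (hence the "never
 * submit current bh" note below).
 */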
static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
                                  struct nilfs_write_info *wi,
                                  struct buffer_head *bh)
{
        int err;

        BUG_ON(wi->nr_vecs <= 0);
 repeat:
        if (!wi->bio) {
                wi->bio = bio_alloc(wi->nilfs->ns_bdev, wi->nr_vecs,
                                    REQ_OP_WRITE, GFP_NOIO);
                wi->bio->bi_iter.bi_sector = (wi->blocknr + wi->end) <<
                        (wi->nilfs->ns_blocksize_bits - 9);
        }

        if (bio_add_folio(wi->bio, bh->b_folio, bh->b_size,
                          offset_in_folio(bh->b_folio, bh->b_data))) {
                wi->end++;
                return 0;
        }

        err = nilfs_segbuf_submit_bio(segbuf, wi);
        /* never submit current bh */
        if (likely(!err))
                goto repeat;
        return err;
}
/**
 * nilfs_segbuf_write - submit write requests of a log
 * @segbuf: buffer storing a log to be written
 * @nilfs: nilfs object
 *
 * Return Value: On Success, 0 is returned. On Error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
                              struct the_nilfs *nilfs)
{
        struct nilfs_write_info wi;
        struct buffer_head *bh;
        int res = 0;

        wi.nilfs = nilfs;
        nilfs_segbuf_prepare_write(segbuf, &wi);

        list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
                res = nilfs_segbuf_submit_bh(segbuf, &wi, bh);
                if (unlikely(res))
                        goto failed_bio;
        }

        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                res = nilfs_segbuf_submit_bh(segbuf, &wi, bh);
                if (unlikely(res))
                        goto failed_bio;
        }

        if (wi.bio) {
                /*
                 * Last BIO is always sent through the following
                 * submission.
                 */
                wi.bio->bi_opf |= REQ_SYNC;
                res = nilfs_segbuf_submit_bio(segbuf, &wi);
        }

 failed_bio:
        return res;
}
/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 *
 * Return Value: On Success, 0 is returned. On Error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
        int err = 0;

        if (!segbuf->sb_nbio)
                return 0;

        do {
                wait_for_completion(&segbuf->sb_bio_event);
        } while (--segbuf->sb_nbio > 0);

        if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
                nilfs_err(segbuf->sb_super,
                          "I/O error writing log (start-blocknr=%llu, block-count=%lu) in segment %llu",
                          (unsigned long long)segbuf->sb_pseg_start,
                          segbuf->sb_sum.nblocks,
                          (unsigned long long)segbuf->sb_segnum);
                err = -EIO;
        }
        return err;
}