// SPDX-License-Identifier: GPL-2.0+
/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

/**
 * struct nilfs_dat_info - on-memory private data of DAT file
 * @mi: on-memory private data of metadata file
 * @palloc_cache: persistent object allocator cache of DAT file
 * @shadow: shadow map of DAT file
 */
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

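/*
 * The helpers below follow the usual NILFS prepare/commit/abort pattern:
 * "prepare" pins the buffer that holds the on-disk DAT entry for
 * req->pr_entry_nr, "commit" marks it (and the DAT inode) dirty and releases
 * it, and "abort" releases it without dirtying anything.
 */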
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

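/*
 * DAT entry allocation: reserve a free virtual block number with the
 * persistent object allocator and load the entry block that will hold its
 * DAT entry; the commit/abort counterparts finalize or roll back both steps.
 */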
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

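/*
 * nilfs_dat_commit_free - reset the DAT entry of a virtual block number that
 * is being freed and return the number to the persistent object allocator.
 */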
static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

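/*
 * "start" operations bind a virtual block number to a disk block number:
 * the entry's start checkpoint is set to the current checkpoint number and
 * de_blocknr records where the block was written.
 */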
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}

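/*
 * "end" operations terminate the lifetime of a virtual block number.  If the
 * entry was never assigned a disk block (de_blocknr == 0), the number itself
 * is freed as well; otherwise only its end checkpoint is recorded.
 */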
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

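/*
 * "update" operations combine an "end" on the old virtual block number with
 * the allocation of a new one, keeping the two requests consistent when
 * either step fails.
 */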
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}

/**
 * nilfs_dat_mark_dirty - mark the DAT entry of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified by
 * @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block number has not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		nilfs_msg(dat->i_sb, KERN_CRIT,
			  "%s: invalid vblocknr = %llu, [%llu, %llu)",
			  __func__, (unsigned long long)vblocknr,
			  (unsigned long long)le64_to_cpu(entry->de_start),
			  (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}

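/*
 * nilfs_dat_get_vinfo - fill an array of nilfs_vinfo structures (each @visz
 * bytes long) with the start/end checkpoint numbers and the disk block number
 * of the requested virtual block numbers.  Consecutive entries whose numbers
 * fall in the same DAT entry block are served by a single block lookup.
 * Returns @nvi on success or a negative error code.
 */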
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* last virtual block number in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}

/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	if (entry_size > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR, "too large DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
		nilfs_msg(sb, KERN_ERR, "too small DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	}

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	nilfs_mdt_setup_shadow_map(dat, &di->shadow);

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}