/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)
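
/*
 * Each DAT entry records the checkpoint numbers between which its
 * virtual block number is live: de_start is stamped when use of the
 * block starts and de_end when it is retired.  NILFS_CNO_MIN and
 * NILFS_CNO_MAX serve as the placeholder bounds before those events
 * happen.
 */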
/**
 * struct nilfs_dat_info - on-memory private data of DAT file
 * @mi: on-memory private data of metadata file
 * @palloc_cache: persistent object allocator cache of DAT file
 * @shadow: shadow map of DAT file
 */
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}
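
/*
 * DAT entries are updated in a prepare/commit/abort fashion: the
 * prepare step reads in the entry block so that the commit step cannot
 * fail, commit applies the change and marks the buffer dirty, and
 * abort releases the buffer untouched if the caller backs out.
 */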
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}
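
/*
 * A freshly allocated entry is initialized with the full lifetime
 * [NILFS_CNO_MIN, NILFS_CNO_MAX) and block number 0, i.e. no disk
 * block has been assigned to the virtual block number yet.
 */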
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}
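
/*
 * Freeing an entry resets its lifetime to the empty interval
 * [NILFS_CNO_MIN, NILFS_CNO_MIN), marking it as dead, before the
 * entry number is returned to the persistent object allocator.
 */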
static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}
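
/*
 * Starting the use of a virtual block number stamps the entry with the
 * current checkpoint number and binds it to the given disk block
 * number.
 */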
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}
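
/*
 * If the entry still has block number 0, its use never got started, so
 * ending it will free the entry; prepare for that case here as well.
 */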
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}
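
/*
 * On commit, a dead entry keeps de_end == de_start, while a live one
 * gets the current checkpoint number as the end of its lifetime.
 * Entries that were never assigned a disk block are freed outright.
 */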
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}
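
/*
 * An update ends the lifetime of one entry and allocates a new one as
 * a single prepare/commit/abort unit, so either both steps take effect
 * or neither does.
 */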
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}
void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}
void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}
/**
 * nilfs_dat_mark_dirty - mark the DAT entry of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the entry block that holds the
 * entry of @vblocknr dirty so that it will be written back to disk.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}
/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - One of the virtual block numbers has not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}
/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}
/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}
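
/**
 * nilfs_dat_get_vinfo - get information on a group of virtual block numbers
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures
 * @visz: size of one nilfs_vinfo structure
 * @nvi: number of entries in @buf
 *
 * Consecutive vinfo entries whose virtual block numbers fall into the
 * same DAT entry block are filled in with a single block lookup.
 */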
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* first and last virtual block numbers in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}
/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	if (entry_size > sb->s_blocksize) {
		printk(KERN_ERR
		       "NILFS: too large DAT entry size: %zu bytes.\n",
		       entry_size);
		return -EINVAL;
	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
		printk(KERN_ERR
		       "NILFS: too small DAT entry size: %zu bytes.\n",
		       entry_size);
		return -EINVAL;
	}

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;
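
	/*
	 * Give the DAT's semaphore its own lockdep class: it can be
	 * taken while another metadata file's semaphore is held, which
	 * lockdep would otherwise report as recursive locking.
	 */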
	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	nilfs_mdt_setup_shadow_map(dat, &di->shadow);

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}