/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"

#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)
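
/*
 * Each DAT entry maps a virtual block number to an on-disk block number
 * (de_blocknr) and records the lifetime of that mapping as the half-open
 * checkpoint interval [de_start, de_end).  A freshly allocated entry
 * covers [NILFS_CNO_MIN, NILFS_CNO_MAX) with de_blocknr == 0, i.e.
 * "allocated but not yet backed by a block".
 */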

static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}
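
/*
 * Allocation follows the prepare/commit/abort pattern above.  A minimal
 * sketch of a caller (hypothetical; do_something_else() stands in for
 * whatever work happens between prepare and commit):
 *
 *	struct nilfs_palloc_req req;
 *	int err;
 *
 *	err = nilfs_dat_prepare_alloc(dat, &req);
 *	if (err)
 *		return err;
 *
 * A failed prepare cleans up after itself, so there is nothing to abort
 * at that point.  If a later step fails before commit, undo the
 * reservation instead:
 *
 *	if (do_something_else() < 0) {
 *		nilfs_dat_abort_alloc(dat, &req);
 *		return -EIO;
 *	}
 *	nilfs_dat_commit_alloc(dat, &req);
 *
 * After commit, req.pr_entry_nr holds the newly allocated virtual block
 * number, chosen by the persistent-object allocator.
 */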

void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
}
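
/*
 * nilfs_dat_commit_start() opens the lifetime of a mapping: de_start is
 * set to the current checkpoint number and de_blocknr to the block now
 * backing the entry.  A minimal sketch of the pairing (hypothetical
 * caller; the entry must already exist, hence the WARN_ON for -ENOENT
 * in prepare):
 *
 *	req.pr_entry_nr = vblocknr;
 *	err = nilfs_dat_prepare_start(dat, &req);
 *	if (err)
 *		return err;
 *	nilfs_dat_commit_start(dat, &req, blocknr);
 */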

int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}
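
/*
 * Ending a mapping mirrors the alloc path: prepare_end, then commit_end
 * or abort_end.  A minimal sketch (hypothetical caller):
 *
 *	req.pr_entry_nr = vblocknr;
 *	err = nilfs_dat_prepare_end(dat, &req);
 *	if (err)
 *		return err;
 *	nilfs_dat_commit_end(dat, &req, dead);
 *
 * With dead != 0 the entry's de_end is left equal to de_start.
 * Independently of that, if de_blocknr is still zero the virtual block
 * number never reached the disk, so commit_end frees the entry outright
 * via nilfs_dat_commit_free().
 */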

int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}
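
/*
 * An update ends the old virtual block number and allocates a new one as
 * one operation, so a caller sees both steps succeed or neither.  A
 * minimal sketch (hypothetical caller; only oldreq.pr_entry_nr needs to
 * be set, the allocator picks newreq.pr_entry_nr itself):
 *
 *	struct nilfs_palloc_req oldreq, newreq;
 *
 *	oldreq.pr_entry_nr = old_vblocknr;
 *	err = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
 *	if (err)
 *		return err;
 *	if (write_failed)
 *		nilfs_dat_abort_update(dat, &oldreq, &newreq);
 *	else
 *		nilfs_dat_commit_update(dat, &oldreq, &newreq, dead);
 */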

/**
 * nilfs_dat_mark_dirty - mark the DAT entry for a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the entry block that holds the
 * DAT entry for @vblocknr dirty so that it gets written back to disk.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block numbers have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - The entry for @vblocknr is not in use (de_blocknr is zero).
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}
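
/*
 * nilfs_dat_move() is what allows a live block to be relocated on disk
 * without touching its users: the virtual block number stays stable while
 * only de_blocknr changes.  A minimal sketch (hypothetical relocation
 * step; new_blocknr is chosen by the caller):
 *
 *	err = nilfs_dat_move(dat, vblocknr, new_blocknr);
 *	if (err)
 *		return err;	(entry unused, or the DAT block was unreadable)
 */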

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	if (blocknrp != NULL)
		*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr, KM_USER0);
	brelse(entry_bh);
	return ret;
}
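
/*
 * Translation is the common lookup path for DAT-mapped blocks.  A minimal
 * sketch (hypothetical caller):
 *
 *	sector_t pbn;
 *	int err = nilfs_dat_translate(dat, vblocknr, &pbn);
 *
 *	if (!err)
 *		(pbn now holds the on-disk block number for vblocknr)
 *	else if (err == -ENOENT)
 *		(no block is currently assigned to vblocknr)
 */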

ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
		/* first and last virtual block numbers in this entry block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
	}

	return nvi;
}
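
/*
 * nilfs_dat_get_vinfo() batches entry lookups so that all requests falling
 * into one DAT entry block are served from a single buffer; for the
 * batching to help, virtual block numbers sharing a block should be
 * adjacent in the array.  A minimal sketch (hypothetical caller; NVI is a
 * caller-chosen batch size):
 *
 *	struct nilfs_vinfo vinfo[NVI];
 *	ssize_t n;
 *
 *	(fill vinfo[k].vi_vblocknr for k = 0 .. NVI - 1)
 *	n = nilfs_dat_get_vinfo(dat, vinfo, sizeof(vinfo[0]), NVI);
 *	if (n >= 0)
 *		(vi_start, vi_end, and vi_blocknr are now filled in)
 */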