drivers/nvdimm/btt.c
1 /*
2 * Block Translation Table
3 * Copyright (c) 2014-2015, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 #include <linux/highmem.h>
15 #include <linux/debugfs.h>
16 #include <linux/blkdev.h>
17 #include <linux/module.h>
18 #include <linux/device.h>
19 #include <linux/mutex.h>
20 #include <linux/hdreg.h>
21 #include <linux/genhd.h>
22 #include <linux/sizes.h>
23 #include <linux/ndctl.h>
24 #include <linux/fs.h>
25 #include <linux/nd.h>
26 #include "btt.h"
27 #include "nd.h"
29 enum log_ent_request {
30 LOG_NEW_ENT = 0,
31 LOG_OLD_ENT
34 static struct device *to_dev(struct arena_info *arena)
36 return &arena->nd_btt->dev;
39 static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
41 return offset + nd_btt->initial_offset;
44 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
45 void *buf, size_t n, unsigned long flags)
47 struct nd_btt *nd_btt = arena->nd_btt;
48 struct nd_namespace_common *ndns = nd_btt->ndns;
50 /* arena offsets may be shifted from the base of the device */
51 offset = adjust_initial_offset(nd_btt, offset);
52 return nvdimm_read_bytes(ndns, offset, buf, n, flags);
55 static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
56 void *buf, size_t n, unsigned long flags)
58 struct nd_btt *nd_btt = arena->nd_btt;
59 struct nd_namespace_common *ndns = nd_btt->ndns;
61 /* arena offsets may be shifted from the base of the device */
62 offset = adjust_initial_offset(nd_btt, offset);
63 return nvdimm_write_bytes(ndns, offset, buf, n, flags);
66 static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
68 int ret;
71 * infooff and info2off should always be at least 512B aligned.
72 * We rely on that to make sure rw_bytes does error clearing
73 * correctly, so make sure that is the case.
75 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
76 "arena->infooff: %#llx is unaligned\n", arena->infooff);
77 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
78 "arena->info2off: %#llx is unaligned\n", arena->info2off);
80 ret = arena_write_bytes(arena, arena->info2off, super,
81 sizeof(struct btt_sb), 0);
82 if (ret)
83 return ret;
85 return arena_write_bytes(arena, arena->infooff, super,
86 sizeof(struct btt_sb), 0);
89 static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
91 return arena_read_bytes(arena, arena->infooff, super,
92 sizeof(struct btt_sb), 0);
96 * 'raw' version of btt_map write
97 * Assumptions:
98 * mapping is in little-endian
99 * mapping contains 'E' and 'Z' flags as desired
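 *
 * On-media map entry layout (flag bit positions are defined in btt.h): the
 * two flag bits select between four states - both set (MAP_ENT_NORMAL) is a
 * normal mapping, only the 'E' or only the 'Z' bit set marks an error or
 * zeroed block, and both clear is the initial, identity-mapped state. The
 * remaining bits hold the post-map LBA (extracted with ent_lba()).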
101 static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
102 unsigned long flags)
104 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
106 if (unlikely(lba >= arena->external_nlba))
107 dev_err_ratelimited(to_dev(arena),
108 "%s: lba %#x out of range (max: %#x)\n",
109 __func__, lba, arena->external_nlba);
110 return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
113 static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
114 u32 z_flag, u32 e_flag, unsigned long rwb_flags)
116 u32 ze;
117 __le32 mapping_le;
120 * This 'mapping' is supposed to be just the LBA mapping, without
121 * any flags set, so strip the flag bits.
123 mapping = ent_lba(mapping);
125 ze = (z_flag << 1) + e_flag;
126 switch (ze) {
127 case 0:
129 * We want to set neither of the Z or E flags, and
130 * in the actual layout, this means setting the bit
131 * positions of both to '1' to indicate a 'normal'
132 * map entry
134 mapping |= MAP_ENT_NORMAL;
135 break;
136 case 1:
137 mapping |= (1 << MAP_ERR_SHIFT);
138 break;
139 case 2:
140 mapping |= (1 << MAP_TRIM_SHIFT);
141 break;
142 default:
144 * The case where Z and E are both sent in as '1' could be
145 * construed as a valid 'normal' case, but we decide not to,
146 * to avoid confusion
148 dev_err_ratelimited(to_dev(arena),
149 "Invalid use of Z and E flags\n");
150 return -EIO;
153 mapping_le = cpu_to_le32(mapping);
154 return __btt_map_write(arena, lba, mapping_le, rwb_flags);
157 static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
158 int *trim, int *error, unsigned long rwb_flags)
160 int ret;
161 __le32 in;
162 u32 raw_mapping, postmap, ze, z_flag, e_flag;
163 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
165 if (unlikely(lba >= arena->external_nlba))
166 dev_err_ratelimited(to_dev(arena),
167 "%s: lba %#x out of range (max: %#x)\n",
168 __func__, lba, arena->external_nlba);
170 ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
171 if (ret)
172 return ret;
174 raw_mapping = le32_to_cpu(in);
176 z_flag = ent_z_flag(raw_mapping);
177 e_flag = ent_e_flag(raw_mapping);
178 ze = (z_flag << 1) + e_flag;
179 postmap = ent_lba(raw_mapping);
181 /* Reuse the {z,e}_flag variables for *trim and *error */
182 z_flag = 0;
183 e_flag = 0;
185 switch (ze) {
186 case 0:
187 /* Initial state. Return postmap = premap */
188 *mapping = lba;
189 break;
190 case 1:
191 *mapping = postmap;
192 e_flag = 1;
193 break;
194 case 2:
195 *mapping = postmap;
196 z_flag = 1;
197 break;
198 case 3:
199 *mapping = postmap;
200 break;
201 default:
202 return -EIO;
205 if (trim)
206 *trim = z_flag;
207 if (error)
208 *error = e_flag;
210 return ret;
213 static int btt_log_group_read(struct arena_info *arena, u32 lane,
214 struct log_group *log)
216 return arena_read_bytes(arena,
217 arena->logoff + (lane * LOG_GRP_SIZE), log,
218 LOG_GRP_SIZE, 0);
221 static struct dentry *debugfs_root;
223 static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
224 int idx)
226 char dirname[32];
227 struct dentry *d;
 229         /* If, for some reason, parent bttN was not created, exit */
230 if (!parent)
231 return;
233 snprintf(dirname, 32, "arena%d", idx);
234 d = debugfs_create_dir(dirname, parent);
235 if (IS_ERR_OR_NULL(d))
236 return;
237 a->debugfs_dir = d;
239 debugfs_create_x64("size", S_IRUGO, d, &a->size);
240 debugfs_create_x64("external_lba_start", S_IRUGO, d,
241 &a->external_lba_start);
242 debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
243 debugfs_create_u32("internal_lbasize", S_IRUGO, d,
244 &a->internal_lbasize);
245 debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
246 debugfs_create_u32("external_lbasize", S_IRUGO, d,
247 &a->external_lbasize);
248 debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
249 debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
250 debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
251 debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
252 debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
253 debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
254 debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
255 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
256 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
257 debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
258 debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
259 debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
262 static void btt_debugfs_init(struct btt *btt)
264 int i = 0;
265 struct arena_info *arena;
267 btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
268 debugfs_root);
269 if (IS_ERR_OR_NULL(btt->debugfs_dir))
270 return;
272 list_for_each_entry(arena, &btt->arena_list, list) {
273 arena_debugfs_init(arena, btt->debugfs_dir, i);
274 i++;
278 static u32 log_seq(struct log_group *log, int log_idx)
280 return le32_to_cpu(log->ent[log_idx].seq);
284 * This function accepts two log entries, and uses the
285 * sequence number to find the 'older' entry.
286 * It also updates the sequence number in this old entry to
287 * make it the 'new' one if the mark_flag is set.
288 * Finally, it returns which of the entries was the older one.
290 * TODO The logic feels a bit kludge-y. make it better..
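 *
 * Sequence numbers cycle 1 -> 2 -> 3 -> 1; a seq of 0 only appears in an
 * entry that has never been written. Of a valid pair, the entry whose seq
 * trails the other by one step in the cycle is the 'old' one. Equal
 * sequence numbers, or a pair summing to more than 5 (the largest valid
 * pair being 2 + 3), indicate corruption.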
292 static int btt_log_get_old(struct arena_info *a, struct log_group *log)
294 int idx0 = a->log_index[0];
295 int idx1 = a->log_index[1];
296 int old;
299 * the first ever time this is seen, the entry goes into [0]
300 * the next time, the following logic works out to put this
301 * (next) entry into [1]
303 if (log_seq(log, idx0) == 0) {
304 log->ent[idx0].seq = cpu_to_le32(1);
305 return 0;
308 if (log_seq(log, idx0) == log_seq(log, idx1))
309 return -EINVAL;
310 if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
311 return -EINVAL;
313 if (log_seq(log, idx0) < log_seq(log, idx1)) {
314 if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
315 old = 0;
316 else
317 old = 1;
318 } else {
319 if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
320 old = 1;
321 else
322 old = 0;
325 return old;
329 * This function copies the desired (old/new) log entry into ent if
330 * it is not NULL. It returns the sub-slot number (0 or 1)
331 * where the desired log entry was found. Negative return values
332 * indicate errors.
334 static int btt_log_read(struct arena_info *arena, u32 lane,
335 struct log_entry *ent, int old_flag)
337 int ret;
338 int old_ent, ret_ent;
339 struct log_group log;
341 ret = btt_log_group_read(arena, lane, &log);
342 if (ret)
343 return -EIO;
345 old_ent = btt_log_get_old(arena, &log);
346 if (old_ent < 0 || old_ent > 1) {
347 dev_err(to_dev(arena),
348 "log corruption (%d): lane %d seq [%d, %d]\n",
349 old_ent, lane, log.ent[arena->log_index[0]].seq,
350 log.ent[arena->log_index[1]].seq);
351 /* TODO set error state? */
352 return -EIO;
355 ret_ent = (old_flag ? old_ent : (1 - old_ent));
357 if (ent != NULL)
358 memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
360 return ret_ent;
364 * This function commits a log entry to media
365 * It does _not_ prepare the freelist entry for the next write
366 * btt_flog_write is the wrapper for updating the freelist elements
368 static int __btt_log_write(struct arena_info *arena, u32 lane,
369 u32 sub, struct log_entry *ent, unsigned long flags)
371 int ret;
372 u32 group_slot = arena->log_index[sub];
373 unsigned int log_half = LOG_ENT_SIZE / 2;
374 void *src = ent;
375 u64 ns_off;
377 ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
378 (group_slot * LOG_ENT_SIZE);
379 /* split the 16B write into atomic, durable halves */
380 ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
381 if (ret)
382 return ret;
384 ns_off += log_half;
385 src += log_half;
386 return arena_write_bytes(arena, ns_off, src, log_half, flags);
389 static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
390 struct log_entry *ent)
392 int ret;
394 ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
395 if (ret)
396 return ret;
398 /* prepare the next free entry */
399 arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
400 if (++(arena->freelist[lane].seq) == 4)
401 arena->freelist[lane].seq = 1;
402 if (ent_e_flag(ent->old_map))
403 arena->freelist[lane].has_err = 1;
404 arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map));
406 return ret;
410 * This function initializes the BTT map to the initial state, which is
411 * all-zeroes, and indicates an identity mapping
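 *
 * A zeroed map entry has both the 'Z' and 'E' flag bits clear, which
 * btt_map_read() treats as the initial state and resolves to
 * postmap == premap, so zeroing the map area is all the setup needed.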
413 static int btt_map_init(struct arena_info *arena)
415 int ret = -EINVAL;
416 void *zerobuf;
417 size_t offset = 0;
418 size_t chunk_size = SZ_2M;
419 size_t mapsize = arena->logoff - arena->mapoff;
421 zerobuf = kzalloc(chunk_size, GFP_KERNEL);
422 if (!zerobuf)
423 return -ENOMEM;
426 * mapoff should always be at least 512B aligned. We rely on that to
427 * make sure rw_bytes does error clearing correctly, so make sure that
428 * is the case.
430 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
431 "arena->mapoff: %#llx is unaligned\n", arena->mapoff);
433 while (mapsize) {
434 size_t size = min(mapsize, chunk_size);
436 dev_WARN_ONCE(to_dev(arena), size < 512,
437 "chunk size: %#zx is unaligned\n", size);
438 ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
439 size, 0);
440 if (ret)
441 goto free;
443 offset += size;
444 mapsize -= size;
445 cond_resched();
448 free:
449 kfree(zerobuf);
450 return ret;
454 * This function initializes the BTT log with 'fake' entries pointing
455 * to the initial reserved set of blocks as being free
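 *
 * The reserved blocks are the last 'nfree' internal blocks, i.e. internal
 * LBAs external_nlba through external_nlba + nfree - 1; lane 'i' initially
 * owns block external_nlba + i, recorded in both old_map and new_map below.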
457 static int btt_log_init(struct arena_info *arena)
459 size_t logsize = arena->info2off - arena->logoff;
460 size_t chunk_size = SZ_4K, offset = 0;
461 struct log_entry ent;
462 void *zerobuf;
463 int ret;
464 u32 i;
466 zerobuf = kzalloc(chunk_size, GFP_KERNEL);
467 if (!zerobuf)
468 return -ENOMEM;
470 * logoff should always be at least 512B aligned. We rely on that to
471 * make sure rw_bytes does error clearing correctly, so make sure that
472 * is the case.
474 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
475 "arena->logoff: %#llx is unaligned\n", arena->logoff);
477 while (logsize) {
478 size_t size = min(logsize, chunk_size);
480 dev_WARN_ONCE(to_dev(arena), size < 512,
481 "chunk size: %#zx is unaligned\n", size);
482 ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
483 size, 0);
484 if (ret)
485 goto free;
487 offset += size;
488 logsize -= size;
489 cond_resched();
492 for (i = 0; i < arena->nfree; i++) {
493 ent.lba = cpu_to_le32(i);
494 ent.old_map = cpu_to_le32(arena->external_nlba + i);
495 ent.new_map = cpu_to_le32(arena->external_nlba + i);
496 ent.seq = cpu_to_le32(LOG_SEQ_INIT);
497 ret = __btt_log_write(arena, i, 0, &ent, 0);
498 if (ret)
499 goto free;
502 free:
503 kfree(zerobuf);
504 return ret;
507 static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
509 return arena->dataoff + ((u64)lba * arena->internal_lbasize);
512 static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
514 int ret = 0;
516 if (arena->freelist[lane].has_err) {
517 void *zero_page = page_address(ZERO_PAGE(0));
518 u32 lba = arena->freelist[lane].block;
519 u64 nsoff = to_namespace_offset(arena, lba);
520 unsigned long len = arena->sector_size;
522 mutex_lock(&arena->err_lock);
524 while (len) {
525 unsigned long chunk = min(len, PAGE_SIZE);
527 ret = arena_write_bytes(arena, nsoff, zero_page,
528 chunk, 0);
529 if (ret)
530 break;
531 len -= chunk;
532 nsoff += chunk;
533 if (len == 0)
534 arena->freelist[lane].has_err = 0;
536 mutex_unlock(&arena->err_lock);
538 return ret;
541 static int btt_freelist_init(struct arena_info *arena)
543 int old, new, ret;
544 u32 i, map_entry;
545 struct log_entry log_new, log_old;
547 arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
548 GFP_KERNEL);
549 if (!arena->freelist)
550 return -ENOMEM;
552 for (i = 0; i < arena->nfree; i++) {
553 old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
554 if (old < 0)
555 return old;
557 new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
558 if (new < 0)
559 return new;
561 /* sub points to the next one to be overwritten */
562 arena->freelist[i].sub = 1 - new;
563 arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
564 arena->freelist[i].block = le32_to_cpu(log_new.old_map);
567 * FIXME: if error clearing fails during init, we want to make
568 * the BTT read-only
570 if (ent_e_flag(log_new.old_map)) {
571 ret = arena_clear_freelist_error(arena, i);
572 if (ret)
573 dev_err_ratelimited(to_dev(arena),
574 "Unable to clear known errors\n");
577 /* This implies a newly created or untouched flog entry */
578 if (log_new.old_map == log_new.new_map)
579 continue;
581 /* Check if map recovery is needed */
582 ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
583 NULL, NULL, 0);
584 if (ret)
585 return ret;
586 if ((le32_to_cpu(log_new.new_map) != map_entry) &&
587 (le32_to_cpu(log_new.old_map) == map_entry)) {
589 * Last transaction wrote the flog, but wasn't able
590 * to complete the map write. So fix up the map.
592 ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
593 le32_to_cpu(log_new.new_map), 0, 0, 0);
594 if (ret)
595 return ret;
599 return 0;
602 static bool ent_is_padding(struct log_entry *ent)
604 return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
605 && (ent->seq == 0);
609 * Detecting valid log indices: We read a log group (see the comments in btt.h
610 * for a description of a 'log_group' and its 'slots'), and iterate over its
611 * four slots. We expect that a padding slot will be all-zeroes, and use this
612 * to detect a padding slot vs. an actual entry.
614 * If a log_group is in the initial state, i.e. hasn't been used since the
615 * creation of this BTT layout, it will have three of the four slots with
616 * zeroes. We skip over these log_groups for the detection of log_index. If
617 * all log_groups are in the initial state (i.e. the BTT has never been
618 * written to), it is safe to assume the 'new format' of log entries in slots
619 * (0, 1).
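 *
 * Two layouts are accepted (checked at the end of this function): the
 * packed format with live entries in slots (0, 1) and padding in (2, 3),
 * and the older on-media format whose padded entries effectively occupy
 * slots (0, 2).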
621 static int log_set_indices(struct arena_info *arena)
623 bool idx_set = false, initial_state = true;
624 int ret, log_index[2] = {-1, -1};
625 u32 i, j, next_idx = 0;
626 struct log_group log;
627 u32 pad_count = 0;
629 for (i = 0; i < arena->nfree; i++) {
630 ret = btt_log_group_read(arena, i, &log);
631 if (ret < 0)
632 return ret;
634 for (j = 0; j < 4; j++) {
635 if (!idx_set) {
636 if (ent_is_padding(&log.ent[j])) {
637 pad_count++;
638 continue;
639 } else {
640 /* Skip if index has been recorded */
641 if ((next_idx == 1) &&
642 (j == log_index[0]))
643 continue;
644 /* valid entry, record index */
645 log_index[next_idx] = j;
646 next_idx++;
648 if (next_idx == 2) {
649 /* two valid entries found */
650 idx_set = true;
651 } else if (next_idx > 2) {
652 /* too many valid indices */
653 return -ENXIO;
655 } else {
657 * once the indices have been set, just verify
658 * that all subsequent log groups are either in
659 * their initial state or follow the same
660 * indices.
662 if (j == log_index[0]) {
663 /* entry must be 'valid' */
664 if (ent_is_padding(&log.ent[j]))
665 return -ENXIO;
666 } else if (j == log_index[1]) {
669 * log_index[1] can be padding if the
670 * lane never got used and it is still
671 * in the initial state (three 'padding'
672 * entries)
674 } else {
675 /* entry must be invalid (padding) */
676 if (!ent_is_padding(&log.ent[j]))
677 return -ENXIO;
682 * If any of the log_groups have more than one valid,
 683                  * non-padding entry, then we are no longer in the
684 * initial_state
686 if (pad_count < 3)
687 initial_state = false;
688 pad_count = 0;
691 if (!initial_state && !idx_set)
692 return -ENXIO;
695 * If all the entries in the log were in the initial state,
696 * assume new padding scheme
698 if (initial_state)
699 log_index[1] = 1;
702 * Only allow the known permutations of log/padding indices,
703 * i.e. (0, 1), and (0, 2)
705 if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
706 ; /* known index possibilities */
707 else {
708 dev_err(to_dev(arena), "Found an unknown padding scheme\n");
709 return -ENXIO;
712 arena->log_index[0] = log_index[0];
713 arena->log_index[1] = log_index[1];
714 dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
715 dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
716 return 0;
719 static int btt_rtt_init(struct arena_info *arena)
721 arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
722 if (arena->rtt == NULL)
723 return -ENOMEM;
725 return 0;
728 static int btt_maplocks_init(struct arena_info *arena)
730 u32 i;
732 arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
733 GFP_KERNEL);
734 if (!arena->map_locks)
735 return -ENOMEM;
737 for (i = 0; i < arena->nfree; i++)
738 spin_lock_init(&arena->map_locks[i].lock);
740 return 0;
743 static struct arena_info *alloc_arena(struct btt *btt, size_t size,
744 size_t start, size_t arena_off)
746 struct arena_info *arena;
747 u64 logsize, mapsize, datasize;
748 u64 available = size;
750 arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
751 if (!arena)
752 return NULL;
753 arena->nd_btt = btt->nd_btt;
754 arena->sector_size = btt->sector_size;
756 if (!size)
757 return arena;
759 arena->size = size;
760 arena->external_lba_start = start;
761 arena->external_lbasize = btt->lbasize;
762 arena->internal_lbasize = roundup(arena->external_lbasize,
763 INT_LBASIZE_ALIGNMENT);
764 arena->nfree = BTT_DEFAULT_NFREE;
765 arena->version_major = btt->nd_btt->version_major;
766 arena->version_minor = btt->nd_btt->version_minor;
768 if (available % BTT_PG_SIZE)
769 available -= (available % BTT_PG_SIZE);
771 /* Two pages are reserved for the super block and its copy */
772 available -= 2 * BTT_PG_SIZE;
774 /* The log takes a fixed amount of space based on nfree */
775 logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
776 available -= logsize;
778 /* Calculate optimal split between map and data area */
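	/*
	 * Each internal block costs 'internal_lbasize' bytes of data area plus
	 * MAP_ENT_SIZE bytes of map; one extra BTT_PG_SIZE page is held back so
	 * that rounding the map area up to a page boundary below cannot overrun
	 * the space available for data.
	 */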
779 arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
780 arena->internal_lbasize + MAP_ENT_SIZE);
781 arena->external_nlba = arena->internal_nlba - arena->nfree;
783 mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
784 datasize = available - mapsize;
786 /* 'Absolute' values, relative to start of storage space */
787 arena->infooff = arena_off;
788 arena->dataoff = arena->infooff + BTT_PG_SIZE;
789 arena->mapoff = arena->dataoff + datasize;
790 arena->logoff = arena->mapoff + mapsize;
791 arena->info2off = arena->logoff + logsize;
793 /* Default log indices are (0,1) */
794 arena->log_index[0] = 0;
795 arena->log_index[1] = 1;
796 return arena;
799 static void free_arenas(struct btt *btt)
801 struct arena_info *arena, *next;
803 list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
804 list_del(&arena->list);
805 kfree(arena->rtt);
806 kfree(arena->map_locks);
807 kfree(arena->freelist);
808 debugfs_remove_recursive(arena->debugfs_dir);
809 kfree(arena);
814 * This function reads an existing valid btt superblock and
815 * populates the corresponding arena_info struct
817 static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
818 u64 arena_off)
820 arena->internal_nlba = le32_to_cpu(super->internal_nlba);
821 arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
822 arena->external_nlba = le32_to_cpu(super->external_nlba);
823 arena->external_lbasize = le32_to_cpu(super->external_lbasize);
824 arena->nfree = le32_to_cpu(super->nfree);
825 arena->version_major = le16_to_cpu(super->version_major);
826 arena->version_minor = le16_to_cpu(super->version_minor);
828 arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
829 le64_to_cpu(super->nextoff));
830 arena->infooff = arena_off;
831 arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
832 arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
833 arena->logoff = arena_off + le64_to_cpu(super->logoff);
834 arena->info2off = arena_off + le64_to_cpu(super->info2off);
836 arena->size = (le64_to_cpu(super->nextoff) > 0)
837 ? (le64_to_cpu(super->nextoff))
838 : (arena->info2off - arena->infooff + BTT_PG_SIZE);
840 arena->flags = le32_to_cpu(super->flags);
843 static int discover_arenas(struct btt *btt)
845 int ret = 0;
846 struct arena_info *arena;
847 struct btt_sb *super;
848 size_t remaining = btt->rawsize;
849 u64 cur_nlba = 0;
850 size_t cur_off = 0;
851 int num_arenas = 0;
853 super = kzalloc(sizeof(*super), GFP_KERNEL);
854 if (!super)
855 return -ENOMEM;
857 while (remaining) {
858 /* Alloc memory for arena */
859 arena = alloc_arena(btt, 0, 0, 0);
860 if (!arena) {
861 ret = -ENOMEM;
862 goto out_super;
865 arena->infooff = cur_off;
866 ret = btt_info_read(arena, super);
867 if (ret)
868 goto out;
870 if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
871 if (remaining == btt->rawsize) {
872 btt->init_state = INIT_NOTFOUND;
873 dev_info(to_dev(arena), "No existing arenas\n");
874 goto out;
875 } else {
876 dev_err(to_dev(arena),
877 "Found corrupted metadata!\n");
878 ret = -ENODEV;
879 goto out;
883 arena->external_lba_start = cur_nlba;
884 parse_arena_meta(arena, super, cur_off);
886 ret = log_set_indices(arena);
887 if (ret) {
888 dev_err(to_dev(arena),
889 "Unable to deduce log/padding indices\n");
890 goto out;
893 mutex_init(&arena->err_lock);
894 ret = btt_freelist_init(arena);
895 if (ret)
896 goto out;
898 ret = btt_rtt_init(arena);
899 if (ret)
900 goto out;
902 ret = btt_maplocks_init(arena);
903 if (ret)
904 goto out;
906 list_add_tail(&arena->list, &btt->arena_list);
908 remaining -= arena->size;
909 cur_off += arena->size;
910 cur_nlba += arena->external_nlba;
911 num_arenas++;
913 if (arena->nextoff == 0)
914 break;
916 btt->num_arenas = num_arenas;
917 btt->nlba = cur_nlba;
918 btt->init_state = INIT_READY;
920 kfree(super);
921 return ret;
923 out:
924 kfree(arena);
925 free_arenas(btt);
926 out_super:
927 kfree(super);
928 return ret;
931 static int create_arenas(struct btt *btt)
933 size_t remaining = btt->rawsize;
934 size_t cur_off = 0;
936 while (remaining) {
937 struct arena_info *arena;
938 size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);
940 remaining -= arena_size;
941 if (arena_size < ARENA_MIN_SIZE)
942 break;
944 arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
945 if (!arena) {
946 free_arenas(btt);
947 return -ENOMEM;
949 btt->nlba += arena->external_nlba;
950 if (remaining >= ARENA_MIN_SIZE)
951 arena->nextoff = arena->size;
952 else
953 arena->nextoff = 0;
954 cur_off += arena_size;
955 list_add_tail(&arena->list, &btt->arena_list);
958 return 0;
962 * This function completes arena initialization by writing
963 * all the metadata.
964 * It is only called for an uninitialized arena when a write
965 * to that arena occurs for the first time.
967 static int btt_arena_write_layout(struct arena_info *arena)
969 int ret;
970 u64 sum;
971 struct btt_sb *super;
972 struct nd_btt *nd_btt = arena->nd_btt;
973 const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
975 ret = btt_map_init(arena);
976 if (ret)
977 return ret;
979 ret = btt_log_init(arena);
980 if (ret)
981 return ret;
983 super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
984 if (!super)
985 return -ENOMEM;
987 strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
988 memcpy(super->uuid, nd_btt->uuid, 16);
989 memcpy(super->parent_uuid, parent_uuid, 16);
990 super->flags = cpu_to_le32(arena->flags);
991 super->version_major = cpu_to_le16(arena->version_major);
992 super->version_minor = cpu_to_le16(arena->version_minor);
993 super->external_lbasize = cpu_to_le32(arena->external_lbasize);
994 super->external_nlba = cpu_to_le32(arena->external_nlba);
995 super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
996 super->internal_nlba = cpu_to_le32(arena->internal_nlba);
997 super->nfree = cpu_to_le32(arena->nfree);
998 super->infosize = cpu_to_le32(sizeof(struct btt_sb));
999 super->nextoff = cpu_to_le64(arena->nextoff);
1001 * Subtract arena->infooff (arena start) so numbers are relative
1002 * to 'this' arena
1004 super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
1005 super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
1006 super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
1007 super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
1009 super->flags = 0;
1010 sum = nd_sb_checksum((struct nd_gen_sb *) super);
1011 super->checksum = cpu_to_le64(sum);
1013 ret = btt_info_write(arena, super);
1015 kfree(super);
1016 return ret;
1020 * This function completes the initialization for the BTT namespace
1021 * such that it is ready to accept IOs
1023 static int btt_meta_init(struct btt *btt)
1025 int ret = 0;
1026 struct arena_info *arena;
1028 mutex_lock(&btt->init_lock);
1029 list_for_each_entry(arena, &btt->arena_list, list) {
1030 ret = btt_arena_write_layout(arena);
1031 if (ret)
1032 goto unlock;
1034 ret = btt_freelist_init(arena);
1035 if (ret)
1036 goto unlock;
1038 ret = btt_rtt_init(arena);
1039 if (ret)
1040 goto unlock;
1042 ret = btt_maplocks_init(arena);
1043 if (ret)
1044 goto unlock;
1047 btt->init_state = INIT_READY;
1049 unlock:
1050 mutex_unlock(&btt->init_lock);
1051 return ret;
1054 static u32 btt_meta_size(struct btt *btt)
1056 return btt->lbasize - btt->sector_size;
1060 * This function calculates the arena in which the given LBA lies
1061 * by doing a linear walk. This is acceptable since we expect only
1062 * a few arenas. If we have backing devices that get much larger,
1063 * we can construct a balanced binary tree of arenas at init time
1064 * so that this range search becomes faster.
1066 static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
1067 struct arena_info **arena)
1069 struct arena_info *arena_list;
1070 __u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);
1072 list_for_each_entry(arena_list, &btt->arena_list, list) {
1073 if (lba < arena_list->external_nlba) {
1074 *arena = arena_list;
1075 *premap = lba;
1076 return 0;
1078 lba -= arena_list->external_nlba;
1081 return -EIO;
1085 * The following (lock_map, unlock_map) are mostly just to improve
1086 * readability, since they index into an array of locks
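 *
 * The lock index hashes a 4-byte map entry by the cache line containing it
 * ((premap * MAP_ENT_SIZE) / L1_CACHE_BYTES, modulo nfree), so map entries
 * that share a cache line always take the same lock.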
1088 static void lock_map(struct arena_info *arena, u32 premap)
1089 __acquires(&arena->map_locks[idx].lock)
1091 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
1093 spin_lock(&arena->map_locks[idx].lock);
1096 static void unlock_map(struct arena_info *arena, u32 premap)
1097 __releases(&arena->map_locks[idx].lock)
1099 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
1101 spin_unlock(&arena->map_locks[idx].lock);
1104 static int btt_data_read(struct arena_info *arena, struct page *page,
1105 unsigned int off, u32 lba, u32 len)
1107 int ret;
1108 u64 nsoff = to_namespace_offset(arena, lba);
1109 void *mem = kmap_atomic(page);
1111 ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
1112 kunmap_atomic(mem);
1114 return ret;
1117 static int btt_data_write(struct arena_info *arena, u32 lba,
1118 struct page *page, unsigned int off, u32 len)
1120 int ret;
1121 u64 nsoff = to_namespace_offset(arena, lba);
1122 void *mem = kmap_atomic(page);
1124 ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
1125 kunmap_atomic(mem);
1127 return ret;
1130 static void zero_fill_data(struct page *page, unsigned int off, u32 len)
1132 void *mem = kmap_atomic(page);
1134 memset(mem + off, 0, len);
1135 kunmap_atomic(mem);
1138 #ifdef CONFIG_BLK_DEV_INTEGRITY
1139 static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
1140 struct arena_info *arena, u32 postmap, int rw)
1142 unsigned int len = btt_meta_size(btt);
1143 u64 meta_nsoff;
1144 int ret = 0;
1146 if (bip == NULL)
1147 return 0;
1149 meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
1151 while (len) {
1152 unsigned int cur_len;
1153 struct bio_vec bv;
1154 void *mem;
1156 bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1158 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
1159 * .bv_offset already adjusted for iter->bi_bvec_done, and we
1160 * can use those directly
1163 cur_len = min(len, bv.bv_len);
1164 mem = kmap_atomic(bv.bv_page);
1165 if (rw)
1166 ret = arena_write_bytes(arena, meta_nsoff,
1167 mem + bv.bv_offset, cur_len,
1168 NVDIMM_IO_ATOMIC);
1169 else
1170 ret = arena_read_bytes(arena, meta_nsoff,
1171 mem + bv.bv_offset, cur_len,
1172 NVDIMM_IO_ATOMIC);
1174 kunmap_atomic(mem);
1175 if (ret)
1176 return ret;
1178 len -= cur_len;
1179 meta_nsoff += cur_len;
1180 if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
1181 return -EIO;
1184 return ret;
1187 #else /* CONFIG_BLK_DEV_INTEGRITY */
1188 static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
1189 struct arena_info *arena, u32 postmap, int rw)
1191 return 0;
1193 #endif
1195 static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
1196 struct page *page, unsigned int off, sector_t sector,
1197 unsigned int len)
1199 int ret = 0;
1200 int t_flag, e_flag;
1201 struct arena_info *arena = NULL;
1202 u32 lane = 0, premap, postmap;
1204 while (len) {
1205 u32 cur_len;
1207 lane = nd_region_acquire_lane(btt->nd_region);
1209 ret = lba_to_arena(btt, sector, &premap, &arena);
1210 if (ret)
1211 goto out_lane;
1213 cur_len = min(btt->sector_size, len);
1215 ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
1216 NVDIMM_IO_ATOMIC);
1217 if (ret)
1218 goto out_lane;
1221 * We loop to make sure that the post map LBA didn't change
1222 * from under us between writing the RTT and doing the actual
1223 * read.
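 *
 * Publishing 'RTT_VALID | postmap' in rtt[lane] makes writers in
 * btt_write_pg() spin before recycling that free block; re-reading the
 * map afterwards closes the window where the block could already have
 * been handed to a new write before our RTT entry became visible.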
1225 while (1) {
1226 u32 new_map;
1227 int new_t, new_e;
1229 if (t_flag) {
1230 zero_fill_data(page, off, cur_len);
1231 goto out_lane;
1234 if (e_flag) {
1235 ret = -EIO;
1236 goto out_lane;
1239 arena->rtt[lane] = RTT_VALID | postmap;
1241                          * Barrier to make sure this RTT store is not
1242                          * reordered past the verification map_read below
1244 barrier();
1246 ret = btt_map_read(arena, premap, &new_map, &new_t,
1247 &new_e, NVDIMM_IO_ATOMIC);
1248 if (ret)
1249 goto out_rtt;
1251 if ((postmap == new_map) && (t_flag == new_t) &&
1252 (e_flag == new_e))
1253 break;
1255 postmap = new_map;
1256 t_flag = new_t;
1257 e_flag = new_e;
1260 ret = btt_data_read(arena, page, off, postmap, cur_len);
1261 if (ret) {
1262 int rc;
1264 /* Media error - set the e_flag */
1265 rc = btt_map_write(arena, premap, postmap, 0, 1,
1266 NVDIMM_IO_ATOMIC);
1267 goto out_rtt;
1270 if (bip) {
1271 ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
1272 if (ret)
1273 goto out_rtt;
1276 arena->rtt[lane] = RTT_INVALID;
1277 nd_region_release_lane(btt->nd_region, lane);
1279 len -= cur_len;
1280 off += cur_len;
1281 sector += btt->sector_size >> SECTOR_SHIFT;
1284 return 0;
1286 out_rtt:
1287 arena->rtt[lane] = RTT_INVALID;
1288 out_lane:
1289 nd_region_release_lane(btt->nd_region, lane);
1290 return ret;
1294 * Normally, arena_{read,write}_bytes will take care of the initial offset
1295 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
1296 * we need the final, raw namespace offset here
1298 static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
1299 u32 postmap)
1301 u64 nsoff = adjust_initial_offset(arena->nd_btt,
1302 to_namespace_offset(arena, postmap));
1303 sector_t phys_sector = nsoff >> 9;
1305 return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
1308 static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
1309 sector_t sector, struct page *page, unsigned int off,
1310 unsigned int len)
1312 int ret = 0;
1313 struct arena_info *arena = NULL;
1314 u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
1315 struct log_entry log;
1316 int sub;
1318 while (len) {
1319 u32 cur_len;
1320 int e_flag;
1322 retry:
1323 lane = nd_region_acquire_lane(btt->nd_region);
1325 ret = lba_to_arena(btt, sector, &premap, &arena);
1326 if (ret)
1327 goto out_lane;
1328 cur_len = min(btt->sector_size, len);
1330 if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
1331 ret = -EIO;
1332 goto out_lane;
1335 if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
1336 arena->freelist[lane].has_err = 1;
1338 if (mutex_is_locked(&arena->err_lock)
1339 || arena->freelist[lane].has_err) {
1340 nd_region_release_lane(btt->nd_region, lane);
1342 ret = arena_clear_freelist_error(arena, lane);
1343 if (ret)
1344 return ret;
1346 /* OK to acquire a different lane/free block */
1347 goto retry;
1350 new_postmap = arena->freelist[lane].block;
1352 /* Wait if the new block is being read from */
1353 for (i = 0; i < arena->nfree; i++)
1354 while (arena->rtt[i] == (RTT_VALID | new_postmap))
1355 cpu_relax();
1358 if (new_postmap >= arena->internal_nlba) {
1359 ret = -EIO;
1360 goto out_lane;
1363 ret = btt_data_write(arena, new_postmap, page, off, cur_len);
1364 if (ret)
1365 goto out_lane;
1367 if (bip) {
1368 ret = btt_rw_integrity(btt, bip, arena, new_postmap,
1369 WRITE);
1370 if (ret)
1371 goto out_lane;
1374 lock_map(arena, premap);
1375 ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
1376 NVDIMM_IO_ATOMIC);
1377 if (ret)
1378 goto out_map;
1379 if (old_postmap >= arena->internal_nlba) {
1380 ret = -EIO;
1381 goto out_map;
1383 if (e_flag)
1384 set_e_flag(old_postmap);
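		/*
		 * Commit the flog entry before updating the map; if we crash in
		 * between, btt_freelist_init() notices the flog/map mismatch on
		 * the next init and replays the map update from the flog.
		 */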
1386 log.lba = cpu_to_le32(premap);
1387 log.old_map = cpu_to_le32(old_postmap);
1388 log.new_map = cpu_to_le32(new_postmap);
1389 log.seq = cpu_to_le32(arena->freelist[lane].seq);
1390 sub = arena->freelist[lane].sub;
1391 ret = btt_flog_write(arena, lane, sub, &log);
1392 if (ret)
1393 goto out_map;
1395 ret = btt_map_write(arena, premap, new_postmap, 0, 0,
1396 NVDIMM_IO_ATOMIC);
1397 if (ret)
1398 goto out_map;
1400 unlock_map(arena, premap);
1401 nd_region_release_lane(btt->nd_region, lane);
1403 if (e_flag) {
1404 ret = arena_clear_freelist_error(arena, lane);
1405 if (ret)
1406 return ret;
1409 len -= cur_len;
1410 off += cur_len;
1411 sector += btt->sector_size >> SECTOR_SHIFT;
1414 return 0;
1416 out_map:
1417 unlock_map(arena, premap);
1418 out_lane:
1419 nd_region_release_lane(btt->nd_region, lane);
1420 return ret;
1423 static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
1424 struct page *page, unsigned int len, unsigned int off,
1425 bool is_write, sector_t sector)
1427 int ret;
1429 if (!is_write) {
1430 ret = btt_read_pg(btt, bip, page, off, sector, len);
1431 flush_dcache_page(page);
1432 } else {
1433 flush_dcache_page(page);
1434 ret = btt_write_pg(btt, bip, sector, page, off, len);
1437 return ret;
1440 static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
1442 struct bio_integrity_payload *bip = bio_integrity(bio);
1443 struct btt *btt = q->queuedata;
1444 struct bvec_iter iter;
1445 unsigned long start;
1446 struct bio_vec bvec;
1447 int err = 0;
1448 bool do_acct;
1450 if (!bio_integrity_prep(bio))
1451 return BLK_QC_T_NONE;
1453 do_acct = nd_iostat_start(bio, &start);
1454 bio_for_each_segment(bvec, bio, iter) {
1455 unsigned int len = bvec.bv_len;
1457 if (len > PAGE_SIZE || len < btt->sector_size ||
1458 len % btt->sector_size) {
1459 dev_err_ratelimited(&btt->nd_btt->dev,
1460 "unaligned bio segment (len: %d)\n", len);
1461 bio->bi_status = BLK_STS_IOERR;
1462 break;
1465 err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
1466 op_is_write(bio_op(bio)), iter.bi_sector);
1467 if (err) {
1468 dev_err(&btt->nd_btt->dev,
1469 "io error in %s sector %lld, len %d,\n",
1470 (op_is_write(bio_op(bio))) ? "WRITE" :
1471 "READ",
1472 (unsigned long long) iter.bi_sector, len);
1473 bio->bi_status = errno_to_blk_status(err);
1474 break;
1477 if (do_acct)
1478 nd_iostat_end(bio, start);
1480 bio_endio(bio);
1481 return BLK_QC_T_NONE;
1484 static int btt_rw_page(struct block_device *bdev, sector_t sector,
1485 struct page *page, bool is_write)
1487 struct btt *btt = bdev->bd_disk->private_data;
1488 int rc;
1489 unsigned int len;
1491 len = hpage_nr_pages(page) * PAGE_SIZE;
1492 rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
1493 if (rc == 0)
1494 page_endio(page, is_write, 0);
1496 return rc;
1500 static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
1502 /* some standard values */
1503 geo->heads = 1 << 6;
1504 geo->sectors = 1 << 5;
1505 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
1506 return 0;
1509 static const struct block_device_operations btt_fops = {
1510 .owner = THIS_MODULE,
1511 .rw_page = btt_rw_page,
1512 .getgeo = btt_getgeo,
1513 .revalidate_disk = nvdimm_revalidate_disk,
1516 static int btt_blk_init(struct btt *btt)
1518 struct nd_btt *nd_btt = btt->nd_btt;
1519 struct nd_namespace_common *ndns = nd_btt->ndns;
1521 /* create a new disk and request queue for btt */
1522 btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
1523 if (!btt->btt_queue)
1524 return -ENOMEM;
1526 btt->btt_disk = alloc_disk(0);
1527 if (!btt->btt_disk) {
1528 blk_cleanup_queue(btt->btt_queue);
1529 return -ENOMEM;
1532 nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
1533 btt->btt_disk->first_minor = 0;
1534 btt->btt_disk->fops = &btt_fops;
1535 btt->btt_disk->private_data = btt;
1536 btt->btt_disk->queue = btt->btt_queue;
1537 btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
1539 blk_queue_make_request(btt->btt_queue, btt_make_request);
1540 blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
1541 blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
1542 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
1543 btt->btt_queue->queuedata = btt;
1545 if (btt_meta_size(btt)) {
1546 int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
1548 if (rc) {
1549 del_gendisk(btt->btt_disk);
1550 put_disk(btt->btt_disk);
1551 blk_cleanup_queue(btt->btt_queue);
1552 return rc;
1555 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
1556 device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
1557 btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
1558 revalidate_disk(btt->btt_disk);
1560 return 0;
1563 static void btt_blk_cleanup(struct btt *btt)
1565 del_gendisk(btt->btt_disk);
1566 put_disk(btt->btt_disk);
1567 blk_cleanup_queue(btt->btt_queue);
1571 * btt_init - initialize a block translation table for the given device
1572 * @nd_btt: device with BTT geometry and backing device info
1573 * @rawsize: raw size in bytes of the backing device
1574 * @lbasize: lba size of the backing device
1575 * @uuid: A uuid for the backing device - this is stored on media
1576 * @maxlane: maximum number of parallel requests the device can handle
1578 * Initialize a Block Translation Table on a backing device to provide
1579 * single sector power fail atomicity.
1581 * Context:
1582 * Might sleep.
1584 * Returns:
1585 * Pointer to a new struct btt on success, NULL on failure.
1587 static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
1588 u32 lbasize, u8 *uuid, struct nd_region *nd_region)
1590 int ret;
1591 struct btt *btt;
1592 struct nd_namespace_io *nsio;
1593 struct device *dev = &nd_btt->dev;
1595 btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
1596 if (!btt)
1597 return NULL;
1599 btt->nd_btt = nd_btt;
1600 btt->rawsize = rawsize;
1601 btt->lbasize = lbasize;
1602 btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
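	/*
	 * Expose a 512B or 4KB logical sector; any remaining bytes of
	 * 'lbasize' are per-sector metadata (see btt_meta_size() and the
	 * nd_integrity_init() call in btt_blk_init()).
	 */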
1603 INIT_LIST_HEAD(&btt->arena_list);
1604 mutex_init(&btt->init_lock);
1605 btt->nd_region = nd_region;
1606 nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
1607 btt->phys_bb = &nsio->bb;
1609 ret = discover_arenas(btt);
1610 if (ret) {
1611 dev_err(dev, "init: error in arena_discover: %d\n", ret);
1612 return NULL;
1615 if (btt->init_state != INIT_READY && nd_region->ro) {
1616 dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
1617 dev_name(&nd_region->dev));
1618 return NULL;
1619 } else if (btt->init_state != INIT_READY) {
1620 btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
1621 ((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
1622 dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
1623 btt->num_arenas, rawsize);
1625 ret = create_arenas(btt);
1626 if (ret) {
1627 dev_info(dev, "init: create_arenas: %d\n", ret);
1628 return NULL;
1631 ret = btt_meta_init(btt);
1632 if (ret) {
1633 dev_err(dev, "init: error in meta_init: %d\n", ret);
1634 return NULL;
1638 ret = btt_blk_init(btt);
1639 if (ret) {
1640 dev_err(dev, "init: error in blk_init: %d\n", ret);
1641 return NULL;
1644 btt_debugfs_init(btt);
1646 return btt;
1650 * btt_fini - de-initialize a BTT
1651 * @btt: the BTT handle that was generated by btt_init
1653 * De-initialize a Block Translation Table on device removal
1655 * Context:
1656 * Might sleep.
1658 static void btt_fini(struct btt *btt)
1660 if (btt) {
1661 btt_blk_cleanup(btt);
1662 free_arenas(btt);
1663 debugfs_remove_recursive(btt->debugfs_dir);
1667 int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
1669 struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
1670 struct nd_region *nd_region;
1671 struct btt_sb *btt_sb;
1672 struct btt *btt;
1673 size_t rawsize;
1675 if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
1676 dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
1677 return -ENODEV;
1680 btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
1681 if (!btt_sb)
1682 return -ENOMEM;
1685 * If this returns < 0, that is ok as it just means there wasn't
1686 * an existing BTT, and we're creating a new one. We still need to
1687 * call this as we need the version dependent fields in nd_btt to be
1688 * set correctly based on the holder class
1690 nd_btt_version(nd_btt, ndns, btt_sb);
1692 rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
1693 if (rawsize < ARENA_MIN_SIZE) {
1694 dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
1695 dev_name(&ndns->dev),
1696 ARENA_MIN_SIZE + nd_btt->initial_offset);
1697 return -ENXIO;
1699 nd_region = to_nd_region(nd_btt->dev.parent);
1700 btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
1701 nd_region);
1702 if (!btt)
1703 return -ENOMEM;
1704 nd_btt->btt = btt;
1706 return 0;
1708 EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
1710 int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
1712 struct btt *btt = nd_btt->btt;
1714 btt_fini(btt);
1715 nd_btt->btt = NULL;
1717 return 0;
1719 EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
1721 static int __init nd_btt_init(void)
1723 int rc = 0;
1725 debugfs_root = debugfs_create_dir("btt", NULL);
1726 if (IS_ERR_OR_NULL(debugfs_root))
1727 rc = -ENXIO;
1729 return rc;
1732 static void __exit nd_btt_exit(void)
1734 debugfs_remove_recursive(debugfs_root);
1737 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
1738 MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
1739 MODULE_LICENSE("GPL v2");
1740 module_init(nd_btt_init);
1741 module_exit(nd_btt_exit);