// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"
enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};
static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}
static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}
static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}
static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}
static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}
/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}
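
/*
 * Illustrative sketch of the on-media map entry encoding implied by the
 * switch above (assuming the MAP_* definitions in btt.h, with the zero/trim
 * 'Z' flag at bit 31 and the error 'E' flag at bit 30):
 *
 *   Z E   meaning
 *   0 0   initial/identity state: premap LBA maps to itself
 *   0 1   error flag set on the block
 *   1 0   zero/trim flag set on the block
 *   1 1   'normal' entry: the low 30 bits hold a real postmap LBA
 *
 * So a caller asking for "no flags" (z_flag == e_flag == 0) actually gets
 * both bits set on media via MAP_ENT_NORMAL, which is what distinguishes a
 * written mapping from the all-zeroes initial state.
 */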
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}
static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}
static struct dentry *debugfs_root;
static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}
static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}
static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}
/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * It also updates the sequence number in this old entry to
 * make it the 'new' one if the mark_flag is set.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}
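
/*
 * Worked example of the sequence-number comparison above (a sketch based on
 * the 1 -> 2 -> 3 -> 1 cycle the flog uses, with 0 reserved for "never
 * written"):
 *
 *   seq[idx0], seq[idx1] = (1, 2) -> difference 1, so idx0 is the older slot
 *   seq[idx0], seq[idx1] = (2, 3) -> difference 1, so idx0 is the older slot
 *   seq[idx0], seq[idx1] = (1, 3) -> wrap-around: 3 preceded 1, idx1 is older
 *
 * Equal sequence numbers cannot occur in a consistent log, and a pair
 * summing to more than 5 implies a value outside the 1..3 cycle; both are
 * rejected as corruption.
 */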
/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane, log.ent[arena->log_index[0]].seq,
			log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}
/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
		(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}
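
/*
 * A note on the split write above (an inference from the log_entry layout in
 * btt.h, not something the original comments spell out): the first 8B half
 * carries lba/old_map and the second carries new_map/seq. Because an entry
 * only becomes the "newest" one once its seq field advances, writing the
 * seq-bearing half last means a torn update leaves the previous entry as the
 * current valid one.
 */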
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}
/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
	}

 free:
	kfree(zerobuf);
	return ret;
}
/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}
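
/*
 * Rationale (hedged summary, inferred from the flow above): a free block
 * inherits the 'E' flag when the mapping it replaced had an error recorded
 * against it. Writing zeroes over one BTT sector's worth of that freed block
 * through arena_write_bytes() gives the underlying nvdimm rw_bytes path a
 * chance to clear its poison/badblock entries before the block is handed out
 * for a new write.
 */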
static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_read_map is stripped of any flag bits,
		 * so use the stripped out versions from the log as well for
		 * testing whether recovery is needed. For restoration, use the
		 * 'raw' version of the log entries as that captured what we
		 * were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}
/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial state, and the log indices indicate a used set
		 * of logs.
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}
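
/*
 * Background (a summary of the two layouts this function distinguishes,
 * assuming the log_group description in btt.h): each lane owns a 64B log
 * group with four 16B slots. The current format stores its two flog entries
 * in slots (0, 1) and pads (2, 3); an older format padded each entry to 32B,
 * leaving valid entries in slots (0, 2). Hence only the (0, 1) and (0, 2)
 * index pairs are accepted above.
 */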
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}
static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}
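
/*
 * Layout sketch for a freshly sized arena (derived from the offset
 * arithmetic above, offsets relative to arena_off):
 *
 *   [ info | data area | map | log | info2 ]
 *     4K     datasize    mapsize  logsize  4K
 *
 * internal_nlba is chosen so that each internal block plus its 4B map entry
 * fits in the space left after the two info blocks and the fixed-size log;
 * nfree of those blocks are held back from external_nlba to seed the free
 * list.
 */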
static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}
/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}
static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}
static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}
/*
 * This function completes arena initialization by writing
 * the map, log, and info blocks to the media.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}
/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}
static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}
/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}
/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}
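
/*
 * Note on the index calculation (an observation, not from the original
 * comments): map entries that share an L1 cache line hash to the same lock,
 * so concurrent read-modify-write of neighbouring 4B entries is serialized,
 * while the modulo by nfree keeps the lock array bounded at one lock per
 * free-list lane.
 */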
static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}
static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}
static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
						&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret < 0) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}
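
/*
 * How the RTT is used here (hedged recap of the loop above): before touching
 * the data, the reader publishes RTT_VALID | postmap in its lane's rtt slot
 * and then re-reads the map entry. If the translation is unchanged, any
 * concurrent writer is guaranteed to see the RTT entry and spin before
 * reusing that postmap block (see the rtt scan in btt_write_pg), so the data
 * read cannot race with a reallocation of the block.
 */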
/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
			NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}
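
/*
 * Ordering summary for the write path above (a sketch of why a crash at any
 * point stays consistent): data lands in a free block first, the flog then
 * records the premap LBA with its old and new translations, and only after
 * that does the map entry flip, all under the per-cacheline map lock. On
 * recovery, btt_freelist_init() replays the newest flog entry and rewrites
 * the map if the flog and map disagree.
 */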
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			unsigned int op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}
static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d,\n",
				(op_is_write(bio_op(bio))) ? "WRITE" :
				"READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, unsigned int op)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;
	unsigned int len;

	len = hpage_nr_pages(page) * PAGE_SIZE;
	rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return rc;
}
static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}
= {
1512 .owner
= THIS_MODULE
,
1513 .rw_page
= btt_rw_page
,
1514 .getgeo
= btt_getgeo
,
1515 .revalidate_disk
= nvdimm_revalidate_disk
,
static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
	btt->btt_disk->queue->backing_dev_info->capabilities |=
			BDI_CAP_SYNCHRONOUS_IO;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	revalidate_disk(btt->btt_disk);

	return 0;
}
static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}
/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @maxlane:	maximum number of parallel requests the device can handle
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}
/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t size, rawsize;
	int rc;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	size = nvdimm_namespace_capacity(ndns);
	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
	if (rc)
		return rc;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = size - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}
static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);