/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			8192
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8
struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4

#define	JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[0];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
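/*
 * Note: the two sector_hi sentinel values above mark a journal entry as
 * free (-1) or as currently being filled (-2); real sector numbers never
 * collide with them because provided_data_sectors is far below 2^63.
 * The smp_wmb() in journal_entry_set_sector() orders the data copy
 * before the entry is published.
 */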
#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}
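/*
 * Example: the commit sequence cycles 0 -> 1 -> 2 -> 3 -> 0, so
 * prev_commit_seq(0) == 3 and next_commit_seq(3) == 0.  Each journal
 * wraparound advances the sequence, selecting one of the N_COMMIT_IDS
 * random commit ids used to stamp journal sectors.
 */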
/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	unsigned n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool recalculate_flag;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};
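/*
 * A dm_integrity_range describes the region of the device that one bio is
 * currently operating on.  In-flight ranges live in the in_progress
 * rb-tree; ranges that could not be added because they overlap an
 * in-flight range park their task on wait_list until woken.
 */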
struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;
#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif
static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile, protection is performed layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
	.prepare_fn		= dm_integrity_prepare,
	.complete_fn		= dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);
static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}
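/*
 * Worked example (hypothetical numbers): with tag_size == 4
 * (log2_tag_size == 2) and log2_buffer_sectors == 7, one metadata buffer
 * spans 128 sectors == 65536 bytes == 16384 tags, so block number
 * "offset" maps to buffer offset >> (9 + 7 - 2) and to byte
 * (offset << 2) & 65535 within it -- exactly the fast path above.
 */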
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}
static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE)
		sb_set_version(ic);

	return dm_io(&io_req, 1, &io_loc, NULL);
}

#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3
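/*
 * Note on granularity: with the default 32768 sectors per bitmap bit and
 * 512-byte sectors, one bit covers 16 MiB of data, and one 4096-byte
 * bitmap block covers 4096 * 8 such ranges, i.e. 512 GiB of data.
 */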
static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
			(unsigned long long)sector,
			(unsigned long long)n_sectors,
			ic->sb->log2_sectors_per_block,
			ic->log2_blocks_per_bitmap_bit,
			mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page) {
		this_end_bit = PAGE_SIZE * 8 - 1;
	} else {
		this_end_bit = end_bit;
	}

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = 0;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__clear_bit(bit, data);
			bit++;
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}
static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);
		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}
static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}
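/*
 * The section MAC authenticates the sector numbers of all entries in a
 * section.  The computed MAC is then split into JOURNAL_MAC_PER_SECTOR
 * byte pieces, one piece stored in (or, on read, compared against) each
 * of the JOURNAL_BLOCK_SECTORS metadata sectors of the section -- see
 * rw_section_mac() below.
 */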
static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}
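/*
 * Design note: when journal_crypt is a stream-style cipher, its keystream
 * was precomputed into the ic->journal_xor pages at construction time, so
 * encrypting or decrypting a section reduces to a plain XOR that can be
 * offloaded to the async_tx XOR engine.  complete_journal_op() above is
 * the async_tx callback that drops one in_flight reference per page.
 */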
static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}
static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}
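/*
 * When the committed region wraps past the end of the journal,
 * write_journal() splits the commit into two I/Os (tail of the journal,
 * then head of the journal) and, in the encrypted case, overlaps the
 * second encryption pass with the first write where possible.
 */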
static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}
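/*
 * add_new_range() returns false instead of sleeping when the new range
 * overlaps either an in-flight range or (with check_waiting) a queued
 * waiter; the caller decides whether to offload the bio to a workqueue
 * or to block in wait_and_add_new_range().  Keying the rb-tree by
 * logical_sector works because in-flight ranges never overlap.
 */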
static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}
static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}
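/*
 * ic->journal_tree is an array with one journal_node per journal entry,
 * so a node's array index identifies its position in the journal while
 * the rb-tree orders the nodes by logical sector.  find_journal_node()
 * therefore yields both the journal position of the newest entry for a
 * sector and the next higher sector present in the journal.
 */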
static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}
#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}
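/*
 * Note the TAG_CMP convention: on a mismatch dm_integrity_rw_tag()
 * returns the number of tag bytes that had not yet been verified, and
 * the caller converts that back into the sector number of the first
 * failing block -- see the DMERR_LIMIT in integrity_metadata() below.
 */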
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}
static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;
		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
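/*
 * The per-sector checksum covers the little-endian sector number
 * followed by the block data, so identical data at different offsets
 * yields different tags.  A digest longer than tag_size is simply
 * truncated by the callers; a shorter one is zero-padded above.
 */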
static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR_LIMIT("Checksum failed at sector 0x%llx",
						    (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1) {
		struct bvec_iter iter;
		struct bio_vec bv;
		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
				      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u",
				      bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(!dio->write)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    (unsigned long long)logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->write)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->write)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}
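/*
 * Journal data layout trick: the last 8 bytes of every 512-byte data
 * sector in the journal are displaced by a commit_id, and the displaced
 * original bytes are parked in je->last_bytes[].  A torn journal write
 * is then detectable because the commit_id of each sector will not match
 * dm_integrity_commit_id() for that journal position.
 */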
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool need_sync_io = ic->internal_hash && !dio->write;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->offload_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (likely(ic->mode == 'J')) {
		if (dio->write) {
			unsigned next_entry, i, pos;
			unsigned ws, we, range_sectors;

			dio->range.n_sectors = min(dio->range.n_sectors,
						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
			if (unlikely(!dio->range.n_sectors)) {
				if (from_map)
					goto offload_to_thread;
				sleep_on_endio_wait(ic);
				goto retry;
			}
			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
			ic->free_sectors -= range_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + range_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;
				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range, true))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
		if (from_map) {
offload_to_thread:
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		if (journal_read_pos != NOT_FOUND)
			dio->range.n_sectors = ic->sectors_per_block;
		wait_and_add_new_range(ic, &dio->range);
		/*
		 * wait_and_add_new_range drops the spinlock, so the journal
		 * may have been changed arbitrarily. We need to recheck.
		 * To simplify the code, we restrict I/O size to just one block.
		 */
		if (journal_read_pos != NOT_FOUND) {
			sector_t next_sector;
			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (unlikely(new_pos != journal_read_pos)) {
				remove_range_unlocked(ic, &dio->range);
				goto retry;
			}
		}
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	if (ic->mode == 'B' && dio->write) {
		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			struct bitmap_block_status *bbs;

			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
			spin_lock(&bbs->bio_queue_lock);
			bio_list_add(&bbs->bio_queue, bio);
			spin_unlock(&bbs->bio_queue_lock);
			queue_work(ic->writer_wq, &bbs->work);
			return;
		}
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		init_completion(&read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dm_bio_record(&dio->bio_details, bio);
	bio_set_dev(bio, ic->dev->bdev);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
	bio->bi_end_io = integrity_end_io;
	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;

	generic_make_request(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
			goto skip_check;
		if (ic->mode == 'B') {
			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
				goto skip_check;
		}

		if (likely(!bio->bi_status))
			integrity_metadata(&dio->work);
		else
skip_check:
			dec_in_flight(dio);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}
static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}

static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
		    ic->journal_section_entries + ic->free_sectors)) {
		DMCRIT("journal_sections %u, journal_section_entries %u, "
		       "n_uncommitted_sections %u, n_committed_sections %u, "
		       "journal_section_entries %u, free_sectors %u",
		       ic->journal_sections, ic->journal_section_entries,
		       ic->n_uncommitted_sections, ic->n_committed_sections,
		       ic->journal_section_entries, ic->free_sectors);
	}
}
static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}
static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;

	remove_range(ic, &io->range);
	mempool_free(io, &ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}
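/*
 * The last 8 bytes of each 512-byte sector in the journal data area are
 * displaced by the per-sector commit_id; the displaced bytes are kept in
 * journal_entry::last_bytes. restore_last_bytes() puts them back before
 * the data is copied to its final location.
 */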
static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
			       struct journal_entry *je)
{
	unsigned s = 0;

	do {
		js->commit_id = je->last_bytes[s];
		js++;
	} while (++s < ic->sectors_per_block);
}
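/*
 * Write committed journal sections back to their final location on the
 * data device. Adjacent journal entries that target consecutive device
 * sectors are merged into one copy operation; when called from replay,
 * each entry's checksum is verified against its journal tag first.
 */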
static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
			     unsigned write_sections, bool from_replay)
{
	unsigned i, j, n;
	struct journal_completion comp;
	struct blk_plug plug;

	blk_start_plug(&plug);

	comp.ic = ic;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	init_completion(&comp.comp);

	i = write_start;
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
#endif
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned k, l, next_loop;
			sector_t metadata_block;
			unsigned metadata_offset;
			struct journal_io *io;

			if (journal_entry_is_unused(je))
				continue;
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			if (unlikely(from_replay)) {
				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
					sec &= ~(sector_t)(ic->sectors_per_block - 1);
				}
			}
			get_area_and_offset(ic, sec, &area, &offset);
			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;
				if (journal_entry_is_unused(je2))
					break;
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
					break;
				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
			}
			next_loop = k - 1;

			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
			io->comp = &comp;
			io->range.logical_sector = sec;
			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;

			spin_lock_irq(&ic->endio_wait.lock);
			add_new_range_and_wait(ic, &io->range);

			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];

				/* don't write if there is newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, j);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[j]);
					j++;
					sec += ic->sectors_per_block;
					offset += ic->sectors_per_block;
				}
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[k - 1]);
					k--;
				}
				if (j == k) {
					remove_range_unlocked(ic, &io->range);
					spin_unlock_irq(&ic->endio_wait.lock);
					mempool_free(io, &ic->journal_io_mempool);
					goto skip_io;
				}
				for (l = j; l < k; l++)
					remove_journal_node(ic, &section_node[l]);
			}
			spin_unlock_irq(&ic->endio_wait.lock);

			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
			for (l = j; l < k; l++) {
				int r;
				struct journal_entry *je2 = access_journal_entry(ic, i, l);

				if (
#ifndef INTERNAL_VERIFY
				    unlikely(from_replay) &&
#endif
				    ic->internal_hash) {
					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
								  (char *)access_journal_data(ic, i, l), test_tag);
					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
				}

				journal_entry_set_unused(je2);
				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
							ic->tag_size, TAG_WRITE);
				if (unlikely(r))
					dm_integrity_io_error(ic, "reading tags", r);
			}

			atomic_inc(&comp.in_flight);
			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
					  (k - j) << ic->sb->log2_sectors_per_block,
					  get_data_sector(ic, area, offset),
					  complete_copy_from_journal, io);
skip_io:
			j = next_loop;
		}
	}

	dm_bufio_write_dirty_buffers_async(ic->bufio);

	blk_finish_plug(&plug);

	complete_journal_op(&comp);
	wait_for_completion_io(&comp.comp);

	dm_integrity_flush_buffers(ic);
}
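/*
 * Journal writer work: take a snapshot of the committed window under the
 * endio_wait lock, write those sections to their final location, and then
 * return the journal space to the free pool, waking up writers that were
 * blocked on a full journal.
 */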
static void integrity_writer(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
	unsigned write_start, write_sections;

	unsigned prev_free_sectors;

	/* the following test is not needed, but it tests the replay code */
	if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
		return;

	spin_lock_irq(&ic->endio_wait.lock);
	write_start = ic->committed_section;
	write_sections = ic->n_committed_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!write_sections)
		return;

	do_journal_write(ic, write_start, write_sections, false);

	spin_lock_irq(&ic->endio_wait.lock);

	ic->committed_section += write_sections;
	wraparound_section(ic, &ic->committed_section);
	ic->n_committed_sections -= write_sections;

	prev_free_sectors = ic->free_sectors;
	ic->free_sectors += write_sections * ic->journal_section_entries;
	if (unlikely(!prev_free_sectors))
		wake_up_locked(&ic->endio_wait);

	spin_unlock_irq(&ic->endio_wait.lock);
}
static void recalc_write_super(struct dm_integrity_c *ic)
{
	int r;

	dm_integrity_flush_buffers(ic);
	if (dm_integrity_failed(ic))
		return;

	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing superblock", r);
}
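/*
 * Background recalculation for the "recalculate" option: walk the device
 * in RECALC_SECTORS-sized chunks, read the data, compute the integrity
 * tags and write them to the metadata area, periodically persisting the
 * position in sb->recalc_sector via recalc_write_super().
 */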
static void integrity_recalc(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
	struct dm_integrity_range range;
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	sector_t area, offset;
	sector_t metadata_block;
	unsigned metadata_offset;
	sector_t logical_sector, n_sectors;
	__u8 *t;
	unsigned i;
	int r;
	unsigned super_counter = 0;

	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));

	spin_lock_irq(&ic->endio_wait.lock);

next_chunk:

	if (unlikely(dm_post_suspending(ic->ti)))
		goto unlock_ret;

	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
		if (ic->mode == 'B') {
			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		}
		goto unlock_ret;
	}

	get_area_and_offset(ic, range.logical_sector, &area, &offset);
	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
	if (!ic->meta_dev)
		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);

	add_new_range_and_wait(ic, &range);
	spin_unlock_irq(&ic->endio_wait.lock);
	logical_sector = range.logical_sector;
	n_sectors = range.n_sectors;

	if (ic->mode == 'B') {
		if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
			goto advance_and_next;
		}
		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
			logical_sector += ic->sectors_per_block;
			n_sectors -= ic->sectors_per_block;
			cond_resched();
		}
		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
			n_sectors -= ic->sectors_per_block;
			cond_resched();
		}
		get_area_and_offset(ic, logical_sector, &area, &offset);
	}

	DEBUG_print("recalculating: %lx, %lx\n", logical_sector, n_sectors);

	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
		recalc_write_super(ic);
		if (ic->mode == 'B') {
			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
		}
		super_counter = 0;
	}

	if (unlikely(dm_integrity_failed(ic)))
		goto err;

	io_req.bi_op = REQ_OP_READ;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_VMA;
	io_req.mem.ptr.addr = ic->recalc_buffer;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = get_data_sector(ic, area, offset);
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "reading data", r);
		goto err;
	}

	t = ic->recalc_tags;
	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
		integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
		t += ic->tag_size;
	}

	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);

	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "writing tags", r);
		goto err;
	}

	if (ic->mode == 'B') {
		sector_t start, end;
		start = (range.logical_sector >>
			 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
		end = ((range.logical_sector + range.n_sectors) >>
		       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
		block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
	}

advance_and_next:
	cond_resched();

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
	goto next_chunk;

err:
	remove_range(ic, &range);
	return;

unlock_ret:
	spin_unlock_irq(&ic->endio_wait.lock);

	recalc_write_super(ic);
}
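/*
 * Bitmap-mode ('B') write path: bios whose bits in may_write_bitmap are
 * already set can proceed immediately; for the others the corresponding
 * journal bitmap block is set, written out with FUA, and the bios are
 * released only after the bitmap is on stable storage.
 */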
static void bitmap_block_work(struct work_struct *w)
{
	struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
	struct dm_integrity_c *ic = bbs->ic;
	struct bio *bio;
	struct bio_list bio_queue;
	struct bio_list waiting;

	bio_list_init(&waiting);

	spin_lock(&bbs->bio_queue_lock);
	bio_queue = bbs->bio_queue;
	bio_list_init(&bbs->bio_queue);
	spin_unlock(&bbs->bio_queue_lock);

	while ((bio = bio_list_pop(&bio_queue))) {
		struct dm_integrity_io *dio;

		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

		if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			remove_range(ic, &dio->range);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
		} else {
			block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
					dio->range.n_sectors, BITMAP_OP_SET);
			bio_list_add(&waiting, bio);
		}
	}

	if (bio_list_empty(&waiting))
		return;

	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);

	while ((bio = bio_list_pop(&waiting))) {
		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

		block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				dio->range.n_sectors, BITMAP_OP_SET);

		remove_range(ic, &dio->range);
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->offload_wq, &dio->work);
	}

	queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
}
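/*
 * Periodic bitmap flush: after flushing dirty buffers and the data device,
 * clear the in-memory dirty bits (up to the recalculated limit) and write
 * the cleared bitmap back, then complete any bios that were waiting for
 * synchronous mode.
 */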
static void bitmap_flush_work(struct work_struct *work)
{
	struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
	struct dm_integrity_range range;
	unsigned long limit;
	struct bio *bio;

	dm_integrity_flush_buffers(ic);

	range.logical_sector = 0;
	range.n_sectors = ic->provided_data_sectors;

	spin_lock_irq(&ic->endio_wait.lock);
	add_new_range_and_wait(ic, &range);
	spin_unlock_irq(&ic->endio_wait.lock);

	dm_integrity_flush_buffers(ic);
	if (ic->meta_dev)
		blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);

	limit = ic->provided_data_sectors;
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		limit = le64_to_cpu(ic->sb->recalc_sector)
			>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
			<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	}
	/*DEBUG_print("zeroing journal\n");*/
	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);

	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
		bio_endio(bio);
		spin_unlock_irq(&ic->endio_wait.lock);
		spin_lock_irq(&ic->endio_wait.lock);
	}
	spin_unlock_irq(&ic->endio_wait.lock);
}
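/*
 * Initialize (erase) a range of journal sections: zero the entry space,
 * stamp each sector with the commit id for commit_seq and mark every
 * journal entry unused, then write the sections out.
 */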
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
			 unsigned n_sections, unsigned char commit_seq)
{
	unsigned i, j, n;

	if (!n_sections)
		return;

	for (n = 0; n < n_sections; n++) {
		i = start_section + n;
		wraparound_section(ic, &i);
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);
			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
		}
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			journal_entry_set_unused(je);
		}
	}

	write_journal(ic, start_section, n_sections);
}
static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
{
	unsigned char k;

	for (k = 0; k < N_COMMIT_IDS; k++) {
		if (dm_integrity_commit_id(ic, i, j, k) == id)
			return k;
	}
	dm_integrity_io_error(ic, "journal commit id", -EIO);
	return -1;
}
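/*
 * Journal replay at resume time: read (and if needed decrypt) the journal,
 * determine the most recent commit seq from the per-sector commit ids,
 * replay the consistent prefix via do_journal_write() and erase anything
 * that cannot be trusted.
 */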
static void replay_journal(struct dm_integrity_c *ic)
{
	unsigned i, j;
	bool used_commit_ids[N_COMMIT_IDS];
	unsigned max_commit_id_sections[N_COMMIT_IDS];
	unsigned write_start, write_sections;
	unsigned continue_section;
	bool journal_empty;
	unsigned char unused, last_used, want_commit_seq;

	if (ic->mode == 'R')
		return;

	if (ic->journal_uptodate)
		return;

	last_used = 0;
	write_start = 0;

	if (!ic->just_formatted) {
		DEBUG_print("reading journal\n");
		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
		if (ic->journal_io)
			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
		if (ic->journal_io) {
			struct journal_completion crypt_comp;
			crypt_comp.ic = ic;
			init_completion(&crypt_comp.comp);
			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
			wait_for_completion(&crypt_comp.comp);
		}
		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
	}

	if (dm_integrity_failed(ic))
		goto clear_journal;

	journal_empty = true;
	memset(used_commit_ids, 0, sizeof used_commit_ids);
	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
	for (i = 0; i < ic->journal_sections; i++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			int k;
			struct journal_sector *js = access_journal(ic, i, j);
			k = find_commit_seq(ic, i, j, js->commit_id);
			if (k < 0)
				goto clear_journal;
			used_commit_ids[k] = true;
			max_commit_id_sections[k] = i;
		}
		if (journal_empty) {
			for (j = 0; j < ic->journal_section_entries; j++) {
				struct journal_entry *je = access_journal_entry(ic, i, j);
				if (!journal_entry_is_unused(je)) {
					journal_empty = false;
					break;
				}
			}
		}
	}

	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
		unused = N_COMMIT_IDS - 1;
		while (unused && !used_commit_ids[unused - 1])
			unused--;
	} else {
		for (unused = 0; unused < N_COMMIT_IDS; unused++)
			if (!used_commit_ids[unused])
				break;
		if (unused == N_COMMIT_IDS) {
			dm_integrity_io_error(ic, "journal commit ids", -EIO);
			goto clear_journal;
		}
	}
	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
		    unused, used_commit_ids[0], used_commit_ids[1],
		    used_commit_ids[2], used_commit_ids[3]);

	last_used = prev_commit_seq(unused);
	want_commit_seq = prev_commit_seq(last_used);

	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
		journal_empty = true;

	write_start = max_commit_id_sections[last_used] + 1;
	if (unlikely(write_start >= ic->journal_sections))
		want_commit_seq = next_commit_seq(want_commit_seq);
	wraparound_section(ic, &write_start);

	i = write_start;
	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
				/*
				 * This could be caused by crash during writing.
				 * We won't replay the inconsistent part of the
				 * journal.
				 */
				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
				goto brk;
			}
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			want_commit_seq = next_commit_seq(want_commit_seq);
		wraparound_section(ic, &i);
	}
brk:

	if (!journal_empty) {
		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
			    write_sections, write_start, want_commit_seq);
		do_journal_write(ic, write_start, write_sections, true);
	}

	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
		continue_section = write_start;
		ic->commit_seq = want_commit_seq;
		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
	} else {
		unsigned s;
		unsigned char erase_seq;
clear_journal:
		DEBUG_print("clearing journal\n");

		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
		s = write_start;
		init_journal(ic, s, 1, erase_seq);
		s++;
		wraparound_section(ic, &s);
		if (ic->journal_sections >= 2) {
			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
			s += ic->journal_sections - 2;
			wraparound_section(ic, &s);
			init_journal(ic, s, 1, erase_seq);
		}

		continue_section = 0;
		ic->commit_seq = next_commit_seq(erase_seq);
	}

	ic->committed_section = continue_section;
	ic->n_committed_sections = 0;

	ic->uncommitted_section = continue_section;
	ic->n_uncommitted_sections = 0;

	ic->free_section = continue_section;
	ic->free_section_entry = 0;
	ic->free_sectors = ic->journal_entries;

	ic->journal_tree_root = RB_ROOT;
	for (i = 0; i < ic->journal_entries; i++)
		init_journal_node(&ic->journal_tree[i]);
}
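/*
 * Switch bitmap mode to a near-synchronous flush interval; used from the
 * reboot notifier so that as little dirty state as possible is lost on
 * shutdown.
 */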
static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
{
	DEBUG_print("dm_integrity_enter_synchronous_mode\n");

	if (ic->mode == 'B') {
		ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
		ic->synchronous_mode = 1;

		cancel_delayed_work_sync(&ic->bitmap_flush_work);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		flush_workqueue(ic->commit_wq);
	}
}
static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);

	DEBUG_print("dm_integrity_reboot\n");

	dm_integrity_enter_synchronous_mode(ic);

	return NOTIFY_DONE;
}
static void dm_integrity_postsuspend(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	int r;

	WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));

	del_timer_sync(&ic->autocommit_timer);

	if (ic->recalc_wq)
		drain_workqueue(ic->recalc_wq);

	if (ic->mode == 'B')
		cancel_delayed_work_sync(&ic->bitmap_flush_work);

	queue_work(ic->commit_wq, &ic->commit_work);
	drain_workqueue(ic->commit_wq);

	if (ic->mode == 'J') {
		if (ic->meta_dev)
			queue_work(ic->writer_wq, &ic->writer_work);
		drain_workqueue(ic->writer_wq);
		dm_integrity_flush_buffers(ic);
	}

	if (ic->mode == 'B') {
		dm_integrity_flush_buffers(ic);
#if 1
		/* set to 0 to test bitmap replay code */
		init_journal(ic, 0, ic->journal_sections, 0);
		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
#endif
	}

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	ic->journal_uptodate = true;
}
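/*
 * Resume: if the superblock says the bitmap was dirty, reconcile the
 * on-disk bitmap with the in-memory copies (forcing recalculation when the
 * bitmap granularity changed or dirty bits exist); otherwise replay the
 * journal. Finally restart recalculation if it was interrupted, and
 * register the reboot notifier.
 */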
static void dm_integrity_resume(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	int r;

	DEBUG_print("resume\n");

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
		DEBUG_print("resume dirty_bitmap\n");
		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		if (ic->mode == 'B') {
			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
						     BITMAP_OP_TEST_ALL_CLEAR)) {
					ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
					ic->sb->recalc_sector = cpu_to_le64(0);
				}
			} else {
				DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
					    ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
				ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
				block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
						   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
				ic->sb->recalc_sector = cpu_to_le64(0);
			}
		} else {
			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
				ic->sb->recalc_sector = cpu_to_le64(0);
			}
			init_journal(ic, 0, ic->journal_sections, 0);
			replay_journal(ic);
			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
	} else {
		replay_journal(ic);
		if (ic->mode == 'B') {
			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
			r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
			if (unlikely(r))
				dm_integrity_io_error(ic, "writing superblock", r);

			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
			    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
				block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
				block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
				block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
			}
			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		}
	}

	DEBUG_print("testing recalc: %x\n", ic->sb->flags);
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
		DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors);
		if (recalc_pos < ic->provided_data_sectors) {
			queue_work(ic->recalc_wq, &ic->recalc_work);
		} else if (recalc_pos > ic->provided_data_sectors) {
			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
			recalc_write_super(ic);
		}
	}

	ic->reboot_notifier.notifier_call = dm_integrity_reboot;
	ic->reboot_notifier.next = NULL;
	ic->reboot_notifier.priority = INT_MAX - 1;	/* be notified after md and before hardware drivers */
	WARN_ON(register_reboot_notifier(&ic->reboot_notifier));

#if 0
	/* set to 1 to stress test synchronous mode */
	dm_integrity_enter_synchronous_mode(ic);
#endif
}
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
				unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	unsigned arg_count;
	size_t sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%llu %llu",
		       (unsigned long long)atomic64_read(&ic->number_of_mismatches),
		       (unsigned long long)ic->provided_data_sectors);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
		else
			DMEMIT(" -");
		break;

	case STATUSTYPE_TABLE: {
		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
		watermark_percentage += ic->journal_entries / 2;
		do_div(watermark_percentage, ic->journal_entries);
		arg_count = 3;
		arg_count += !!ic->meta_dev;
		arg_count += ic->sectors_per_block != 1;
		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'B';
		arg_count += ic->mode == 'B';
		arg_count += !!ic->internal_hash_alg.alg_string;
		arg_count += !!ic->journal_crypt_alg.alg_string;
		arg_count += !!ic->journal_mac_alg.alg_string;
		DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
		       ic->tag_size, ic->mode, arg_count);
		if (ic->meta_dev)
			DMEMIT(" meta_device:%s", ic->meta_dev->name);
		if (ic->sectors_per_block != 1)
			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" recalculate");
		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
		if (ic->mode == 'J') {
			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
			DMEMIT(" commit_time:%u", ic->autocommit_msec);
		}
		if (ic->mode == 'B') {
			DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
			DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
		}

#define EMIT_ALG(a, n)							\
		do {							\
			if (ic->a.alg_string) {				\
				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
				if (ic->a.key_string)			\
					DMEMIT(":%s", ic->a.key_string);\
			}						\
		} while (0)
		EMIT_ALG(internal_hash_alg, "internal_hash");
		EMIT_ALG(journal_crypt_alg, "journal_crypt");
		EMIT_ALG(journal_mac_alg, "journal_mac");
		break;
	}
	}
}
static int dm_integrity_iterate_devices(struct dm_target *ti,
					iterate_devices_callout_fn fn, void *data)
{
	struct dm_integrity_c *ic = ti->private;

	if (!ic->meta_dev)
		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
	else
		return fn(ti, ic->dev, 0, ti->len, data);
}
static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_integrity_c *ic = ti->private;

	if (ic->sectors_per_block > 1) {
		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
	}
}
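/*
 * Derive the journal geometry from the superblock: how many journal
 * entries fit in a sector (less the space reserved for the journal MAC,
 * if any), per section, and in the whole journal.
 */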
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
	unsigned sector_space = JOURNAL_SECTOR_DATA;

	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
					 JOURNAL_ENTRY_ROUNDUP);

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
		sector_space -= JOURNAL_MAC_PER_SECTOR;
	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
}
static int calculate_device_limits(struct dm_integrity_c *ic)
{
	__u64 initial_sectors;

	calculate_journal_section_size(ic);
	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
		return -EINVAL;
	ic->initial_sectors = initial_sectors;

	if (!ic->meta_dev) {
		sector_t last_sector, last_area, last_offset;

		ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
					   (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
		if (!(ic->metadata_run & (ic->metadata_run - 1)))
			ic->log2_metadata_run = __ffs(ic->metadata_run);
		else
			ic->log2_metadata_run = -1;

		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
		last_sector = get_data_sector(ic, last_area, last_offset);
		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
			return -EINVAL;
	} else {
		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
		meta_size <<= ic->log2_buffer_sectors;
		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
			return -EINVAL;
		ic->metadata_run = 1;
		ic->log2_metadata_run = 0;
	}

	return 0;
}
static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
{
	unsigned journal_sections;
	int test_bit;

	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
	memcpy(ic->sb->magic, SB_MAGIC, 8);
	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
	if (ic->journal_mac_alg.alg_string)
		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);

	calculate_journal_section_size(ic);
	journal_sections = journal_sectors / ic->journal_section_sectors;
	if (!journal_sections)
		journal_sections = 1;

	if (!ic->meta_dev) {
		ic->sb->journal_sections = cpu_to_le32(journal_sections);
		if (!interleave_sectors)
			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);

		ic->provided_data_sectors = 0;
		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
			__u64 prev_data_sectors = ic->provided_data_sectors;

			ic->provided_data_sectors |= (sector_t)1 << test_bit;
			if (calculate_device_limits(ic))
				ic->provided_data_sectors = prev_data_sectors;
		}
		if (!ic->provided_data_sectors)
			return -EINVAL;
	} else {
		ic->sb->log2_interleave_sectors = 0;
		ic->provided_data_sectors = ic->data_device_sectors;
		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);

try_smaller_buffer:
		ic->sb->journal_sections = cpu_to_le32(0);
		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
			if (test_journal_sections > journal_sections)
				continue;
			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
			if (calculate_device_limits(ic))
				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
		}
		if (!le32_to_cpu(ic->sb->journal_sections)) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
			return -EINVAL;
		}
	}

	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);

	sb_set_version(ic);

	return 0;
}
static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &dm_integrity_profile;
	bi.tuple_size = ic->tag_size;
	bi.tag_size = bi.tuple_size;
	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}
static void dm_integrity_free_page_list(struct page_list *pl)
{
	unsigned i;

	if (!pl)
		return;
	for (i = 0; pl[i].page; i++)
		__free_page(pl[i].page);
	kvfree(pl);
}
static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
{
	struct page_list *pl;
	unsigned i;

	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
	if (!pl)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		pl[i].page = alloc_page(GFP_KERNEL);
		if (!pl[i].page) {
			dm_integrity_free_page_list(pl);
			return NULL;
		}
		if (i)
			pl[i - 1].next = &pl[i];
	}
	pl[i].page = NULL;
	pl[i].next = NULL;

	return pl;
}
static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
	unsigned i;
	for (i = 0; i < ic->journal_sections; i++)
		kvfree(sl[i]);
	kvfree(sl);
}
static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
								   struct page_list *pl)
{
	struct scatterlist **sl;
	unsigned i;

	sl = kvmalloc_array(ic->journal_sections,
			    sizeof(struct scatterlist *),
			    GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned start_index, start_offset;
		unsigned end_index, end_offset;
		unsigned n_pages;
		unsigned idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1,
				   &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
				   GFP_KERNEL);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned start = 0, end = PAGE_SIZE;
			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}
static void free_alg(struct alg_spec *a)
{
	kzfree(a->alg_string);
	kzfree(a->key);
	memset(a, 0, sizeof *a);
}
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;

inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}
static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, 0);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
			*error = error_key;
			return -ENOKEY;
		}
	}

	return 0;
}
static int create_journal(struct dm_integrity_c *ic, char **error)
{
	int r = 0;
	unsigned i;
	__u64 journal_pages, journal_desc_size, journal_tree_size;
	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
	struct skcipher_request *req = NULL;

	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);

	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
	journal_desc_size = journal_pages * sizeof(struct page_list);
	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_pages = journal_pages;

	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		r = -ENOMEM;
		goto bad;
	}
	if (ic->journal_crypt_alg.alg_string) {
		unsigned ivsize, blocksize;
		struct journal_completion comp;

		comp.ic = ic;
		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
		if (IS_ERR(ic->journal_crypt)) {
			*error = "Invalid journal cipher";
			r = PTR_ERR(ic->journal_crypt);
			ic->journal_crypt = NULL;
			goto bad;
		}
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);

		if (ic->journal_crypt_alg.key) {
			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
						   ic->journal_crypt_alg.key_size);
			if (r) {
				*error = "Error setting encryption key";
				goto bad;
			}
		}
		DEBUG_print("cipher %s, block size %u iv size %u\n",
			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);

		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
		if (!ic->journal_io) {
			*error = "Could not allocate memory for journal io";
			r = -ENOMEM;
			goto bad;
		}

		if (blocksize == 1) {
			struct scatterlist *sg;

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
			if (!ic->journal_xor) {
				*error = "Could not allocate memory for journal xor";
				r = -ENOMEM;
				goto bad;
			}

			sg = kvmalloc_array(ic->journal_pages + 1,
					    sizeof(struct scatterlist),
					    GFP_KERNEL);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);
				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);

			skcipher_request_set_crypt(req, sg, sg,
						   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
			init_completion(&comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
			unsigned crypt_len = roundup(ivsize, blocksize);

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->sk_requests = kvmalloc_array(ic->journal_sections,
							 sizeof(struct skcipher_request *),
							 GFP_KERNEL | __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__u32 section_le = cpu_to_le32(i);

				memset(crypt_iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
				init_completion(&comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
				section_req->iv = kmalloc_array(ivsize, 2,
								GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned j;
retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	kfree(crypt_iv);
	skcipher_request_free(req);

	return r;
}
/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		meta_device
 *		block_size
 *		sectors_per_bit
 *		bitmap_flush_interval
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		recalculate
 */
3580 static int dm_integrity_ctr(struct dm_target
*ti
, unsigned argc
, char **argv
)
3582 struct dm_integrity_c
*ic
;
3585 unsigned extra_args
;
3586 struct dm_arg_set as
;
3587 static const struct dm_arg _args
[] = {
3588 {0, 9, "Invalid number of feature args"},
3590 unsigned journal_sectors
, interleave_sectors
, buffer_sectors
, journal_watermark
, sync_msec
;
3591 bool should_write_sb
;
3593 unsigned long long start
;
3594 __s8 log2_sectors_per_bitmap_bit
= -1;
3595 __s8 log2_blocks_per_bitmap_bit
;
3596 __u64 bits_in_journal
;
3597 __u64 n_bitmap_bits
;
3599 #define DIRECT_ARGUMENTS 4
3601 if (argc
<= DIRECT_ARGUMENTS
) {
3602 ti
->error
= "Invalid argument count";
3606 ic
= kzalloc(sizeof(struct dm_integrity_c
), GFP_KERNEL
);
3608 ti
->error
= "Cannot allocate integrity context";
3612 ti
->per_io_data_size
= sizeof(struct dm_integrity_io
);
3615 ic
->in_progress
= RB_ROOT
;
3616 INIT_LIST_HEAD(&ic
->wait_list
);
3617 init_waitqueue_head(&ic
->endio_wait
);
3618 bio_list_init(&ic
->flush_bio_list
);
3619 init_waitqueue_head(&ic
->copy_to_journal_wait
);
3620 init_completion(&ic
->crypto_backoff
);
3621 atomic64_set(&ic
->number_of_mismatches
, 0);
3622 ic
->bitmap_flush_interval
= BITMAP_FLUSH_INTERVAL
;
3624 r
= dm_get_device(ti
, argv
[0], dm_table_get_mode(ti
->table
), &ic
->dev
);
3626 ti
->error
= "Device lookup failed";
3630 if (sscanf(argv
[1], "%llu%c", &start
, &dummy
) != 1 || start
!= (sector_t
)start
) {
3631 ti
->error
= "Invalid starting offset";
3637 if (strcmp(argv
[2], "-")) {
3638 if (sscanf(argv
[2], "%u%c", &ic
->tag_size
, &dummy
) != 1 || !ic
->tag_size
) {
3639 ti
->error
= "Invalid tag size";
3645 if (!strcmp(argv
[3], "J") || !strcmp(argv
[3], "B") ||
3646 !strcmp(argv
[3], "D") || !strcmp(argv
[3], "R")) {
3647 ic
->mode
= argv
[3][0];
3649 ti
->error
= "Invalid mode (expecting J, B, D, R)";
3654 journal_sectors
= 0;
3655 interleave_sectors
= DEFAULT_INTERLEAVE_SECTORS
;
3656 buffer_sectors
= DEFAULT_BUFFER_SECTORS
;
3657 journal_watermark
= DEFAULT_JOURNAL_WATERMARK
;
3658 sync_msec
= DEFAULT_SYNC_MSEC
;
3659 ic
->sectors_per_block
= 1;
3661 as
.argc
= argc
- DIRECT_ARGUMENTS
;
3662 as
.argv
= argv
+ DIRECT_ARGUMENTS
;
3663 r
= dm_read_arg_group(_args
, &as
, &extra_args
, &ti
->error
);
3667 while (extra_args
--) {
3668 const char *opt_string
;
3670 unsigned long long llval
;
3671 opt_string
= dm_shift_arg(&as
);
3674 ti
->error
= "Not enough feature arguments";
3677 if (sscanf(opt_string
, "journal_sectors:%u%c", &val
, &dummy
) == 1)
3678 journal_sectors
= val
? val
: 1;
3679 else if (sscanf(opt_string
, "interleave_sectors:%u%c", &val
, &dummy
) == 1)
3680 interleave_sectors
= val
;
3681 else if (sscanf(opt_string
, "buffer_sectors:%u%c", &val
, &dummy
) == 1)
3682 buffer_sectors
= val
;
3683 else if (sscanf(opt_string
, "journal_watermark:%u%c", &val
, &dummy
) == 1 && val
<= 100)
3684 journal_watermark
= val
;
3685 else if (sscanf(opt_string
, "commit_time:%u%c", &val
, &dummy
) == 1)
3687 else if (!strncmp(opt_string
, "meta_device:", strlen("meta_device:"))) {
3689 dm_put_device(ti
, ic
->meta_dev
);
3690 ic
->meta_dev
= NULL
;
3692 r
= dm_get_device(ti
, strchr(opt_string
, ':') + 1,
3693 dm_table_get_mode(ti
->table
), &ic
->meta_dev
);
3695 ti
->error
= "Device lookup failed";
3698 } else if (sscanf(opt_string
, "block_size:%u%c", &val
, &dummy
) == 1) {
3699 if (val
< 1 << SECTOR_SHIFT
||
3700 val
> MAX_SECTORS_PER_BLOCK
<< SECTOR_SHIFT
||
3703 ti
->error
= "Invalid block_size argument";
3706 ic
->sectors_per_block
= val
>> SECTOR_SHIFT
;
3707 } else if (sscanf(opt_string
, "sectors_per_bit:%llu%c", &llval
, &dummy
) == 1) {
3708 log2_sectors_per_bitmap_bit
= !llval
? 0 : __ilog2_u64(llval
);
3709 } else if (sscanf(opt_string
, "bitmap_flush_interval:%u%c", &val
, &dummy
) == 1) {
3710 if (val
>= (uint64_t)UINT_MAX
* 1000 / HZ
) {
3712 ti
->error
= "Invalid bitmap_flush_interval argument";
3714 ic
->bitmap_flush_interval
= msecs_to_jiffies(val
);
3715 } else if (!strncmp(opt_string
, "internal_hash:", strlen("internal_hash:"))) {
3716 r
= get_alg_and_key(opt_string
, &ic
->internal_hash_alg
, &ti
->error
,
3717 "Invalid internal_hash argument");
3720 } else if (!strncmp(opt_string
, "journal_crypt:", strlen("journal_crypt:"))) {
3721 r
= get_alg_and_key(opt_string
, &ic
->journal_crypt_alg
, &ti
->error
,
3722 "Invalid journal_crypt argument");
3725 } else if (!strncmp(opt_string
, "journal_mac:", strlen("journal_mac:"))) {
3726 r
= get_alg_and_key(opt_string
, &ic
->journal_mac_alg
, &ti
->error
,
3727 "Invalid journal_mac argument");
3730 } else if (!strcmp(opt_string
, "recalculate")) {
3731 ic
->recalculate_flag
= true;
3734 ti
->error
= "Invalid argument";
3739 ic
->data_device_sectors
= i_size_read(ic
->dev
->bdev
->bd_inode
) >> SECTOR_SHIFT
;
3741 ic
->meta_device_sectors
= ic
->data_device_sectors
;
3743 ic
->meta_device_sectors
= i_size_read(ic
->meta_dev
->bdev
->bd_inode
) >> SECTOR_SHIFT
;
3745 if (!journal_sectors
) {
3746 journal_sectors
= min((sector_t
)DEFAULT_MAX_JOURNAL_SECTORS
,
3747 ic
->data_device_sectors
>> DEFAULT_JOURNAL_SIZE_FACTOR
);
3750 if (!buffer_sectors
)
3752 ic
->log2_buffer_sectors
= min((int)__fls(buffer_sectors
), 31 - SECTOR_SHIFT
);
3754 r
= get_mac(&ic
->internal_hash
, &ic
->internal_hash_alg
, &ti
->error
,
3755 "Invalid internal hash", "Error setting internal hash key");
3759 r
= get_mac(&ic
->journal_mac
, &ic
->journal_mac_alg
, &ti
->error
,
3760 "Invalid journal mac", "Error setting journal mac key");
3764 if (!ic
->tag_size
) {
3765 if (!ic
->internal_hash
) {
3766 ti
->error
= "Unknown tag size";
3770 ic
->tag_size
= crypto_shash_digestsize(ic
->internal_hash
);
3772 if (ic
->tag_size
> MAX_TAG_SIZE
) {
3773 ti
->error
= "Too big tag size";
3777 if (!(ic
->tag_size
& (ic
->tag_size
- 1)))
3778 ic
->log2_tag_size
= __ffs(ic
->tag_size
);
3780 ic
->log2_tag_size
= -1;
3782 if (ic
->mode
== 'B' && !ic
->internal_hash
) {
3784 ti
->error
= "Bitmap mode can be only used with internal hash";
3788 ic
->autocommit_jiffies
= msecs_to_jiffies(sync_msec
);
3789 ic
->autocommit_msec
= sync_msec
;
3790 timer_setup(&ic
->autocommit_timer
, autocommit_fn
, 0);
3792 ic
->io
= dm_io_client_create();
3793 if (IS_ERR(ic
->io
)) {
3794 r
= PTR_ERR(ic
->io
);
3796 ti
->error
= "Cannot allocate dm io";
3800 r
= mempool_init_slab_pool(&ic
->journal_io_mempool
, JOURNAL_IO_MEMPOOL
, journal_io_cache
);
3802 ti
->error
= "Cannot allocate mempool";
3806 ic
->metadata_wq
= alloc_workqueue("dm-integrity-metadata",
3807 WQ_MEM_RECLAIM
, METADATA_WORKQUEUE_MAX_ACTIVE
);
3808 if (!ic
->metadata_wq
) {
3809 ti
->error
= "Cannot allocate workqueue";
3815 * If this workqueue were percpu, it would cause bio reordering
3816 * and reduced performance.
3818 ic
->wait_wq
= alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM
| WQ_UNBOUND
, 1);
3820 ti
->error
= "Cannot allocate workqueue";
3825 ic
->offload_wq
= alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM
,
3826 METADATA_WORKQUEUE_MAX_ACTIVE
);
3827 if (!ic
->offload_wq
) {
3828 ti
->error
= "Cannot allocate workqueue";
3833 ic
->commit_wq
= alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM
, 1);
3834 if (!ic
->commit_wq
) {
3835 ti
->error
= "Cannot allocate workqueue";
3839 INIT_WORK(&ic
->commit_work
, integrity_commit
);
3841 if (ic
->mode
== 'J' || ic
->mode
== 'B') {
3842 ic
->writer_wq
= alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM
, 1);
3843 if (!ic
->writer_wq
) {
3844 ti
->error
= "Cannot allocate workqueue";
3848 INIT_WORK(&ic
->writer_work
, integrity_writer
);
3851 ic
->sb
= alloc_pages_exact(SB_SECTORS
<< SECTOR_SHIFT
, GFP_KERNEL
);
3854 ti
->error
= "Cannot allocate superblock area";
3858 r
= sync_rw_sb(ic
, REQ_OP_READ
, 0);
3860 ti
->error
= "Error reading superblock";
3863 should_write_sb
= false;
3864 if (memcmp(ic
->sb
->magic
, SB_MAGIC
, 8)) {
3865 if (ic
->mode
!= 'R') {
3866 if (memchr_inv(ic
->sb
, 0, SB_SECTORS
<< SECTOR_SHIFT
)) {
3868 ti
->error
= "The device is not initialized";
3873 r
= initialize_superblock(ic
, journal_sectors
, interleave_sectors
);
3875 ti
->error
= "Could not initialize superblock";
3878 if (ic
->mode
!= 'R')
3879 should_write_sb
= true;
3882 if (!ic
->sb
->version
|| ic
->sb
->version
> SB_VERSION_3
) {
3884 ti
->error
= "Unknown version";
3887 if (le16_to_cpu(ic
->sb
->integrity_tag_size
) != ic
->tag_size
) {
3889 ti
->error
= "Tag size doesn't match the information in superblock";
3892 if (ic
->sb
->log2_sectors_per_block
!= __ffs(ic
->sectors_per_block
)) {
3894 ti
->error
= "Block size doesn't match the information in superblock";
3897 if (!le32_to_cpu(ic
->sb
->journal_sections
)) {
3899 ti
->error
= "Corrupted superblock, journal_sections is 0";
3902 /* make sure that ti->max_io_len doesn't overflow */
3903 if (!ic
->meta_dev
) {
3904 if (ic
->sb
->log2_interleave_sectors
< MIN_LOG2_INTERLEAVE_SECTORS
||
3905 ic
->sb
->log2_interleave_sectors
> MAX_LOG2_INTERLEAVE_SECTORS
) {
3907 ti
->error
= "Invalid interleave_sectors in the superblock";
3911 if (ic
->sb
->log2_interleave_sectors
) {
3913 ti
->error
= "Invalid interleave_sectors in the superblock";
3917 ic
->provided_data_sectors
= le64_to_cpu(ic
->sb
->provided_data_sectors
);
3918 if (ic
->provided_data_sectors
!= le64_to_cpu(ic
->sb
->provided_data_sectors
)) {
3919 /* test for overflow */
3921 ti
->error
= "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
3924 if (!!(ic
->sb
->flags
& cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC
)) != !!ic
->journal_mac_alg
.alg_string
) {
3926 ti
->error
= "Journal mac mismatch";
3931 r
= calculate_device_limits(ic
);
3934 if (ic
->log2_buffer_sectors
> 3) {
3935 ic
->log2_buffer_sectors
--;
3936 goto try_smaller_buffer
;
3939 ti
->error
= "The device is too small";
3943 if (log2_sectors_per_bitmap_bit
< 0)
3944 log2_sectors_per_bitmap_bit
= __fls(DEFAULT_SECTORS_PER_BITMAP_BIT
);
3945 if (log2_sectors_per_bitmap_bit
< ic
->sb
->log2_sectors_per_block
)
3946 log2_sectors_per_bitmap_bit
= ic
->sb
->log2_sectors_per_block
;
3948 bits_in_journal
= ((__u64
)ic
->journal_section_sectors
* ic
->journal_sections
) << (SECTOR_SHIFT
+ 3);
3949 if (bits_in_journal
> UINT_MAX
)
3950 bits_in_journal
= UINT_MAX
;
3951 while (bits_in_journal
< (ic
->provided_data_sectors
+ ((sector_t
)1 << log2_sectors_per_bitmap_bit
) - 1) >> log2_sectors_per_bitmap_bit
)
3952 log2_sectors_per_bitmap_bit
++;
3954 log2_blocks_per_bitmap_bit
= log2_sectors_per_bitmap_bit
- ic
->sb
->log2_sectors_per_block
;
3955 ic
->log2_blocks_per_bitmap_bit
= log2_blocks_per_bitmap_bit
;
3956 if (should_write_sb
) {
3957 ic
->sb
->log2_blocks_per_bitmap_bit
= log2_blocks_per_bitmap_bit
;
3959 n_bitmap_bits
= ((ic
->provided_data_sectors
>> ic
->sb
->log2_sectors_per_block
)
3960 + (((sector_t
)1 << log2_blocks_per_bitmap_bit
) - 1)) >> log2_blocks_per_bitmap_bit
;
3961 ic
->n_bitmap_blocks
= DIV_ROUND_UP(n_bitmap_bits
, BITMAP_BLOCK_SIZE
* 8);
3964 ic
->log2_buffer_sectors
= min(ic
->log2_buffer_sectors
, (__u8
)__ffs(ic
->metadata_run
));
3966 if (ti
->len
> ic
->provided_data_sectors
) {
3968 ti
->error
= "Not enough provided sectors for requested mapping size";
3973 threshold
= (__u64
)ic
->journal_entries
* (100 - journal_watermark
);
3975 do_div(threshold
, 100);
3976 ic
->free_sectors_threshold
= threshold
;
	DEBUG_print("initialized:\n");
	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print("	data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
		    (unsigned long long)ic->provided_data_sectors);
	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
	DEBUG_print("	bits_in_journal %llu\n", (unsigned long long)bits_in_journal);
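	/*
	 * Starting a user-requested recalculation: the flag and the resume
	 * position are persisted in the superblock, so an interrupted run
	 * can continue from recalc_sector after the target is reloaded.
	 */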
	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}
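	/*
	 * Recalculation is only possible with an internal hash; it needs its
	 * own workqueue plus a data buffer and a tag array sized for
	 * RECALC_SECTORS worth of blocks at a time.
	 */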
	if (ic->internal_hash) {
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
		if (!ic->recalc_buffer) {
			ti->error = "Cannot allocate buffer for recalculating";
			r = -ENOMEM;
			goto bad;
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
			ti->error = "Cannot allocate tags for recalculating";
			r = -ENOMEM;
			goto bad;
		}
	}
	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
					   1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}
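	/*
	 * Bitmap mode: one bitmap_block_status per BITMAP_BLOCK_SIZE chunk
	 * of the bitmap; each entry's bits live inside the journal pages,
	 * which hold the on-disk bitmap in this mode.
	 */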
	if (ic->mode == 'B') {
		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);

		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->recalc_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->may_write_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
		if (!ic->bbs) {
			r = -ENOMEM;
			goto bad;
		}
		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
		for (i = 0; i < ic->n_bitmap_blocks; i++) {
			struct bitmap_block_status *bbs = &ic->bbs[i];
			unsigned sector, pl_index, pl_offset;

			INIT_WORK(&bbs->work, bitmap_block_work);
			bbs->ic = ic;
			bbs->idx = i;
			bio_list_init(&bbs->bio_queue);
			spin_lock_init(&bbs->bio_queue_lock);

			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
		}
	}
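	/*
	 * A freshly formatted device gets a fully initialized journal, and
	 * the new superblock is written with FUA before the target goes live.
	 */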
	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}
	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}
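	/*
	 * In bitmap mode, additionally cap max_io_len so that a single bio
	 * never spans more data than one bitmap block describes.
	 */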
	if (ic->mode == 'B') {
		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
		if (!max_io_len)
			max_io_len = 1U << 31;
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
			if (r)
				goto bad;
		}
	}
	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	return 0;

bad:
	dm_integrity_dtr(ti);
	return r;
}
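/*
 * Tear everything down in roughly the reverse order of construction.
 * Each resource is individually guarded, so this is also safe to call
 * from the constructor's error path at any stage of initialization.
 */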
static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	vfree(ic->recalc_buffer);
	kvfree(ic->recalc_tags);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kzfree(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}
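/*
 * DM_TARGET_SINGLETON: a table may contain only one instance of this
 * target; DM_TARGET_INTEGRITY: the target implements its own bio data
 * integrity handling.
 */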
static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 3, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
	.dtr			= dm_integrity_dtr,
	.map			= dm_integrity_map,
	.postsuspend		= dm_integrity_postsuspend,
	.resume			= dm_integrity_resume,
	.status			= dm_integrity_status,
	.iterate_devices	= dm_integrity_iterate_devices,
	.io_hints		= dm_integrity_io_hints,
};
static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}
static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}
module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");