1 // SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved. */
14 static inline int compare_attr(const struct ATTRIB
*left
, enum ATTR_TYPE type
,
15 const __le16
*name
, u8 name_len
,
18 /* First, compare the type codes. */
19 int diff
= le32_to_cpu(left
->type
) - le32_to_cpu(type
);
24 /* They have the same type code, so we have to compare the names. */
25 return ntfs_cmp_names(attr_name(left
), left
->name_len
, name
, name_len
,
32 * Return: Unused attribute id that is less than mrec->next_attr_id.
34 static __le16
mi_new_attt_id(struct ntfs_inode
*ni
, struct mft_inode
*mi
)
36 u16 free_id
, max_id
, t16
;
37 struct MFT_REC
*rec
= mi
->mrec
;
41 id
= rec
->next_attr_id
;
42 free_id
= le16_to_cpu(id
);
43 if (free_id
< 0x7FFF) {
44 rec
->next_attr_id
= cpu_to_le16(free_id
+ 1);
48 /* One record can store up to 1024/24 ~= 42 attributes. */
55 attr
= mi_enum_attr(ni
, mi
, attr
);
57 rec
->next_attr_id
= cpu_to_le16(max_id
+ 1);
59 return cpu_to_le16(free_id
);
62 t16
= le16_to_cpu(attr
->id
);
66 } else if (max_id
< t16
)
71 int mi_get(struct ntfs_sb_info
*sbi
, CLST rno
, struct mft_inode
**mi
)
74 struct mft_inode
*m
= kzalloc(sizeof(struct mft_inode
), GFP_NOFS
);
79 err
= mi_init(m
, sbi
, rno
);
85 err
= mi_read(m
, false);
95 void mi_put(struct mft_inode
*mi
)
101 int mi_init(struct mft_inode
*mi
, struct ntfs_sb_info
*sbi
, CLST rno
)
105 mi
->mrec
= kmalloc(sbi
->record_size
, GFP_NOFS
);
113 * mi_read - Read MFT data.
115 int mi_read(struct mft_inode
*mi
, bool is_mft
)
118 struct MFT_REC
*rec
= mi
->mrec
;
119 struct ntfs_sb_info
*sbi
= mi
->sbi
;
120 u32 bpr
= sbi
->record_size
;
121 u64 vbo
= (u64
)mi
->rno
<< sbi
->record_bits
;
122 struct ntfs_inode
*mft_ni
= sbi
->mft
.ni
;
123 struct runs_tree
*run
= mft_ni
? &mft_ni
->file
.run
: NULL
;
124 struct rw_semaphore
*rw_lock
= NULL
;
126 if (is_mounted(sbi
)) {
127 if (!is_mft
&& mft_ni
) {
128 rw_lock
= &mft_ni
->file
.run_lock
;
133 err
= ntfs_read_bh(sbi
, run
, vbo
, &rec
->rhdr
, bpr
, &mi
->nb
);
139 if (err
== -E_NTFS_FIXUP
) {
151 err
= attr_load_runs_vcn(mft_ni
, ATTR_DATA
, NULL
, 0, run
,
152 vbo
>> sbi
->cluster_bits
);
162 err
= ntfs_read_bh(sbi
, run
, vbo
, &rec
->rhdr
, bpr
, &mi
->nb
);
166 if (err
== -E_NTFS_FIXUP
) {
174 /* Check field 'total' only here. */
175 if (le32_to_cpu(rec
->total
) != bpr
) {
183 if (err
== -E_NTFS_CORRUPT
) {
184 ntfs_err(sbi
->sb
, "mft corrupted");
185 ntfs_set_state(sbi
, NTFS_DIRTY_ERROR
);
193 * mi_enum_attr - start/continue attributes enumeration in record.
195 * NOTE: mi->mrec - memory of size sbi->record_size
196 * here we sure that mi->mrec->total == sbi->record_size (see mi_read)
198 struct ATTRIB
*mi_enum_attr(struct ntfs_inode
*ni
, struct mft_inode
*mi
,
201 const struct MFT_REC
*rec
= mi
->mrec
;
202 u32 used
= le32_to_cpu(rec
->used
);
203 u32 t32
, off
, asize
, prev_type
;
205 u64 data_size
, alloc_size
, tot_size
;
208 u32 total
= le32_to_cpu(rec
->total
);
210 off
= le16_to_cpu(rec
->attr_off
);
215 if (off
>= used
|| off
< MFTRECORD_FIXUP_OFFSET_1
||
216 !IS_ALIGNED(off
, 8)) {
220 /* Skip non-resident records. */
221 if (!is_rec_inuse(rec
))
225 attr
= Add2Ptr(rec
, off
);
228 * We don't need to check previous attr here. There is
229 * a bounds checking in the previous round.
231 off
= PtrOffset(rec
, attr
);
233 asize
= le32_to_cpu(attr
->size
);
235 prev_type
= le32_to_cpu(attr
->type
);
236 attr
= Add2Ptr(attr
, asize
);
241 * Can we use the first fields:
245 if (off
+ 8 > used
) {
246 static_assert(ALIGN(sizeof(enum ATTR_TYPE
), 8) == 8);
250 if (attr
->type
== ATTR_END
) {
251 /* End of enumeration. */
255 /* 0x100 is last known attribute for now. */
256 t32
= le32_to_cpu(attr
->type
);
257 if (!t32
|| (t32
& 0xf) || (t32
> 0x100))
260 /* attributes in record must be ordered by type */
264 asize
= le32_to_cpu(attr
->size
);
266 if (!IS_ALIGNED(asize
, 8))
269 /* Check overflow and boundary. */
270 if (off
+ asize
< off
|| off
+ asize
> used
)
273 /* Can we use the field attr->non_res. */
277 /* Check size of attribute. */
278 if (!attr
->non_res
) {
279 /* Check resident fields. */
280 if (asize
< SIZEOF_RESIDENT
)
283 t16
= le16_to_cpu(attr
->res
.data_off
);
287 if (le32_to_cpu(attr
->res
.data_size
) > asize
- t16
)
290 t32
= sizeof(short) * attr
->name_len
;
291 if (t32
&& le16_to_cpu(attr
->name_off
) + t32
> t16
)
297 /* Check nonresident fields. */
298 if (attr
->non_res
!= 1)
301 /* Can we use memory including attr->nres.valid_size? */
302 if (asize
< SIZEOF_NONRESIDENT
)
305 t16
= le16_to_cpu(attr
->nres
.run_off
);
309 t32
= sizeof(short) * attr
->name_len
;
310 if (t32
&& le16_to_cpu(attr
->name_off
) + t32
> t16
)
313 /* Check start/end vcn. */
314 if (le64_to_cpu(attr
->nres
.svcn
) > le64_to_cpu(attr
->nres
.evcn
) + 1)
317 data_size
= le64_to_cpu(attr
->nres
.data_size
);
318 if (le64_to_cpu(attr
->nres
.valid_size
) > data_size
)
321 alloc_size
= le64_to_cpu(attr
->nres
.alloc_size
);
322 if (data_size
> alloc_size
)
325 t32
= mi
->sbi
->cluster_mask
;
326 if (alloc_size
& t32
)
329 if (!attr
->nres
.svcn
&& is_attr_ext(attr
)) {
330 /* First segment of sparse/compressed attribute */
331 /* Can we use memory including attr->nres.total_size? */
332 if (asize
< SIZEOF_NONRESIDENT_EX
)
335 tot_size
= le64_to_cpu(attr
->nres
.total_size
);
339 if (tot_size
> alloc_size
)
342 if (attr
->nres
.c_unit
)
345 if (alloc_size
> mi
->sbi
->volume
.size
)
352 _ntfs_bad_inode(&ni
->vfs_inode
);
357 * mi_find_attr - Find the attribute by type and name and id.
359 struct ATTRIB
*mi_find_attr(struct ntfs_inode
*ni
, struct mft_inode
*mi
,
360 struct ATTRIB
*attr
, enum ATTR_TYPE type
,
361 const __le16
*name
, u8 name_len
, const __le16
*id
)
363 u32 type_in
= le32_to_cpu(type
);
367 attr
= mi_enum_attr(ni
, mi
, attr
);
371 atype
= le32_to_cpu(attr
->type
);
378 if (attr
->name_len
!= name_len
)
381 if (name_len
&& memcmp(attr_name(attr
), name
, name_len
* sizeof(short)))
384 if (id
&& *id
!= attr
->id
)
390 int mi_write(struct mft_inode
*mi
, int wait
)
394 struct ntfs_sb_info
*sbi
;
402 err
= ntfs_write_bh(sbi
, &rec
->rhdr
, &mi
->nb
, wait
);
406 if (mi
->rno
< sbi
->mft
.recs_mirr
)
407 sbi
->flags
|= NTFS_FLAGS_MFTMIRR
;
414 int mi_format_new(struct mft_inode
*mi
, struct ntfs_sb_info
*sbi
, CLST rno
,
415 __le16 flags
, bool is_mft
)
420 u64 vbo
= (u64
)rno
<< sbi
->record_bits
;
422 err
= mi_init(mi
, sbi
, rno
);
428 if (rno
== MFT_REC_MFT
) {
430 } else if (rno
< MFT_REC_FREE
) {
432 } else if (rno
>= sbi
->mft
.used
) {
434 } else if (mi_read(mi
, is_mft
)) {
436 } else if (rec
->rhdr
.sign
== NTFS_FILE_SIGNATURE
) {
437 /* Record is reused. Update its sequence number. */
438 seq
= le16_to_cpu(rec
->seq
) + 1;
443 memcpy(rec
, sbi
->new_rec
, sbi
->record_size
);
445 rec
->seq
= cpu_to_le16(seq
);
446 rec
->flags
= RECORD_FLAG_IN_USE
| flags
;
447 if (MFTRECORD_FIXUP_OFFSET
== MFTRECORD_FIXUP_OFFSET_3
)
448 rec
->mft_record
= cpu_to_le32(rno
);
453 struct ntfs_inode
*ni
= sbi
->mft
.ni
;
456 if (is_mounted(sbi
) && !is_mft
) {
457 down_read(&ni
->file
.run_lock
);
461 err
= ntfs_get_bh(sbi
, &ni
->file
.run
, vbo
, sbi
->record_size
,
464 up_read(&ni
->file
.run_lock
);
471 * mi_insert_attr - Reserve space for new attribute.
473 * Return: Not full constructed attribute or NULL if not possible to create.
475 struct ATTRIB
*mi_insert_attr(struct ntfs_inode
*ni
, struct mft_inode
*mi
,
476 enum ATTR_TYPE type
, const __le16
*name
,
477 u8 name_len
, u32 asize
, u16 name_off
)
482 struct MFT_REC
*rec
= mi
->mrec
;
483 struct ntfs_sb_info
*sbi
= mi
->sbi
;
484 u32 used
= le32_to_cpu(rec
->used
);
485 const u16
*upcase
= sbi
->upcase
;
487 /* Can we insert mi attribute? */
488 if (used
+ asize
> sbi
->record_size
)
492 * Scan through the list of attributes to find the point
493 * at which we should insert it.
496 while ((attr
= mi_enum_attr(ni
, mi
, attr
))) {
497 int diff
= compare_attr(attr
, type
, name
, name_len
, upcase
);
502 if (!diff
&& !is_attr_indexed(attr
))
510 attr
= Add2Ptr(rec
, used
- 8);
512 /* Insert before 'attr'. */
513 tail
= used
- PtrOffset(rec
, attr
);
516 id
= mi_new_attt_id(ni
, mi
);
518 memmove(Add2Ptr(attr
, asize
), attr
, tail
);
519 memset(attr
, 0, asize
);
522 attr
->size
= cpu_to_le32(asize
);
523 attr
->name_len
= name_len
;
524 attr
->name_off
= cpu_to_le16(name_off
);
527 memmove(Add2Ptr(attr
, name_off
), name
, name_len
* sizeof(short));
528 rec
->used
= cpu_to_le32(used
+ asize
);
536 * mi_remove_attr - Remove the attribute from record.
538 * NOTE: The source attr will point to next attribute.
540 bool mi_remove_attr(struct ntfs_inode
*ni
, struct mft_inode
*mi
,
543 struct MFT_REC
*rec
= mi
->mrec
;
544 u32 aoff
= PtrOffset(rec
, attr
);
545 u32 used
= le32_to_cpu(rec
->used
);
546 u32 asize
= le32_to_cpu(attr
->size
);
548 if (aoff
+ asize
> used
)
551 if (ni
&& is_attr_indexed(attr
) && attr
->type
== ATTR_NAME
) {
552 u16 links
= le16_to_cpu(ni
->mi
.mrec
->hard_links
);
554 /* minor error. Not critical. */
556 ni
->mi
.mrec
->hard_links
= cpu_to_le16(links
- 1);
562 memmove(attr
, Add2Ptr(attr
, asize
), used
- aoff
);
563 rec
->used
= cpu_to_le32(used
);
569 /* bytes = "new attribute size" - "old attribute size" */
570 bool mi_resize_attr(struct mft_inode
*mi
, struct ATTRIB
*attr
, int bytes
)
572 struct MFT_REC
*rec
= mi
->mrec
;
573 u32 aoff
= PtrOffset(rec
, attr
);
574 u32 total
, used
= le32_to_cpu(rec
->used
);
575 u32 nsize
, asize
= le32_to_cpu(attr
->size
);
576 u32 rsize
= le32_to_cpu(attr
->res
.data_size
);
577 int tail
= (int)(used
- aoff
- asize
);
581 if (tail
< 0 || aoff
>= used
)
587 total
= le32_to_cpu(rec
->total
);
588 next
= Add2Ptr(attr
, asize
);
591 dsize
= ALIGN(bytes
, 8);
592 if (used
+ dsize
> total
)
594 nsize
= asize
+ dsize
;
596 memmove(next
+ dsize
, next
, tail
);
597 memset(next
, 0, dsize
);
601 dsize
= ALIGN(-bytes
, 8);
604 nsize
= asize
- dsize
;
605 memmove(next
- dsize
, next
, tail
);
610 rec
->used
= cpu_to_le32(used
);
611 attr
->size
= cpu_to_le32(nsize
);
613 attr
->res
.data_size
= cpu_to_le32(rsize
);
620 * Pack runs in MFT record.
621 * If failed record is not changed.
623 int mi_pack_runs(struct mft_inode
*mi
, struct ATTRIB
*attr
,
624 struct runs_tree
*run
, CLST len
)
627 struct ntfs_sb_info
*sbi
= mi
->sbi
;
630 struct MFT_REC
*rec
= mi
->mrec
;
631 CLST svcn
= le64_to_cpu(attr
->nres
.svcn
);
632 u32 used
= le32_to_cpu(rec
->used
);
633 u32 aoff
= PtrOffset(rec
, attr
);
634 u32 asize
= le32_to_cpu(attr
->size
);
635 char *next
= Add2Ptr(attr
, asize
);
636 u16 run_off
= le16_to_cpu(attr
->nres
.run_off
);
637 u32 run_size
= asize
- run_off
;
638 u32 tail
= used
- aoff
- asize
;
639 u32 dsize
= sbi
->record_size
- used
;
641 /* Make a maximum gap in current record. */
642 memmove(next
+ dsize
, next
, tail
);
644 /* Pack as much as possible. */
645 err
= run_pack(run
, svcn
, len
, Add2Ptr(attr
, run_off
), run_size
+ dsize
,
648 memmove(next
, next
+ dsize
, tail
);
652 new_run_size
= ALIGN(err
, 8);
654 memmove(next
+ new_run_size
- run_size
, next
+ dsize
, tail
);
656 attr
->size
= cpu_to_le32(asize
+ new_run_size
- run_size
);
657 attr
->nres
.evcn
= cpu_to_le64(svcn
+ plen
- 1);
658 rec
->used
= cpu_to_le32(used
+ new_run_size
- run_size
);