// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/fs.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
			       const __le16 *name, u8 name_len,
			       const u16 *upcase)
{
	/* First, compare the type codes. */
	int diff = le32_to_cpu(left->type) - le32_to_cpu(type);

	if (diff)
		return diff;

	/* They have the same type code, so we have to compare the names. */
	return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
			      upcase, true);
}

/*
 * mi_new_attt_id
 *
 * Return: Unused attribute id that is less than mrec->next_attr_id.
 */
static __le16 mi_new_attt_id(struct ntfs_inode *ni, struct mft_inode *mi)
{
	u16 free_id, max_id, t16;
	struct MFT_REC *rec = mi->mrec;
	struct ATTRIB *attr;
	__le16 id;

	id = rec->next_attr_id;
	free_id = le16_to_cpu(id);
	if (free_id < 0x7FFF) {
		rec->next_attr_id = cpu_to_le16(free_id + 1);
		return id;
	}

	/* One record can store up to 1024/24 ~= 42 attributes. */
	free_id = 0;
	max_id = 0;

	attr = NULL;

	for (;;) {
		attr = mi_enum_attr(ni, mi, attr);
		if (!attr) {
			rec->next_attr_id = cpu_to_le16(max_id + 1);
			mi->dirty = true;
			return cpu_to_le16(free_id);
		}

		t16 = le16_to_cpu(attr->id);
		if (t16 == free_id) {
			free_id += 1;
			attr = NULL;
		} else if (max_id < t16)
			max_id = t16;
	}
}

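/*
 * mi_get - Allocate an mft_inode and read MFT record @rno into it.
 */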
int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
{
	int err;
	struct mft_inode *m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);

	if (!m)
		return -ENOMEM;

	err = mi_init(m, sbi, rno);
	if (err) {
		kfree(m);
		return err;
	}

	err = mi_read(m, false);
	if (err) {
		mi_put(m);
		return err;
	}

	*mi = m;
	return 0;
}

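/*
 * mi_put - Release the record buffers and free @mi.
 */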
void mi_put(struct mft_inode *mi)
{
	mi_clear(mi);
	kfree(mi);
}

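/*
 * mi_init - Bind @mi to @sbi/@rno and allocate the record buffer.
 */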
int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
{
	mi->sbi = sbi;
	mi->rno = rno;
	mi->mrec = kmalloc(sbi->record_size, GFP_NOFS);
	if (!mi->mrec)
		return -ENOMEM;

	return 0;
}

/*
 * mi_read - Read MFT data.
 */
int mi_read(struct mft_inode *mi, bool is_mft)
{
	int err;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 bpr = sbi->record_size;
	u64 vbo = (u64)mi->rno << sbi->record_bits;
	struct ntfs_inode *mft_ni = sbi->mft.ni;
	struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
	struct rw_semaphore *rw_lock = NULL;

	if (is_mounted(sbi)) {
		if (!is_mft && mft_ni) {
			rw_lock = &mft_ni->file.run_lock;
			down_read(rw_lock);
		}
	}

	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);
	if (!err)
		goto ok;

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}

	if (err != -ENOENT)
		goto out;

	if (rw_lock) {
		ni_lock(mft_ni);
		down_write(rw_lock);
	}
	err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
				 vbo >> sbi->cluster_bits);
	if (rw_lock) {
		up_write(rw_lock);
		ni_unlock(mft_ni);
	}
	if (err)
		goto out;

	if (rw_lock)
		down_read(rw_lock);
	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}
	if (err)
		goto out;

ok:
	/* Check field 'total' only here. */
	if (le32_to_cpu(rec->total) != bpr) {
		err = -EINVAL;
		goto out;
	}

	return 0;

out:
	if (err == -E_NTFS_CORRUPT) {
		ntfs_err(sbi->sb, "mft corrupted");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		err = -EINVAL;
	}

	return err;
}

/*
 * mi_enum_attr - Start/continue attributes enumeration in record.
 *
 * NOTE: mi->mrec - memory of size sbi->record_size.
 * Here we are sure that mi->mrec->total == sbi->record_size (see mi_read).
 */
struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi,
			    struct ATTRIB *attr)
{
	const struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 t32, off, asize, prev_type;
	u16 t16;
	u64 data_size, alloc_size, tot_size;

	if (!attr) {
		u32 total = le32_to_cpu(rec->total);

		off = le16_to_cpu(rec->attr_off);

		if (used > total)
			goto out;

		if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
		    !IS_ALIGNED(off, 8)) {
			goto out;
		}

		/* Skip records that are not in use. */
		if (!is_rec_inuse(rec))
			return NULL;

		prev_type = 0;
		attr = Add2Ptr(rec, off);
	} else {
		/*
		 * We don't need to check the previous attr here. There is
		 * bounds checking in the previous round.
		 */
		off = PtrOffset(rec, attr);

		asize = le32_to_cpu(attr->size);

		prev_type = le32_to_cpu(attr->type);
		attr = Add2Ptr(attr, asize);
		off += asize;
	}

	/*
	 * Can we use the first fields:
	 * attr->type,
	 * attr->size
	 */
	if (off + 8 > used) {
		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
		goto out;
	}

	if (attr->type == ATTR_END) {
		/* End of enumeration. */
		return NULL;
	}

	/* 0x100 is the last known attribute for now. */
	t32 = le32_to_cpu(attr->type);
	if (!t32 || (t32 & 0xf) || (t32 > 0x100))
		goto out;

	/* Attributes in a record must be ordered by type. */
	if (t32 < prev_type)
		goto out;

	asize = le32_to_cpu(attr->size);

	if (!IS_ALIGNED(asize, 8))
		goto out;

	/* Check overflow and boundary. */
	if (off + asize < off || off + asize > used)
		goto out;

	/* Can we use the field attr->non_res? */
	if (off + 9 > used)
		goto out;

	/* Check size of attribute. */
	if (!attr->non_res) {
		/* Check resident fields. */
		if (asize < SIZEOF_RESIDENT)
			goto out;

		t16 = le16_to_cpu(attr->res.data_off);
		if (t16 > asize)
			goto out;

		if (le32_to_cpu(attr->res.data_size) > asize - t16)
			goto out;

		t32 = sizeof(short) * attr->name_len;
		if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
			goto out;

		return attr;
	}

	/* Check nonresident fields. */
	if (attr->non_res != 1)
		goto out;

	/* Can we use memory including attr->nres.valid_size? */
	if (asize < SIZEOF_NONRESIDENT)
		goto out;

	t16 = le16_to_cpu(attr->nres.run_off);
	if (t16 > asize)
		goto out;

	t32 = sizeof(short) * attr->name_len;
	if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
		goto out;

	/* Check start/end vcn. */
	if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
		goto out;

	data_size = le64_to_cpu(attr->nres.data_size);
	if (le64_to_cpu(attr->nres.valid_size) > data_size)
		goto out;

	alloc_size = le64_to_cpu(attr->nres.alloc_size);
	if (data_size > alloc_size)
		goto out;

	t32 = mi->sbi->cluster_mask;
	if (alloc_size & t32)
		goto out;

	if (!attr->nres.svcn && is_attr_ext(attr)) {
		/* First segment of sparse/compressed attribute. */
		/* Can we use memory including attr->nres.total_size? */
		if (asize < SIZEOF_NONRESIDENT_EX)
			goto out;

		tot_size = le64_to_cpu(attr->nres.total_size);
		if (tot_size & t32)
			goto out;

		if (tot_size > alloc_size)
			goto out;
	} else {
		if (attr->nres.c_unit)
			goto out;

		if (alloc_size > mi->sbi->volume.size)
			goto out;
	}

	return attr;

out:
	_ntfs_bad_inode(&ni->vfs_inode);
	return NULL;
}

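/*
 * Typical enumeration pattern for mi_enum_attr(), as used by
 * mi_new_attt_id() above and mi_insert_attr() below:
 *
 *	attr = NULL;
 *	while ((attr = mi_enum_attr(ni, mi, attr))) {
 *		... inspect attr ...
 *	}
 *
 * Passing NULL starts the enumeration; passing the previously returned
 * attribute continues it. NULL is returned at ATTR_END or when the record
 * fails validation (in which case the inode is marked bad).
 */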
/*
 * mi_find_attr - Find the attribute by type, name and id.
 */
struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi,
			    struct ATTRIB *attr, enum ATTR_TYPE type,
			    const __le16 *name, u8 name_len, const __le16 *id)
{
	u32 type_in = le32_to_cpu(type);
	u32 atype;

next_attr:
	attr = mi_enum_attr(ni, mi, attr);
	if (!attr)
		return NULL;

	atype = le32_to_cpu(attr->type);
	if (atype > type_in)
		return NULL;

	if (atype < type_in)
		goto next_attr;

	if (attr->name_len != name_len)
		goto next_attr;

	if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
		goto next_attr;

	if (id && *id != attr->id)
		goto next_attr;

	return attr;
}

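/*
 * mi_write - Write a dirty MFT record back through its buffer heads.
 *
 * Records covered by the MFT mirror are flagged via NTFS_FLAGS_MFTMIRR
 * so the $MFTMirr copy can be refreshed later.
 */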
int mi_write(struct mft_inode *mi, int wait)
{
	struct MFT_REC *rec;
	int err;
	struct ntfs_sb_info *sbi;

	if (!mi->dirty)
		return 0;

	sbi = mi->sbi;
	rec = mi->mrec;

	err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
	if (err)
		return err;

	if (mi->rno < sbi->mft.recs_mirr)
		sbi->flags |= NTFS_FLAGS_MFTMIRR;

	mi->dirty = false;

	return 0;
}

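/*
 * mi_format_new - Format record @rno from the template sbi->new_rec.
 *
 * If the on-disk record is being reused, its sequence number is
 * carried over and incremented.
 */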
int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
		  __le16 flags, bool is_mft)
{
	int err;
	u16 seq = 1;
	struct MFT_REC *rec;
	u64 vbo = (u64)rno << sbi->record_bits;

	err = mi_init(mi, sbi, rno);
	if (err)
		return err;

	rec = mi->mrec;

	if (rno == MFT_REC_MFT) {
		;
	} else if (rno < MFT_REC_FREE) {
		seq = rno;
	} else if (rno >= sbi->mft.used) {
		;
	} else if (mi_read(mi, is_mft)) {
		;
	} else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
		/* Record is reused. Update its sequence number. */
		seq = le16_to_cpu(rec->seq) + 1;
		if (!seq)
			seq = 1;
	}

	memcpy(rec, sbi->new_rec, sbi->record_size);

	rec->seq = cpu_to_le16(seq);
	rec->flags = RECORD_FLAG_IN_USE | flags;
	if (MFTRECORD_FIXUP_OFFSET == MFTRECORD_FIXUP_OFFSET_3)
		rec->mft_record = cpu_to_le32(rno);

	mi->dirty = true;

	if (!mi->nb.nbufs) {
		struct ntfs_inode *ni = sbi->mft.ni;
		bool lock = false;

		if (is_mounted(sbi) && !is_mft) {
			down_read(&ni->file.run_lock);
			lock = true;
		}

		err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
				  &mi->nb);
		if (lock)
			up_read(&ni->file.run_lock);
	}

	return err;
}

/*
 * mi_insert_attr - Reserve space for a new attribute.
 *
 * Return: Not fully constructed attribute, or NULL if it is not possible
 * to create one.
 */
struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi,
			      enum ATTR_TYPE type, const __le16 *name,
			      u8 name_len, u32 asize, u16 name_off)
{
	size_t tail;
	struct ATTRIB *attr;
	__le16 id;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 used = le32_to_cpu(rec->used);
	const u16 *upcase = sbi->upcase;

	/* Can we insert this attribute into the record? */
	if (used + asize > sbi->record_size)
		return NULL;

	/*
	 * Scan through the list of attributes to find the point
	 * at which we should insert it.
	 */
	attr = NULL;
	while ((attr = mi_enum_attr(ni, mi, attr))) {
		int diff = compare_attr(attr, type, name, name_len, upcase);

		if (diff < 0)
			continue;

		if (!diff && !is_attr_indexed(attr))
			return NULL;
		break;
	}

	if (!attr) {
		/* Append. */
		tail = 8;
		attr = Add2Ptr(rec, used - 8);
	} else {
		/* Insert before 'attr'. */
		tail = used - PtrOffset(rec, attr);
	}

	id = mi_new_attt_id(ni, mi);

	memmove(Add2Ptr(attr, asize), attr, tail);
	memset(attr, 0, asize);

	attr->type = type;
	attr->size = cpu_to_le32(asize);
	attr->name_len = name_len;
	attr->name_off = cpu_to_le16(name_off);
	attr->id = id;

	memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
	rec->used = cpu_to_le32(used + asize);

	mi->dirty = true;

	return attr;
}

/*
 * mi_remove_attr - Remove the attribute from the record.
 *
 * NOTE: The source attr will point to the next attribute.
 */
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
		    struct ATTRIB *attr)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);

	if (aoff + asize > used)
		return false;

	if (ni && is_attr_indexed(attr) && attr->type == ATTR_NAME) {
		u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
		if (!links) {
			/* Minor error. Not critical. */
		} else {
			ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
			ni->mi.dirty = true;
		}
	}

	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;

	return true;
}

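/*
 * mi_resize_attr - Resize an attribute in place by @bytes, shifting the
 * tail of the record and updating rec->used (and res.data_size for
 * resident attributes).
 */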
/* bytes = "new attribute size" - "old attribute size" */
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 total, used = le32_to_cpu(rec->used);
	u32 nsize, asize = le32_to_cpu(attr->size);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	int tail = (int)(used - aoff - asize);
	int dsize;
	char *next;

	if (tail < 0 || aoff >= used)
		return false;

	if (!bytes)
		return true;

	total = le32_to_cpu(rec->total);
	next = Add2Ptr(attr, asize);

	if (bytes > 0) {
		dsize = ALIGN(bytes, 8);
		if (used + dsize > total)
			return false;
		nsize = asize + dsize;
		/* Move tail. */
		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
		used += dsize;
		rsize += dsize;
	} else {
		dsize = ALIGN(-bytes, 8);
		if (dsize > asize)
			return false;
		nsize = asize - dsize;
		memmove(next - dsize, next, tail);
		used -= dsize;
		rsize -= dsize;
	}

	rec->used = cpu_to_le32(used);
	attr->size = cpu_to_le32(nsize);
	if (!attr->non_res)
		attr->res.data_size = cpu_to_le32(rsize);
	mi->dirty = true;

	return true;
}

/*
 * Pack runs in MFT record.
 * If it fails, the record is not changed.
 */
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
		 struct runs_tree *run, CLST len)
{
	int err = 0;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 new_run_size;
	CLST plen;
	struct MFT_REC *rec = mi->mrec;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	u32 used = le32_to_cpu(rec->used);
	u32 aoff = PtrOffset(rec, attr);
	u32 asize = le32_to_cpu(attr->size);
	char *next = Add2Ptr(attr, asize);
	u16 run_off = le16_to_cpu(attr->nres.run_off);
	u32 run_size = asize - run_off;
	u32 tail = used - aoff - asize;
	u32 dsize = sbi->record_size - used;

	/* Make a maximum gap in current record. */
	memmove(next + dsize, next, tail);

	/* Pack as much as possible. */
	err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
		       &plen);
	if (err < 0) {
		memmove(next, next + dsize, tail);
		return err;
	}

	new_run_size = ALIGN(err, 8);

	memmove(next + new_run_size - run_size, next + dsize, tail);

	attr->size = cpu_to_le32(asize + new_run_size - run_size);
	attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
	rec->used = cpu_to_le32(used + new_run_size - run_size);
	mi->dirty = true;

	return 0;
}