// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/nls.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};
#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif
static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};
static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};
static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};
static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};
static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};
static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}

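/*
 * Illustrative sketch (not part of the driver): how the update sequence
 * ("fixup") above protects a multi-sector record against torn writes.
 * Assume a 1024-byte MFT record (two 512-byte sectors) and fix_num == 3
 * (the sample word plus one fixup word per sector):
 *
 *	before write: fixup[] = { sample, save0, save1 }
 *
 * ntfs_fix_pre_write() saves the last u16 of each sector into
 * fixup[1..2] and overwrites each sector tail with 'sample'. After a
 * read, ntfs_fix_post_read() verifies every sector tail still equals
 * 'sample' and restores the saved words; a mismatch means only part of
 * the record reached the disk.
 */
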
/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk it never returns bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}

/*
 * ntfs_loadlog_and_replay - Load and replay $LogFile.
 */
int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);
	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill LogFile with -1 if it was initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_look_for_free_space - Look for free space in the bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Cluster 0 is always in use, so lcn == 0 means: use the
	 * cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check for too big a request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}

/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
}

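/*
 * Worked example for the final check above (numbers are illustrative):
 * with record_bits == 10 (1K records) and 4K clusters, mlen == 16 new
 * MFT records need bytes_to_cluster(sbi, 16 << 10) == 4 clusters. The
 * check succeeds only if, after reserving 'zlen' zone clusters and the
 * 'clen' requested clusters, at least those 4 clusters remain free.
 */
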
/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records
 * (see attrib.c: "at least two MFT to avoid recursive loop").
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);
	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);
	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

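/*
 * Illustrative sizing for the function above (assumed values): with 1K
 * records (record_bits == 10), wnd->nbits == 1000 and
 * NTFS_MFT_INCREASE_STEP == 1024, the table grows to
 * ALIGN(2024, 128) == 2048 records, i.e. $MFT::DATA is extended to
 * 2048 << 10 == 2M bytes and $MFT::BITMAP to ntfs3_bitmap_size(2048)
 * bytes (one bit per record).
 */
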
/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the nearest to '0' free MFT. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT. */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area [11-16) ==
		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
		 * marks it as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						le32_to_cpu(ref.low));
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					continue;
				}

				ni = ntfs_i(i);
				mrec = ni->mi.mrec;

				/*
				 * A reserved record may be reused only if it
				 * is a base record with no hard links and no
				 * file name attribute.
				 */
				if (is_rec_base(mrec) && !mrec->hard_links &&
				    !ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						  NULL, 0, NULL, NULL))
					__clear_bit(ir - MFT_REC_RESERVED,
						    &sbi->mft.reserved_bitmap);

				iput(i);
			}
			sbi->mft.reserved_bitmap_inited = true;
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request to get record for general purpose. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}

/*
 * ntfs_mark_rec_free - Mark record as free.
 * is_mft - true if we are changing MFT.
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock is locked for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything unless we have a non-empty MFT zone. */
	if (wnd_zone_len(wnd))
		return 0;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN for MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
	    unlikely(ntfs3_forced_shutdown(sb)))
		return;

	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);

		err = wait ? sync_dirty_buffer(bh2) : 0;

		put_bh(bh2);
		if (err)
			return;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}

/*
 * ntfs_bad_inode
 *
 * Marks inode as bad and marks fs as 'dirty'.
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	ntfs_inode_err(inode, "%s", hint);
	make_bad_inode(inode);
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	__le16 info_flags;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty(clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
		mi->dirty = true;
	}
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty_sync(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/* Write MFT record to disk. */
	err = _ni_write_inode(&ni->vfs_inode, 1);

	return err;
}

/*
 * security_hash - Calculates a hash of security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}

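/*
 * The hash above is a simple rotate-and-add over 32-bit little-endian
 * words: hash = rol32(hash, 3) + word ((h >> 29) | (h << 3) rotates
 * left by 3). A minimal userspace sketch (assumed helper, not part of
 * the driver) that yields the same value for data already in
 * little-endian memory:
 *
 *	u32 sd_hash(const u32 *w, size_t n)
 *	{
 *		u32 h = 0;
 *
 *		while (n--)
 *			h = ((h >> 29) | (h << 3)) + *w++;
 *		return h;
 *	}
 */
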
/*
 * ntfs_bread
 *
 * Simple wrapper for sb_bread_unmovable().
 */
struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
{
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct buffer_head *bh;

	if (unlikely(block >= sbi->volume.blocks)) {
		/* Prevent generic message "attempt to access beyond end of device". */
		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
			 (u64)block << sb->s_blocksize_bits);
		return NULL;
	}

	bh = sb_bread_unmovable(sb, block);
	if (bh)
		return bh;

	ntfs_err(sb, "failed to read volume at offset 0x%llx",
		 (u64)block << sb->s_blocksize_bits);
	return NULL;
}

int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use absolute boot's 'MFTCluster' to read record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   enum req_op op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to be 512 bytes aligned. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk $LogFile range with -1, which marks the log as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (!err)
		err = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);
out:
	unlock_page(fill);
	put_page(fill);

	return err;
}

/*
 * ntfs_vbo_to_lbo - Convert offset in file to offset in volume.
 */
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}

struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
				  enum RECORD_FLAG flag)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);

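/*
 * Decoded (matches the SDDL string above): Revision 1, Control ==
 * 0x8004 (SE_SELF_RELATIVE | SE_DACL_PRESENT), Owner at +0x30, Group
 * at +0x40, DACL at +0x14 holding one ACCESS_ALLOWED ACE with inherit
 * flags OICI that grants 0x001F01FF (FILE_ALL_ACCESS) to S-1-1-0
 * (Everyone); Owner and Group are both S-1-5-32-544 (Administrators).
 */
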
static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}

/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}

bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

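/*
 * Layout being validated above (self-relative security descriptor):
 * the fixed header stores byte offsets, each either 0 (absent) or
 * pointing inside the same buffer:
 *
 *	+0x00 Revision / Sbz1 / Control
 *	+0x04 Owner -> struct SID
 *	+0x08 Group -> struct SID
 *	+0x0C Sacl  -> struct ACL (optional)
 *	+0x10 Dacl  -> struct ACL (optional)
 *
 * Every offset plus the length of the object it points to must stay
 * within 'len'; the checks above enforce exactly that.
 */
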
/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
		goto out;
	}

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
		goto out;
	}

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security at the end of bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}

/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff) {
		err = -ENOENT;
		goto out;
	}

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < sizeof(struct SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
		/* Security descriptor looks too big; 0x10000 is an arbitrary upper bound. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - sizeof(struct SECURITY_HDR);

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       sizeof(struct SECURITY_HDR),
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}

/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor. When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip
 * over the mirror portion.
 */
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	u32 next, left;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	if (inserted)
		*inserted = false;
	*security_id = SECURITY_ID_INVALID;

	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	if (!d_security)
		return -ENOMEM;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sdh = fnd_get();
	if (!fnd_sdh) {
		err = -ENOMEM;
		goto out;
	}

	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
	if (!root_sdh) {
		err = -EINVAL;
		goto out;
	}

	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Check if such security already exists.
	 * Use "SDH" and hash -> to get the offset in "SDS".
	 */
	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
			fnd_sdh);
	if (err)
		goto out;

	while (e) {
		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
			err = ntfs_read_run_nb(sbi, &ni->file.run,
					       le64_to_cpu(e->sec_hdr.off),
					       d_security, new_sec_size, NULL);
			if (err)
				goto out;

			if (le32_to_cpu(d_security->size) == new_sec_size &&
			    d_security->key.hash == hash_key.hash &&
			    !memcmp(d_security + 1, sd, size_sd)) {
				/* Such security already exists. */
				*security_id = d_security->key.sec_id;
				err = 0;
				goto out;
			}
		}

		err = indx_find_sort(indx_sdh, ni, root_sdh,
				     (struct NTFS_DE **)&e, fnd_sdh);
		if (err)
			goto out;

		if (!e || e->key.hash != hash_key.hash)
			break;
	}

	/* Zero unused space. */
	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
	left = SecurityDescriptorsBlockSize - next;

	/* Zero gap until SecurityDescriptorsBlockSize. */
	if (left < new_sec_size) {
		/* Zero "left" bytes from sbi->security.next_off. */
		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
	}

	/* Zero tail of previous security. */
	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);

	/*
	 * Example:
	 * 0x40438 == ni->vfs_inode.i_size
	 * 0x00440 == sbi->security.next_off
	 * need to zero [0x438-0x440)
	 * if (next > used) {
	 *  u32 tozero = next - used;
	 *  zero "tozero" bytes from sbi->security.next_off - tozero
	 * }
	 */

	/* Format new security descriptor. */
	d_security->key.hash = hash_key.hash;
	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
	d_security->off = cpu_to_le64(sbi->security.next_off);
	d_security->size = cpu_to_le32(new_sec_size);
	memcpy(d_security + 1, sd, size_sd);

	/* Write main SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
				d_security, aligned_sec_size, 0);
	if (err)
		goto out;

	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
	new_sds_size = mirr_off + aligned_sec_size;

	if (new_sds_size > ni->vfs_inode.i_size) {
		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
				    new_sds_size, &new_sds_size, false, NULL);
		if (err)
			goto out;
	}

	/* Write copy SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
				aligned_sec_size, 0);
	if (err)
		goto out;

	/* Fill SII entry. */
	sii_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
	sii_e.de.view.res = 0;
	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
	sii_e.de.flags = 0;
	sii_e.de.res = 0;
	sii_e.sec_id = d_security->key.sec_id;
	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));

	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
	if (err)
		goto out;

	/* Fill SDH entry. */
	sdh_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
	sdh_e.de.view.res = 0;
	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
	sdh_e.de.flags = 0;
	sdh_e.de.res = 0;
	sdh_e.key.hash = d_security->key.hash;
	sdh_e.key.sec_id = d_security->key.sec_id;
	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
	sdh_e.magic[0] = cpu_to_le16('I');
	sdh_e.magic[1] = cpu_to_le16('I');

	fnd_clear(fnd_sdh);
	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
				fnd_sdh, 0);
	if (err)
		goto out;

	*security_id = d_security->key.sec_id;
	if (inserted)
		*inserted = true;

	/* Update Id and offset for next descriptor. */
	sbi->security.next_id += 1;
	sbi->security.next_off += aligned_sec_size;

out:
	fnd_put(fnd_sdh);
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);
	kfree(d_security);

	return err;
}

/*
 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
 */
int ntfs_reparse_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root_r;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
			    ARRAY_SIZE(SR_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root_r = resident_data(attr);
	if (root_r->type != ATTR_ZERO ||
	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);

out:
	return err;
}

/*
 * ntfs_objid_init - Load and parse $Extend/$ObjId.
 */
int ntfs_objid_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root;

	if (!ni)
		return 0;

	le = NULL;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
			    ARRAY_SIZE(SO_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root = resident_data(attr);
	if (root->type != ATTR_ZERO ||
	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);

out:
	return err;
}

int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;

	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);

	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}

int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct NTFS_DE_R re;

	if (!ni)
		return -EINVAL;

	memset(&re, 0, sizeof(re));

	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
	re.de.key_size = cpu_to_le16(sizeof(re.key));

	re.key.ReparseTag = rtag;
	memcpy(&re.key.ref, ref, sizeof(*ref));

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}

int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ntfs_fnd *fnd = NULL;
	struct REPARSE_KEY rkey;
	struct NTFS_DE_R *re;
	struct INDEX_ROOT *root_r;

	if (!ni)
		return -EINVAL;

	rkey.ReparseTag = rtag;
	rkey.ref = *ref;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	if (rtag) {
		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
		goto out1;
	}

	fnd = fnd_get();
	if (!fnd) {
		err = -ENOMEM;
		goto out1;
	}

	root_r = indx_get_root(indx, ni, NULL, NULL);
	if (!root_r) {
		err = -EINVAL;
		goto out;
	}

	/* 1 forces the comparison to ignore rkey.ReparseTag. */
	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
			(struct NTFS_DE **)&re, fnd);
	if (err)
		goto out;

	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
		/* Should be impossible; the volume is likely corrupt. */
		goto out;
	}

	memcpy(&rkey, &re->key, sizeof(rkey));

	fnd_put(fnd);
	fnd = NULL;

	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);

out:
	fnd_put(fnd);

out1:
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}

static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
					  CLST len)
{
	ntfs_unmap_meta(sbi->sb, lcn, len);
	ntfs_discard(sbi, lcn, len);
}

void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
	CLST end, i, zone_len, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	bool dirty = false;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (!wnd_is_used(wnd, lcn, len)) {
		/* Mark volume as dirty outside of wnd->rw_lock. */
		dirty = true;

		end = lcn + len;
		len = 0;
		for (i = lcn; i < end; i++) {
			if (wnd_is_used(wnd, i, 1)) {
				if (!len)
					lcn = i;
				len += 1;
				continue;
			}

			if (!len)
				continue;

			if (trim)
				ntfs_unmap_and_discard(sbi, lcn, len);

			wnd_set_free(wnd, lcn, len);
			len = 0;
		}

		if (!len)
			goto out;
	}

	if (trim)
		ntfs_unmap_and_discard(sbi, lcn, len);
	wnd_set_free(wnd, lcn, len);

	/* Append to MFT zone, if possible. */
	zone_len = wnd_zone_len(wnd);
	zlen = min(zone_len + len, sbi->zone_max);

	if (zlen == zone_len) {
		/* MFT zone already has maximum size. */
	} else if (!zone_len) {
		/* Create MFT zone only if 'zlen' is large enough. */
		if (zlen == sbi->zone_max)
			wnd_zone_set(wnd, lcn, zlen);
	} else {
		CLST zone_lcn = wnd_zone_bit(wnd);

		if (lcn + len == zone_lcn) {
			/* Append into head of MFT zone. */
			wnd_zone_set(wnd, lcn, zlen);
		} else if (zone_lcn + zone_len == lcn) {
			/* Append into tail of MFT zone. */
			wnd_zone_set(wnd, zone_lcn, zlen);
		}
	}

out:
	up_write(&wnd->rw_lock);
	if (dirty)
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}

/*
 * run_deallocate - Deallocate clusters.
 */
int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   bool trim)
{
	CLST lcn, len;
	size_t idx = 0;

	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
		if (lcn == SPARSE_LCN)
			continue;

		mark_as_free_ex(sbi, lcn, len, trim);
	}

	return 0;
}

static inline bool name_has_forbidden_chars(const struct le_str *fname)
{
	int i, ch;

	/* Check for forbidden chars. */
	for (i = 0; i < fname->len; ++i) {
		ch = le16_to_cpu(fname->name[i]);

		/* Control chars. */
		if (ch < 0x20)
			return true;

		switch (ch) {
		/* Disallowed by Windows. */
		case '\\':
		case '/':
		case ':':
		case '*':
		case '?':
		case '<':
		case '>':
		case '|':
		case '\"':
			return true;

		default:
			/* Allowed char. */
			break;
		}
	}

	/* File names cannot end with a space or a dot. */
	if (fname->len > 0) {
		ch = le16_to_cpu(fname->name[fname->len - 1]);
		if (ch == ' ' || ch == '.')
			return true;
	}

	return false;
}

static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
				    const struct le_str *fname)
{
	int port_digit;
	const __le16 *name = fname->name;
	int len = fname->len;
	const u16 *upcase = sbi->upcase;

	/* Check for 3-char reserved names (device names). */
	/* The name by itself or with any extension is forbidden. */
	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
			return true;

	/* Check for 4-char reserved names (port name followed by 1..9). */
	/* The name by itself or with any extension is forbidden. */
	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
		port_digit = le16_to_cpu(name[3]);
		if (port_digit >= '1' && port_digit <= '9')
			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
					    false) ||
			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
					    false))
				return true;
	}

	return false;
}

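/*
 * Examples of names rejected above: "CON", "NUL.txt", "aux.c" (3-char
 * device names, bare or with any extension) and "COM1", "LPT9.log"
 * (port names followed by a digit 1..9). The comparison goes through
 * sbi->upcase, so the match is case-insensitive, exactly as in Windows.
 */
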
/*
 * valid_windows_name - Check if a file name is valid in Windows.
 */
bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
{
	return !name_has_forbidden_chars(fname) &&
	       !is_reserved_name(sbi, fname);
}

/*
 * ntfs_set_label - Update the current NTFS label.
 */
int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
{
	int err;
	struct ATTRIB *attr;
	u32 uni_bytes;
	struct ntfs_inode *ni = sbi->volume.ni;
	/* Allocate PATH_MAX bytes. */
	struct cpu_str *uni = __getname();

	if (!uni)
		return -ENOMEM;

	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
				UTF16_LITTLE_ENDIAN);
	if (err < 0)
		goto out;

	uni_bytes = uni->len * sizeof(u16);
	if (uni_bytes > NTFS_LABEL_MAX_LENGTH * sizeof(u16)) {
		ntfs_warn(sbi->sb, "new label is too long");
		err = -EFBIG;
		goto out;
	}

	ni_lock(ni);

	/* Ignore any errors. */
	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);

	err = ni_insert_resident(ni, uni_bytes, ATTR_LABEL, NULL, 0, &attr,
				 NULL, NULL);
	if (err < 0)
		goto unlock_out;

	/* Write new label into the on-disk struct. */
	memcpy(resident_data(attr), uni->name, uni_bytes);

	/* Update cached value of the current label. */
	if (len >= ARRAY_SIZE(sbi->volume.label))
		len = ARRAY_SIZE(sbi->volume.label) - 1;
	memcpy(sbi->volume.label, label, len);
	sbi->volume.label[len] = 0;
	mark_inode_dirty_sync(&ni->vfs_inode);

unlock_out:
	ni_unlock(ni);

	if (!err)
		err = _ni_write_inode(&ni->vfs_inode, 0);

out:
	__putname(uni);
	return err;
}