/*
 * lcnalloc.c - Cluster (de)allocation code.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2004 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pagemap.h>
/**
 * ntfs_cluster_free_from_rl_nolock - free clusters from runlist
 * @vol:	mounted ntfs volume on which to free the clusters
 * @rl:		runlist describing the clusters to free
 *
 * Free all the clusters described by the runlist @rl on the volume @vol.  In
 * the case of an error being returned, at least some of the clusters were not
 * freed.
 *
 * Return 0 on success and -errno on error.
 *
 * Locking: - The volume lcn bitmap must be locked for writing on entry and is
 *	      left locked on return.
 */
int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
		const runlist_element *rl)
{
	struct inode *lcnbmp_vi = vol->lcnbmp_ino;
	int ret = 0;

	ntfs_debug("Entering.");
	for (; rl->length; rl++) {
		int err;

		/* Sparse runs (negative lcn) have no clusters to free. */
		if (rl->lcn < 0)
			continue;
		/* Clear the bits covering this run in the lcn bitmap. */
		err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length);
		/*
		 * Keep the first error, but allow a later, different error to
		 * replace an earlier ENOMEM.
		 */
		if (unlikely(err && (!ret || ret == ENOMEM) && ret != err))
			ret = err;
	}
	ntfs_debug("Done.");
	return ret;
}
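/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * locked wrapper (here called example_cluster_free_from_rl()) showing how a
 * caller would hold the volume lcn bitmap lock for writing around
 * ntfs_cluster_free_from_rl_nolock(), as required by the locking rules
 * documented above.
 */
static inline int example_cluster_free_from_rl(ntfs_volume *vol,
		const runlist_element *rl)
{
	int ret;

	/* The nolock variant requires vol->lcnbmp_lock held for writing. */
	down_write(&vol->lcnbmp_lock);
	ret = ntfs_cluster_free_from_rl_nolock(vol, rl);
	up_write(&vol->lcnbmp_lock);
	return ret;
}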
/**
 * ntfs_cluster_alloc - allocate clusters on an ntfs volume
 * @vol:	mounted ntfs volume on which to allocate the clusters
 * @start_vcn:	vcn to use for the first allocated cluster
 * @count:	number of clusters to allocate
 * @start_lcn:	starting lcn at which to allocate the clusters (or -1 if none)
 * @zone:	zone from which to allocate the clusters
 *
 * Allocate @count clusters preferably starting at cluster @start_lcn or at the
 * current allocator position if @start_lcn is -1, on the mounted ntfs volume
 * @vol.  @zone is either DATA_ZONE for allocation of normal clusters or
 * MFT_ZONE for allocation of clusters for the master file table, i.e. the
 * $MFT/$DATA attribute.
 *
 * @start_vcn specifies the vcn of the first allocated cluster.  This makes
 * merging the resulting runlist with the old runlist easier.
 *
 * You need to check the return value with IS_ERR().  If IS_ERR() is false,
 * the function was successful and the return value is a runlist describing
 * the allocated cluster(s).  If IS_ERR() is true, the function failed and
 * PTR_ERR() gives you the error code.
 *
 * Notes on the allocation algorithm
 * =================================
 *
 * There are two data zones.  The first is the area between the end of the mft
 * zone and the end of the volume, and the second is the area between the start
 * of the volume and the start of the mft zone.  On unmodified/standard NTFS
 * 1.x volumes, the second data zone does not exist due to the mft zone being
 * expanded to cover the start of the volume in order to reserve space for the
 * mft bitmap attribute.
 *
 * This is not the prettiest function but the complexity stems from the need to
 * implement the mft vs data zoned approach and from the fact that we have
 * access to the lcn bitmap only in portions of up to 8192 bytes at a time, so
 * we need to cope with crossing over boundaries of two buffers.  Further, the
 * fact that the allocator allows for caller supplied hints as to the location
 * of where allocation should begin and the fact that the allocator keeps track
 * of where in the data zones the next natural allocation should occur,
 * contribute to the complexity of the function.  But it should all be
 * worthwhile, because this allocator should: 1) be a full implementation of
 * the MFT zone approach used by Windows NT, 2) cause reduction in
 * fragmentation, and 3) be speedy in allocations (the code is not optimized
 * for speed, but the algorithm is, so further speed improvements are probably
 * possible).
 *
 * FIXME: We should be monitoring cluster allocation and increment the MFT zone
 * size dynamically but this is something for the future.  We will just cause
 * heavier fragmentation by not doing it and I am not even sure Windows would
 * grow the MFT zone dynamically, so it might even be correct not to do this.
 * The overhead in doing dynamic MFT zone expansion would be very large and
 * unlikely worth the effort. (AIA)
 *
 * TODO: I have added in double the required zone position pointer wrap around
 * logic which can be optimized to having only one of the two logic sets.
 * However, having the double logic will work fine, but if we have only one of
 * the sets and we get it wrong somewhere, then we get into trouble, so
 * removing the duplicate logic requires _very_ careful consideration of _all_
 * possible code paths.  So at least for now, I am leaving the double logic -
 * better safe than sorry... (AIA)
 *
 * Locking: - The volume lcn bitmap must be unlocked on entry and is unlocked
 *	      on return.
 *	    - This function takes the volume lcn bitmap lock for writing and
 *	      modifies the bitmap contents.
 */
runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
		const s64 count, const LCN start_lcn,
		const NTFS_CLUSTER_ALLOCATION_ZONES zone)
{
	LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn;
	LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size;
	s64 clusters;
	struct inode *lcnbmp_vi;
	runlist_element *rl = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *buf, *byte;
	int err = 0, rlpos, rlsize, buf_size;
	u8 pass, done_zones, search_zone, need_writeback = 0, bit;
151 ntfs_debug("Entering for start_vcn 0x%llx, count 0x%llx, start_lcn "
152 "0x%llx, zone %s_ZONE.", (unsigned long long)start_vcn
,
153 (unsigned long long)count
,
154 (unsigned long long)start_lcn
,
155 zone
== MFT_ZONE
? "MFT" : "DATA");
157 lcnbmp_vi
= vol
->lcnbmp_ino
;
159 BUG_ON(start_vcn
< 0);
161 BUG_ON(start_lcn
< -1);
162 BUG_ON(zone
< FIRST_ZONE
);
163 BUG_ON(zone
> LAST_ZONE
);
	/* Return an empty runlist if @count is zero. */
	// FIXME: Do we want to just return NULL instead? (AIA)
	if (!count) {
		rl = ntfs_malloc_nofs(PAGE_SIZE);
		if (!rl)
			return ERR_PTR(-ENOMEM);
		rl[0].vcn = start_vcn;
		rl[0].lcn = LCN_RL_NOT_MAPPED;
		rl[0].length = 0;
		return rl;
	}
	/* Take the lcnbmp lock for writing. */
	down_write(&vol->lcnbmp_lock);
	/*
	 * If no specific @start_lcn was requested, use the current data zone
	 * position, otherwise use the requested @start_lcn but make sure it
	 * lies outside the mft zone.  Also set done_zones to 0 (no zones done)
	 * and pass depending on whether we are starting inside a zone (1) or
	 * at the beginning of a zone (2).  If requesting from the MFT_ZONE,
	 * we either start at the current position within the mft zone or at
	 * the specified position.  If the latter is out of bounds then we
	 * start at the beginning of the MFT_ZONE.
	 */
	done_zones = 0;
	pass = 1;
	/*
	 * zone_start and zone_end are the current search range.  search_zone
	 * is 1 for mft zone, 2 for data zone 1 (end of mft zone till end of
	 * volume) and 4 for data zone 2 (start of volume till start of mft
	 * zone).
	 */
	zone_start = start_lcn;
	if (zone_start < 0) {
		if (zone == DATA_ZONE)
			zone_start = vol->data1_zone_pos;
		else
			zone_start = vol->mft_zone_pos;
		if (!zone_start) {
			/*
			 * Zone starts at beginning of volume which means a
			 * single pass is sufficient.
			 */
			pass = 2;
		}
	} else if (zone == DATA_ZONE && zone_start >= vol->mft_zone_start &&
			zone_start < vol->mft_zone_end) {
		zone_start = vol->mft_zone_end;
		/*
		 * Starting at beginning of data1_zone which means a single
		 * pass in this zone is sufficient.
		 */
		pass = 2;
	} else if (zone == MFT_ZONE && (zone_start < vol->mft_zone_start ||
			zone_start >= vol->mft_zone_end)) {
		zone_start = vol->mft_lcn;
		if (!vol->mft_zone_end)
			zone_start = 0;
		/*
		 * Starting at beginning of volume which means a single pass
		 * is sufficient.
		 */
		pass = 2;
	}
	if (zone == MFT_ZONE) {
		zone_end = vol->mft_zone_end;
		search_zone = 1;
	} else /* if (zone == DATA_ZONE) */ {
		/* Skip searching the mft zone. */
		done_zones |= 1;
		if (zone_start >= vol->mft_zone_end) {
			zone_end = vol->nr_clusters;
			search_zone = 2;
		} else {
			zone_end = vol->mft_zone_start;
			search_zone = 4;
		}
	}
	/*
	 * bmp_pos is the current bit position inside the bitmap.  We use
	 * bmp_initial_pos to determine whether or not to do a zone switch.
	 */
	bmp_pos = bmp_initial_pos = zone_start;
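	/*
	 * For example, a DATA_ZONE request pre-marks the mft zone as done
	 * (done_zones has bit 1 set) and starts with search_zone 2 when
	 * zone_start lies at or beyond the end of the mft zone, or 4 when it
	 * lies before it.  Each time a zone is exhausted, done_zones gets the
	 * search_zone bit or-ed in and the search switches to a zone whose
	 * bit is still clear; only once done_zones reaches 7 have all three
	 * zones been searched.
	 */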
	/* Loop until all clusters are allocated, i.e. clusters == 0. */
	clusters = count;
	rlpos = rlsize = 0;
	mapping = lcnbmp_vi->i_mapping;
	while (1) {
		ntfs_debug("Start of outer while loop: done_zones 0x%x, "
				"search_zone %i, pass %i, zone_start 0x%llx, "
				"zone_end 0x%llx, bmp_initial_pos 0x%llx, "
				"bmp_pos 0x%llx, rlpos %i, rlsize %i.",
				done_zones, search_zone, pass,
				(unsigned long long)zone_start,
				(unsigned long long)zone_end,
				(unsigned long long)bmp_initial_pos,
				(unsigned long long)bmp_pos, rlpos, rlsize);
		/* Loop until we run out of free clusters. */
		last_read_pos = bmp_pos >> 3;
		ntfs_debug("last_read_pos 0x%llx.",
				(unsigned long long)last_read_pos);
		if (last_read_pos > lcnbmp_vi->i_size) {
			ntfs_debug("End of attribute reached.  "
					"Skipping to zone_pass_done.");
			goto zone_pass_done;
		}
		if (likely(page)) {
			if (need_writeback) {
				ntfs_debug("Marking page dirty.");
				flush_dcache_page(page);
				set_page_dirty(page);
				need_writeback = 0;
			}
			ntfs_unmap_page(page);
		}
		page = ntfs_map_page(mapping, last_read_pos >>
				PAGE_CACHE_SHIFT);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			ntfs_error(vol->sb, "Failed to map page.");
			goto out;
		}
		buf_size = last_read_pos & ~PAGE_CACHE_MASK;
		buf = page_address(page) + buf_size;
		buf_size = PAGE_CACHE_SIZE - buf_size;
		if (unlikely(last_read_pos + buf_size > lcnbmp_vi->i_size))
			buf_size = lcnbmp_vi->i_size - last_read_pos;
		/* Work in bits from here on. */
		buf_size <<= 3;
		lcn = bmp_pos & 7;
		bmp_pos &= ~7;
		ntfs_debug("Before inner while loop: buf_size %i, lcn 0x%llx, "
				"bmp_pos 0x%llx, need_writeback %i.", buf_size,
				(unsigned long long)lcn,
				(unsigned long long)bmp_pos, need_writeback);
		while (lcn < buf_size && lcn + bmp_pos < zone_end) {
			byte = buf + (lcn >> 3);
			ntfs_debug("In inner while loop: buf_size %i, "
					"lcn 0x%llx, bmp_pos 0x%llx, "
					"need_writeback %i, byte ofs 0x%x, "
					"*byte 0x%x.", buf_size,
					(unsigned long long)lcn,
					(unsigned long long)bmp_pos,
					need_writeback,
					(unsigned int)(lcn >> 3),
					(unsigned int)*byte);
			/* Skip full bytes. */
			if (*byte == 0xff) {
				lcn = (lcn + 8) & ~7;
				ntfs_debug("Continuing while loop 1.");
				continue;
			}
			bit = 1 << (lcn & 7);
			ntfs_debug("bit %i.", bit);
			/* If the bit is already set, go onto the next one. */
			if (*byte & bit) {
				lcn++;
				ntfs_debug("Continuing while loop 2.");
				continue;
			}
			/*
			 * Allocate more memory if needed, including space for
			 * the terminator element.
			 * ntfs_malloc_nofs() operates on whole pages only.
			 */
			if ((rlpos + 2) * sizeof(*rl) > rlsize) {
				runlist_element *rl2;

				ntfs_debug("Reallocating memory.");
				if (!rl)
					ntfs_debug("First free bit is at LCN "
							"0x%llx.",
							(unsigned long long)
							(lcn + bmp_pos));
				rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
				if (unlikely(!rl2)) {
					err = -ENOMEM;
					ntfs_error(vol->sb, "Failed to "
							"allocate memory.");
					goto out;
				}
				memcpy(rl2, rl, rlsize);
				ntfs_free(rl);
				rl = rl2;
				rlsize += PAGE_SIZE;
				ntfs_debug("Reallocated memory, rlsize 0x%x.",
						rlsize);
			}
			/* Allocate the bitmap bit. */
			*byte |= bit;
			/* We need to write this bitmap page to disk. */
			need_writeback = 1;
			ntfs_debug("*byte 0x%x, need_writeback is set.",
					(unsigned int)*byte);
			/*
			 * Coalesce with previous run if adjacent LCNs.
			 * Otherwise, append a new run.
			 */
			ntfs_debug("Adding run (lcn 0x%llx, len 0x%llx), "
					"prev_lcn 0x%llx, lcn 0x%llx, "
					"bmp_pos 0x%llx, prev_run_len 0x%llx, "
					"rlpos %i.",
					(unsigned long long)(lcn + bmp_pos),
					1ULL, (unsigned long long)prev_lcn,
					(unsigned long long)lcn,
					(unsigned long long)bmp_pos,
					(unsigned long long)prev_run_len,
					rlpos);
			if (prev_lcn == lcn + bmp_pos - prev_run_len && rlpos) {
				ntfs_debug("Coalescing to run (lcn 0x%llx, "
						"len 0x%llx).",
						(unsigned long long)
						rl[rlpos - 1].lcn,
						(unsigned long long)
						rl[rlpos - 1].length);
				rl[rlpos - 1].length = ++prev_run_len;
				ntfs_debug("Run now (lcn 0x%llx, len 0x%llx), "
						"prev_run_len 0x%llx.",
						(unsigned long long)
						rl[rlpos - 1].lcn,
						(unsigned long long)
						rl[rlpos - 1].length,
						(unsigned long long)
						prev_run_len);
			} else {
				if (likely(rlpos)) {
					ntfs_debug("Adding new run, (previous "
							"run lcn 0x%llx, "
							"len 0x%llx).",
							(unsigned long long)
							rl[rlpos - 1].lcn,
							(unsigned long long)
							rl[rlpos - 1].length);
					rl[rlpos].vcn = rl[rlpos - 1].vcn +
							rl[rlpos - 1].length;
				} else {
					ntfs_debug("Adding new run, is first "
							"run.");
					rl[rlpos].vcn = start_vcn;
				}
				rl[rlpos].lcn = prev_lcn = lcn + bmp_pos;
				rl[rlpos].length = prev_run_len = 1;
				rlpos++;
			}
			/* Done? */
			if (!--clusters) {
				LCN tc;

				/*
				 * Update the current zone position.  Positions
				 * of already scanned zones have been updated
				 * during the respective zone switches.
				 */
				tc = lcn + bmp_pos + 1;
				ntfs_debug("Done. Updating current zone "
						"position, tc 0x%llx, "
						"search_zone %i.",
						(unsigned long long)tc,
						search_zone);
				switch (search_zone) {
				case 1:
					ntfs_debug("Before checks, "
							"vol->mft_zone_pos "
							"0x%llx.",
							(unsigned long long)
							vol->mft_zone_pos);
					if (tc >= vol->mft_zone_end) {
						vol->mft_zone_pos =
								vol->mft_lcn;
						if (!vol->mft_zone_end)
							vol->mft_zone_pos = 0;
					} else if ((bmp_initial_pos >=
							vol->mft_zone_pos ||
							tc > vol->mft_zone_pos)
							&& tc >= vol->mft_lcn)
						vol->mft_zone_pos = tc;
					ntfs_debug("After checks, "
							"vol->mft_zone_pos "
							"0x%llx.",
							(unsigned long long)
							vol->mft_zone_pos);
					break;
				case 2:
					ntfs_debug("Before checks, "
							"vol->data1_zone_pos "
							"0x%llx.",
							(unsigned long long)
							vol->data1_zone_pos);
					if (tc >= vol->nr_clusters)
						vol->data1_zone_pos =
							vol->mft_zone_end;
					else if ((bmp_initial_pos >=
							vol->data1_zone_pos ||
							tc > vol->data1_zone_pos)
							&& tc >= vol->mft_zone_end)
						vol->data1_zone_pos = tc;
					ntfs_debug("After checks, "
							"vol->data1_zone_pos "
							"0x%llx.",
							(unsigned long long)
							vol->data1_zone_pos);
					break;
				case 4:
					ntfs_debug("Before checks, "
							"vol->data2_zone_pos "
							"0x%llx.",
							(unsigned long long)
							vol->data2_zone_pos);
					if (tc >= vol->mft_zone_start)
						vol->data2_zone_pos = 0;
					else if (bmp_initial_pos >=
							vol->data2_zone_pos ||
							tc > vol->data2_zone_pos)
						vol->data2_zone_pos = tc;
					ntfs_debug("After checks, "
							"vol->data2_zone_pos "
							"0x%llx.",
							(unsigned long long)
							vol->data2_zone_pos);
					break;
				default:
					BUG();
				}
				ntfs_debug("Finished.  Going to out.");
				goto out;
			}
			lcn++;
		}
492 ntfs_debug("After inner while loop: buf_size 0x%x, lcn "
493 "0x%llx, bmp_pos 0x%llx, need_writeback %i.",
494 buf_size
, (unsigned long long)lcn
,
495 (unsigned long long)bmp_pos
, need_writeback
);
496 if (bmp_pos
< zone_end
) {
497 ntfs_debug("Continuing outer while loop, "
498 "bmp_pos 0x%llx, zone_end 0x%llx.",
499 (unsigned long long)bmp_pos
,
500 (unsigned long long)zone_end
);
503 zone_pass_done
: /* Finished with the current zone pass. */
504 ntfs_debug("At zone_pass_done, pass %i.", pass
);
507 * Now do pass 2, scanning the first part of the zone
508 * we omitted in pass 1.
511 zone_end
= zone_start
;
512 switch (search_zone
) {
513 case 1: /* mft_zone */
514 zone_start
= vol
->mft_zone_start
;
516 case 2: /* data1_zone */
517 zone_start
= vol
->mft_zone_end
;
519 case 4: /* data2_zone */
526 if (zone_end
< zone_start
)
527 zone_end
= zone_start
;
528 bmp_pos
= zone_start
;
529 ntfs_debug("Continuing outer while loop, pass 2, "
530 "zone_start 0x%llx, zone_end 0x%llx, "
532 (unsigned long long)zone_start
,
533 (unsigned long long)zone_end
,
534 (unsigned long long)bmp_pos
);
538 ntfs_debug("At done_zones_check, search_zone %i, done_zones "
539 "before 0x%x, done_zones after 0x%x.",
540 search_zone
, done_zones
,
541 done_zones
| search_zone
);
542 done_zones
|= search_zone
;
543 if (done_zones
< 7) {
544 ntfs_debug("Switching zone.");
545 /* Now switch to the next zone we haven't done yet. */
547 switch (search_zone
) {
549 ntfs_debug("Switching from mft zone to data1 "
551 /* Update mft zone position. */
555 ntfs_debug("Before checks, "
560 tc
= rl
[rlpos
- 1].lcn
+
561 rl
[rlpos
- 1].length
;
562 if (tc
>= vol
->mft_zone_end
) {
565 if (!vol
->mft_zone_end
)
566 vol
->mft_zone_pos
= 0;
567 } else if ((bmp_initial_pos
>=
569 tc
> vol
->mft_zone_pos
)
570 && tc
>= vol
->mft_lcn
)
571 vol
->mft_zone_pos
= tc
;
572 ntfs_debug("After checks, "
578 /* Switch from mft zone to data1 zone. */
579 switch_to_data1_zone
: search_zone
= 2;
580 zone_start
= bmp_initial_pos
=
582 zone_end
= vol
->nr_clusters
;
583 if (zone_start
== vol
->mft_zone_end
)
585 if (zone_start
>= zone_end
) {
586 vol
->data1_zone_pos
= zone_start
=
592 ntfs_debug("Switching from data1 zone to "
594 /* Update data1 zone position. */
598 ntfs_debug("Before checks, "
599 "vol->data1_zone_pos "
602 vol
->data1_zone_pos
);
603 tc
= rl
[rlpos
- 1].lcn
+
604 rl
[rlpos
- 1].length
;
605 if (tc
>= vol
->nr_clusters
)
606 vol
->data1_zone_pos
=
608 else if ((bmp_initial_pos
>=
609 vol
->data1_zone_pos
||
610 tc
> vol
->data1_zone_pos
)
611 && tc
>= vol
->mft_zone_end
)
612 vol
->data1_zone_pos
= tc
;
613 ntfs_debug("After checks, "
614 "vol->data1_zone_pos "
617 vol
->data1_zone_pos
);
619 /* Switch from data1 zone to data2 zone. */
621 zone_start
= bmp_initial_pos
=
623 zone_end
= vol
->mft_zone_start
;
626 if (zone_start
>= zone_end
) {
627 vol
->data2_zone_pos
= zone_start
=
633 ntfs_debug("Switching from data2 zone to "
635 /* Update data2 zone position. */
639 ntfs_debug("Before checks, "
640 "vol->data2_zone_pos "
643 vol
->data2_zone_pos
);
644 tc
= rl
[rlpos
- 1].lcn
+
645 rl
[rlpos
- 1].length
;
646 if (tc
>= vol
->mft_zone_start
)
647 vol
->data2_zone_pos
= 0;
648 else if (bmp_initial_pos
>=
649 vol
->data2_zone_pos
||
650 tc
> vol
->data2_zone_pos
)
651 vol
->data2_zone_pos
= tc
;
652 ntfs_debug("After checks, "
653 "vol->data2_zone_pos "
656 vol
->data2_zone_pos
);
658 /* Switch from data2 zone to data1 zone. */
659 goto switch_to_data1_zone
;
663 ntfs_debug("After zone switch, search_zone %i, "
664 "pass %i, bmp_initial_pos 0x%llx, "
665 "zone_start 0x%llx, zone_end 0x%llx.",
667 (unsigned long long)bmp_initial_pos
,
668 (unsigned long long)zone_start
,
669 (unsigned long long)zone_end
);
670 bmp_pos
= zone_start
;
671 if (zone_start
== zone_end
) {
672 ntfs_debug("Empty zone, going to "
673 "done_zones_check.");
674 /* Empty zone. Don't bother searching it. */
675 goto done_zones_check
;
677 ntfs_debug("Continuing outer while loop.");
679 } /* done_zones == 7 */
680 ntfs_debug("All zones are finished.");
682 * All zones are finished! If DATA_ZONE, shrink mft zone. If
683 * MFT_ZONE, we have really run out of space.
685 mft_zone_size
= vol
->mft_zone_end
- vol
->mft_zone_start
;
686 ntfs_debug("vol->mft_zone_start 0x%llx, vol->mft_zone_end "
687 "0x%llx, mft_zone_size 0x%llx.",
688 (unsigned long long)vol
->mft_zone_start
,
689 (unsigned long long)vol
->mft_zone_end
,
690 (unsigned long long)mft_zone_size
);
691 if (zone
== MFT_ZONE
|| mft_zone_size
<= 0) {
692 ntfs_debug("No free clusters left, going to out.");
693 /* Really no more space left on device. */
696 } /* zone == DATA_ZONE && mft_zone_size > 0 */
697 ntfs_debug("Shrinking mft zone.");
698 zone_end
= vol
->mft_zone_end
;
700 if (mft_zone_size
> 0)
701 vol
->mft_zone_end
= vol
->mft_zone_start
+ mft_zone_size
;
702 else /* mft zone and data2 zone no longer exist. */
703 vol
->data2_zone_pos
= vol
->mft_zone_start
=
704 vol
->mft_zone_end
= 0;
705 if (vol
->mft_zone_pos
>= vol
->mft_zone_end
) {
706 vol
->mft_zone_pos
= vol
->mft_lcn
;
707 if (!vol
->mft_zone_end
)
708 vol
->mft_zone_pos
= 0;
710 bmp_pos
= zone_start
= bmp_initial_pos
=
711 vol
->data1_zone_pos
= vol
->mft_zone_end
;
715 ntfs_debug("After shrinking mft zone, mft_zone_size 0x%llx, "
716 "vol->mft_zone_start 0x%llx, "
717 "vol->mft_zone_end 0x%llx, "
718 "vol->mft_zone_pos 0x%llx, search_zone 2, "
719 "pass 2, dones_zones 0x%x, zone_start 0x%llx, "
720 "zone_end 0x%llx, vol->data1_zone_pos 0x%llx, "
721 "continuing outer while loop.",
722 (unsigned long long)mft_zone_size
,
723 (unsigned long long)vol
->mft_zone_start
,
724 (unsigned long long)vol
->mft_zone_end
,
725 (unsigned long long)vol
->mft_zone_pos
,
726 done_zones
, (unsigned long long)zone_start
,
727 (unsigned long long)zone_end
,
728 (unsigned long long)vol
->data1_zone_pos
);
730 ntfs_debug("After outer while loop.");
732 ntfs_debug("At out.");
733 /* Add runlist terminator element. */
735 rl
[rlpos
].vcn
= rl
[rlpos
- 1].vcn
+ rl
[rlpos
- 1].length
;
736 rl
[rlpos
].lcn
= LCN_RL_NOT_MAPPED
;
737 rl
[rlpos
].length
= 0;
739 if (likely(page
&& !IS_ERR(page
))) {
740 if (need_writeback
) {
741 ntfs_debug("Marking page dirty.");
742 flush_dcache_page(page
);
743 set_page_dirty(page
);
746 ntfs_unmap_page(page
);
749 up_write(&vol
->lcnbmp_lock
);
	ntfs_error(vol->sb, "Failed to allocate clusters, aborting "
			"(error %i).", err);
	if (rl) {
		int err2;

		if (err == ENOSPC)
			ntfs_debug("Not enough space to complete allocation, "
					"err ENOSPC, first free lcn 0x%llx, "
					"could allocate up to 0x%llx "
					"clusters.",
					(unsigned long long)rl[0].lcn,
					(unsigned long long)count - clusters);
		/* Deallocate all allocated clusters. */
		ntfs_debug("Attempting rollback...");
		err2 = ntfs_cluster_free_from_rl_nolock(vol, rl);
		if (err2) {
			ntfs_error(vol->sb, "Failed to rollback (error %i).  "
					"Leaving inconsistent metadata!  "
					"Unmount and run chkdsk.", err2);
			NVolSetErrors(vol);
		}
		/* Free the runlist. */
		ntfs_free(rl);
	} else if (err == ENOSPC)
		ntfs_debug("No space left at all, err = ENOSPC, "
				"first free lcn = 0x%llx.",
				(unsigned long long)vol->data1_zone_pos);
	up_write(&vol->lcnbmp_lock);
	return ERR_PTR(-err);
}
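/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * caller (example_alloc_data_clusters()) consuming ntfs_cluster_alloc() as
 * described in its documentation, i.e. checking the returned runlist with
 * IS_ERR()/PTR_ERR().  The runlist would normally be merged into the
 * attribute's existing runlist; here it is simply released again with
 * ntfs_free(), assumed to be the counterpart to ntfs_malloc_nofs().
 */
static int example_alloc_data_clusters(ntfs_volume *vol, const VCN start_vcn,
		const s64 nr_clusters)
{
	runlist_element *rl;

	/* Allocate from the data zone at the current allocator position. */
	rl = ntfs_cluster_alloc(vol, start_vcn, nr_clusters, -1, DATA_ZONE);
	if (IS_ERR(rl))
		return PTR_ERR(rl);
	/* ... merge @rl with the attribute's runlist here ... */
	ntfs_free(rl);
	return 0;
}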
/**
 * __ntfs_cluster_free - free clusters on an ntfs volume
 * @vi:		vfs inode whose runlist describes the clusters to free
 * @start_vcn:	vcn in the runlist of @vi at which to start freeing clusters
 * @count:	number of clusters to free or -1 for all clusters
 * @is_rollback:	if TRUE this is a rollback operation
 *
 * Free @count clusters starting at the cluster @start_vcn in the runlist
 * described by the vfs inode @vi.
 *
 * If @count is -1, all clusters from @start_vcn to the end of the runlist are
 * deallocated.  Thus, to completely free all clusters in a runlist, use
 * @start_vcn = 0 and @count = -1.
 *
 * @is_rollback should always be FALSE, it is for internal use to rollback
 * errors.  You probably want to use ntfs_cluster_free() instead.
 *
 * Note, ntfs_cluster_free() does not modify the runlist at all, so the caller
 * has to deal with it later.
 *
 * Return the number of deallocated clusters (not counting sparse ones) on
 * success and -errno on error.
 *
 * Locking: - The runlist described by @vi must be unlocked on entry and is
 *	      unlocked on return.
 *	    - This function takes the runlist lock of @vi for reading and
 *	      sometimes for writing and sometimes modifies the runlist.
 *	    - The volume lcn bitmap must be unlocked on entry and is unlocked
 *	      on return.
 *	    - This function takes the volume lcn bitmap lock for writing and
 *	      modifies the bitmap contents.
 */
s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
		const BOOL is_rollback)
{
	s64 delta, to_free, total_freed, real_freed;
	ntfs_volume *vol;
	struct inode *lcnbmp_vi;
	ntfs_inode *ni;
	runlist_element *rl;
	int err;

	BUG_ON(!vi);
	ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count "
			"0x%llx.%s", vi->i_ino, (unsigned long long)start_vcn,
			(unsigned long long)count,
			is_rollback ? " (rollback)" : "");
	ni = NTFS_I(vi);
	vol = ni->vol;
	lcnbmp_vi = vol->lcnbmp_ino;
	BUG_ON(start_vcn < 0);
	/*
	 * Lock the lcn bitmap for writing but only if not rolling back.  We
	 * must hold the lock all the way including through rollback otherwise
	 * rollback is not possible because once we have cleared a bit and
	 * dropped the lock, anyone could have set the bit again, thus
	 * allocating the cluster for another use.
	 */
	if (likely(!is_rollback))
		down_write(&vol->lcnbmp_lock);

	total_freed = real_freed = 0;
	/* This returns with ni->runlist locked for reading on success. */
	rl = ntfs_find_vcn(ni, start_vcn, FALSE);
	if (IS_ERR(rl)) {
		if (!is_rollback)
			ntfs_error(vol->sb, "Failed to find first runlist "
					"element (error %li), aborting.",
					PTR_ERR(rl));
		err = PTR_ERR(rl);
		goto err_out;
	}
	if (unlikely(rl->lcn < LCN_HOLE)) {
		if (!is_rollback)
			ntfs_error(vol->sb, "First runlist element has "
					"invalid lcn, aborting.");
		err = -EIO;
		goto err_out;
	}
	/* Find the starting cluster inside the run that needs freeing. */
	delta = start_vcn - rl->vcn;

	/* The number of clusters in this run that need freeing. */
	to_free = rl->length - delta;
	if (count >= 0 && to_free > count)
		to_free = count;

	if (likely(rl->lcn >= 0)) {
		/* Do the actual freeing of the clusters in this run. */
		err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn + delta,
				to_free, likely(!is_rollback) ? 0 : 1);
		if (unlikely(err)) {
			if (!is_rollback)
				ntfs_error(vol->sb, "Failed to clear first run "
						"(error %i), aborting.", err);
			goto err_out;
		}
		/* We have freed @to_free real clusters. */
		real_freed = to_free;
	}
	/* Go to the next run and adjust the number of clusters left to free. */
	++rl;
	if (count >= 0)
		count -= to_free;

	/* Keep track of the total "freed" clusters, including sparse ones. */
	total_freed = to_free;
	/*
	 * Loop over the remaining runs, using @count as a capping value, and
	 * free them.
	 */
	for (; rl->length && count != 0; ++rl) {
		if (unlikely(rl->lcn < LCN_HOLE)) {
			VCN vcn;

			/*
			 * Attempt to map runlist, dropping runlist lock for
			 * the duration of the mapping.
			 */
			vcn = rl->vcn;
			up_read(&ni->runlist.lock);
			err = ntfs_map_runlist(ni, vcn);
			if (err) {
				if (!is_rollback)
					ntfs_error(vol->sb, "Failed to map "
							"runlist fragment.");
				if (err == -EINVAL || err == -ENOENT)
					err = -EIO;
				goto err_out;
			}
			/*
			 * This returns with ni->runlist locked for reading on
			 * success.
			 */
			rl = ntfs_find_vcn(ni, vcn, FALSE);
			if (IS_ERR(rl)) {
				err = PTR_ERR(rl);
				if (!is_rollback)
					ntfs_error(vol->sb, "Failed to find "
							"subsequent runlist "
							"element.");
				goto err_out;
			}
			if (unlikely(rl->lcn < LCN_HOLE)) {
				if (!is_rollback)
					ntfs_error(vol->sb, "Runlist element "
							"has invalid lcn "
							"(0x%llx).",
							(unsigned long long)
							rl->lcn);
				err = -EIO;
				goto err_out;
			}
		}
		/* The number of clusters in this run that need freeing. */
		to_free = rl->length;
		if (count >= 0 && to_free > count)
			to_free = count;

		if (likely(rl->lcn >= 0)) {
			/* Do the actual freeing of the clusters in the run. */
			err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn,
					to_free, likely(!is_rollback) ? 0 : 1);
			if (unlikely(err)) {
				if (!is_rollback)
					ntfs_error(vol->sb, "Failed to clear "
							"subsequent run.");
				goto err_out;
			}
			/* We have freed @to_free real clusters. */
			real_freed += to_free;
		}
		/* Adjust the number of clusters left to free. */
		if (count >= 0)
			count -= to_free;

		/* Update the total done clusters. */
		total_freed += to_free;
	}
966 up_read(&ni
->runlist
.lock
);
967 if (likely(!is_rollback
))
968 up_write(&vol
->lcnbmp_lock
);
972 /* We are done. Return the number of actually freed clusters. */
976 up_read(&ni
->runlist
.lock
);
	if (is_rollback)
		return err;

	/* If no real clusters were freed, no need to rollback. */
	if (!real_freed) {
		up_write(&vol->lcnbmp_lock);
		return err;
	}
	/*
	 * Attempt to rollback and if that succeeds just return the error code.
	 * If rollback fails, set the volume errors flag, emit an error
	 * message, and return the error code.
	 */
	delta = __ntfs_cluster_free(vi, start_vcn, total_freed, TRUE);
	if (delta < 0) {
		ntfs_error(vol->sb, "Failed to rollback (error %i).  Leaving "
				"inconsistent metadata!  Unmount and run "
				"chkdsk.", (int)delta);
		NVolSetErrors(vol);
	}
	up_write(&vol->lcnbmp_lock);
	ntfs_error(vol->sb, "Aborting (error %i).", err);
	return err;
}
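/*
 * Illustrative sketch only, not part of the original file: the documentation
 * above says callers should use ntfs_cluster_free() rather than calling
 * __ntfs_cluster_free() directly; a wrapper to that effect would presumably
 * look like this hypothetical example, passing FALSE for @is_rollback.
 */
static inline s64 example_cluster_free(struct inode *vi, const VCN start_vcn,
		s64 count)
{
	return __ntfs_cluster_free(vi, start_vcn, count, FALSE);
}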
#endif /* NTFS_RW */