// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements functions that manage the running of the commit process.
 * Each affected module has its own functions to accomplish their part in the
 * commit and those functions are called here.
 *
 * The commit is the process whereby all updates to the index and LEB properties
 * are written out together and the journal becomes empty. This keeps the
 * file system consistent - at all times the state can be recreated by reading
 * the index and LEB properties and then replaying the journal.
 *
 * The commit is split into two parts named "commit start" and "commit end".
 * During commit start, the commit process has exclusive access to the journal
 * by holding the commit semaphore down for writing. As few I/O operations as
 * possible are performed during commit start, instead the nodes that are to be
 * written are merely identified. During commit end, the commit semaphore is no
 * longer held and the journal is again in operation, allowing users to continue
 * to use the file system while the bulk of the commit I/O is performed. The
 * purpose of this two-step approach is to prevent the commit from causing any
 * latency blips. Note that in any case, the commit does not prevent lookups
 * (as permitted by the TNC mutex), or access to VFS data structures e.g. page
 * cache.
 */
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "ubifs.h"
40 * nothing_to_commit - check if there is nothing to commit.
41 * @c: UBIFS file-system description object
43 * This is a helper function which checks if there is anything to commit. It is
44 * used as an optimization to avoid starting the commit if it is not really
45 * necessary. Indeed, the commit operation always assumes flash I/O (e.g.,
46 * writing the commit start node to the log), and it is better to avoid doing
47 * this unnecessarily. E.g., 'ubifs_sync_fs()' runs the commit, but if there is
48 * nothing to commit, it is more optimal to avoid any flash I/O.
50 * This function has to be called with @c->commit_sem locked for writing -
51 * this function does not take LPT/TNC locks because the @c->commit_sem
52 * guarantees that we have exclusive access to the TNC and LPT data structures.
54 * This function returns %1 if there is nothing to commit and %0 otherwise.
56 static int nothing_to_commit(struct ubifs_info
*c
)
59 * During mounting or remounting from R/O mode to R/W mode we may
60 * commit for various recovery-related reasons.
62 if (c
->mounting
|| c
->remounting_rw
)
66 * If the root TNC node is dirty, we definitely have something to
69 if (c
->zroot
.znode
&& ubifs_zn_dirty(c
->zroot
.znode
))
73 * Increasing @c->dirty_pn_cnt/@c->dirty_nn_cnt and marking
74 * nnodes/pnodes as dirty in run_gc() could race with following
75 * checking, which leads inconsistent states between @c->nroot
76 * and @c->dirty_pn_cnt/@c->dirty_nn_cnt, holding @c->lp_mutex
79 mutex_lock(&c
->lp_mutex
);
81 * Even though the TNC is clean, the LPT tree may have dirty nodes. For
82 * example, this may happen if the budgeting subsystem invoked GC to
83 * make some free space, and the GC found an LEB with only dirty and
84 * free space. In this case GC would just change the lprops of this
85 * LEB (by turning all space into free space) and unmap it.
87 if (c
->nroot
&& test_bit(DIRTY_CNODE
, &c
->nroot
->flags
)) {
88 mutex_unlock(&c
->lp_mutex
);
92 ubifs_assert(c
, atomic_long_read(&c
->dirty_zn_cnt
) == 0);
93 ubifs_assert(c
, c
->dirty_pn_cnt
== 0);
94 ubifs_assert(c
, c
->dirty_nn_cnt
== 0);
95 mutex_unlock(&c
->lp_mutex
);
101 * do_commit - commit the journal.
102 * @c: UBIFS file-system description object
104 * This function implements UBIFS commit. It has to be called with commit lock
105 * locked. Returns zero in case of success and a negative error code in case of
108 static int do_commit(struct ubifs_info
*c
)
110 int err
, new_ltail_lnum
, old_ltail_lnum
, i
;
111 struct ubifs_zbranch zroot
;
112 struct ubifs_lp_stats lst
;
115 ubifs_assert(c
, !c
->ro_media
&& !c
->ro_mount
);
122 if (nothing_to_commit(c
)) {
123 up_write(&c
->commit_sem
);
128 /* Sync all write buffers (necessary for recovery) */
129 for (i
= 0; i
< c
->jhead_cnt
; i
++) {
130 err
= ubifs_wbuf_sync(&c
->jheads
[i
].wbuf
);
136 err
= ubifs_gc_start_commit(c
);
139 err
= dbg_check_lprops(c
);
142 err
= ubifs_log_start_commit(c
, &new_ltail_lnum
);
145 err
= ubifs_tnc_start_commit(c
, &zroot
);
148 err
= ubifs_lpt_start_commit(c
);
151 err
= ubifs_orphan_start_commit(c
);
155 ubifs_get_lp_stats(c
, &lst
);
157 up_write(&c
->commit_sem
);
159 err
= ubifs_tnc_end_commit(c
);
162 err
= ubifs_lpt_end_commit(c
);
165 err
= ubifs_orphan_end_commit(c
);
168 err
= dbg_check_old_index(c
, &zroot
);
172 c
->mst_node
->cmt_no
= cpu_to_le64(c
->cmt_no
);
173 c
->mst_node
->log_lnum
= cpu_to_le32(new_ltail_lnum
);
174 c
->mst_node
->root_lnum
= cpu_to_le32(zroot
.lnum
);
175 c
->mst_node
->root_offs
= cpu_to_le32(zroot
.offs
);
176 c
->mst_node
->root_len
= cpu_to_le32(zroot
.len
);
177 c
->mst_node
->ihead_lnum
= cpu_to_le32(c
->ihead_lnum
);
178 c
->mst_node
->ihead_offs
= cpu_to_le32(c
->ihead_offs
);
179 c
->mst_node
->index_size
= cpu_to_le64(c
->bi
.old_idx_sz
);
180 c
->mst_node
->lpt_lnum
= cpu_to_le32(c
->lpt_lnum
);
181 c
->mst_node
->lpt_offs
= cpu_to_le32(c
->lpt_offs
);
182 c
->mst_node
->nhead_lnum
= cpu_to_le32(c
->nhead_lnum
);
183 c
->mst_node
->nhead_offs
= cpu_to_le32(c
->nhead_offs
);
184 c
->mst_node
->ltab_lnum
= cpu_to_le32(c
->ltab_lnum
);
185 c
->mst_node
->ltab_offs
= cpu_to_le32(c
->ltab_offs
);
186 c
->mst_node
->lsave_lnum
= cpu_to_le32(c
->lsave_lnum
);
187 c
->mst_node
->lsave_offs
= cpu_to_le32(c
->lsave_offs
);
188 c
->mst_node
->lscan_lnum
= cpu_to_le32(c
->lscan_lnum
);
189 c
->mst_node
->empty_lebs
= cpu_to_le32(lst
.empty_lebs
);
190 c
->mst_node
->idx_lebs
= cpu_to_le32(lst
.idx_lebs
);
191 c
->mst_node
->total_free
= cpu_to_le64(lst
.total_free
);
192 c
->mst_node
->total_dirty
= cpu_to_le64(lst
.total_dirty
);
193 c
->mst_node
->total_used
= cpu_to_le64(lst
.total_used
);
194 c
->mst_node
->total_dead
= cpu_to_le64(lst
.total_dead
);
195 c
->mst_node
->total_dark
= cpu_to_le64(lst
.total_dark
);
197 c
->mst_node
->flags
|= cpu_to_le32(UBIFS_MST_NO_ORPHS
);
199 c
->mst_node
->flags
&= ~cpu_to_le32(UBIFS_MST_NO_ORPHS
);
201 old_ltail_lnum
= c
->ltail_lnum
;
202 err
= ubifs_log_end_commit(c
, new_ltail_lnum
);
206 err
= ubifs_log_post_commit(c
, old_ltail_lnum
);
209 err
= ubifs_gc_end_commit(c
);
212 err
= ubifs_lpt_post_commit(c
);
217 spin_lock(&c
->cs_lock
);
218 c
->cmt_state
= COMMIT_RESTING
;
220 dbg_cmt("commit end");
221 spin_unlock(&c
->cs_lock
);
225 up_write(&c
->commit_sem
);
227 ubifs_err(c
, "commit failed, error %d", err
);
228 spin_lock(&c
->cs_lock
);
229 c
->cmt_state
= COMMIT_BROKEN
;
231 spin_unlock(&c
->cs_lock
);
232 ubifs_ro_mode(c
, err
);
237 * run_bg_commit - run background commit if it is needed.
238 * @c: UBIFS file-system description object
240 * This function runs background commit if it is needed. Returns zero in case
241 * of success and a negative error code in case of failure.
243 static int run_bg_commit(struct ubifs_info
*c
)
245 spin_lock(&c
->cs_lock
);
247 * Run background commit only if background commit was requested or if
248 * commit is required.
250 if (c
->cmt_state
!= COMMIT_BACKGROUND
&&
251 c
->cmt_state
!= COMMIT_REQUIRED
)
253 spin_unlock(&c
->cs_lock
);
255 down_write(&c
->commit_sem
);
256 spin_lock(&c
->cs_lock
);
257 if (c
->cmt_state
== COMMIT_REQUIRED
)
258 c
->cmt_state
= COMMIT_RUNNING_REQUIRED
;
259 else if (c
->cmt_state
== COMMIT_BACKGROUND
)
260 c
->cmt_state
= COMMIT_RUNNING_BACKGROUND
;
263 spin_unlock(&c
->cs_lock
);
268 up_write(&c
->commit_sem
);
270 spin_unlock(&c
->cs_lock
);
275 * ubifs_bg_thread - UBIFS background thread function.
276 * @info: points to the file-system description object
278 * This function implements various file-system background activities:
279 * o when a write-buffer timer expires it synchronizes the appropriate
281 * o when the journal is about to be full, it starts in-advance commit.
283 * Note, other stuff like background garbage collection may be added here in
286 int ubifs_bg_thread(void *info
)
289 struct ubifs_info
*c
= info
;
291 ubifs_msg(c
, "background thread \"%s\" started, PID %d",
292 c
->bgt_name
, current
->pid
);
296 if (kthread_should_stop())
302 set_current_state(TASK_INTERRUPTIBLE
);
303 /* Check if there is something to do */
306 * Nothing prevents us from going sleep now and
307 * be never woken up and block the task which
308 * could wait in 'kthread_stop()' forever.
310 if (kthread_should_stop())
315 __set_current_state(TASK_RUNNING
);
318 err
= ubifs_bg_wbufs_sync(c
);
320 ubifs_ro_mode(c
, err
);
326 ubifs_msg(c
, "background thread \"%s\" stops", c
->bgt_name
);
331 * ubifs_commit_required - set commit state to "required".
332 * @c: UBIFS file-system description object
334 * This function is called if a commit is required but cannot be done from the
335 * calling function, so it is just flagged instead.
337 void ubifs_commit_required(struct ubifs_info
*c
)
339 spin_lock(&c
->cs_lock
);
340 switch (c
->cmt_state
) {
342 case COMMIT_BACKGROUND
:
343 dbg_cmt("old: %s, new: %s", dbg_cstate(c
->cmt_state
),
344 dbg_cstate(COMMIT_REQUIRED
));
345 c
->cmt_state
= COMMIT_REQUIRED
;
347 case COMMIT_RUNNING_BACKGROUND
:
348 dbg_cmt("old: %s, new: %s", dbg_cstate(c
->cmt_state
),
349 dbg_cstate(COMMIT_RUNNING_REQUIRED
));
350 c
->cmt_state
= COMMIT_RUNNING_REQUIRED
;
352 case COMMIT_REQUIRED
:
353 case COMMIT_RUNNING_REQUIRED
:
357 spin_unlock(&c
->cs_lock
);
361 * ubifs_request_bg_commit - notify the background thread to do a commit.
362 * @c: UBIFS file-system description object
364 * This function is called if the journal is full enough to make a commit
365 * worthwhile, so background thread is kicked to start it.
367 void ubifs_request_bg_commit(struct ubifs_info
*c
)
369 spin_lock(&c
->cs_lock
);
370 if (c
->cmt_state
== COMMIT_RESTING
) {
371 dbg_cmt("old: %s, new: %s", dbg_cstate(c
->cmt_state
),
372 dbg_cstate(COMMIT_BACKGROUND
));
373 c
->cmt_state
= COMMIT_BACKGROUND
;
374 spin_unlock(&c
->cs_lock
);
375 ubifs_wake_up_bgt(c
);
377 spin_unlock(&c
->cs_lock
);
381 * wait_for_commit - wait for commit.
382 * @c: UBIFS file-system description object
384 * This function sleeps until the commit operation is no longer running.
386 static int wait_for_commit(struct ubifs_info
*c
)
388 dbg_cmt("pid %d goes sleep", current
->pid
);
391 * The following sleeps if the condition is false, and will be woken
392 * when the commit ends. It is possible, although very unlikely, that we
393 * will wake up and see the subsequent commit running, rather than the
394 * one we were waiting for, and go back to sleep. However, we will be
395 * woken again, so there is no danger of sleeping forever.
397 wait_event(c
->cmt_wq
, c
->cmt_state
!= COMMIT_RUNNING_BACKGROUND
&&
398 c
->cmt_state
!= COMMIT_RUNNING_REQUIRED
);
399 dbg_cmt("commit finished, pid %d woke up", current
->pid
);
404 * ubifs_run_commit - run or wait for commit.
405 * @c: UBIFS file-system description object
407 * This function runs commit and returns zero in case of success and a negative
408 * error code in case of failure.
410 int ubifs_run_commit(struct ubifs_info
*c
)
414 spin_lock(&c
->cs_lock
);
415 if (c
->cmt_state
== COMMIT_BROKEN
) {
420 if (c
->cmt_state
== COMMIT_RUNNING_BACKGROUND
)
422 * We set the commit state to 'running required' to indicate
423 * that we want it to complete as quickly as possible.
425 c
->cmt_state
= COMMIT_RUNNING_REQUIRED
;
427 if (c
->cmt_state
== COMMIT_RUNNING_REQUIRED
) {
428 spin_unlock(&c
->cs_lock
);
429 return wait_for_commit(c
);
431 spin_unlock(&c
->cs_lock
);
433 /* Ok, the commit is indeed needed */
435 down_write(&c
->commit_sem
);
436 spin_lock(&c
->cs_lock
);
438 * Since we unlocked 'c->cs_lock', the state may have changed, so
441 if (c
->cmt_state
== COMMIT_BROKEN
) {
446 if (c
->cmt_state
== COMMIT_RUNNING_BACKGROUND
)
447 c
->cmt_state
= COMMIT_RUNNING_REQUIRED
;
449 if (c
->cmt_state
== COMMIT_RUNNING_REQUIRED
) {
450 up_write(&c
->commit_sem
);
451 spin_unlock(&c
->cs_lock
);
452 return wait_for_commit(c
);
454 c
->cmt_state
= COMMIT_RUNNING_REQUIRED
;
455 spin_unlock(&c
->cs_lock
);
461 up_write(&c
->commit_sem
);
463 spin_unlock(&c
->cs_lock
);
468 * ubifs_gc_should_commit - determine if it is time for GC to run commit.
469 * @c: UBIFS file-system description object
471 * This function is called by garbage collection to determine if commit should
472 * be run. If commit state is @COMMIT_BACKGROUND, which means that the journal
473 * is full enough to start commit, this function returns true. It is not
474 * absolutely necessary to commit yet, but it feels like this should be better
475 * then to keep doing GC. This function returns %1 if GC has to initiate commit
478 int ubifs_gc_should_commit(struct ubifs_info
*c
)
482 spin_lock(&c
->cs_lock
);
483 if (c
->cmt_state
== COMMIT_BACKGROUND
) {
484 dbg_cmt("commit required now");
485 c
->cmt_state
= COMMIT_REQUIRED
;
487 dbg_cmt("commit not requested");
488 if (c
->cmt_state
== COMMIT_REQUIRED
)
490 spin_unlock(&c
->cs_lock
);
495 * Everything below is related to debugging.
499 * struct idx_node - hold index nodes during index tree traversal.
501 * @iip: index in parent (slot number of this indexing node in the parent
503 * @upper_key: all keys in this indexing node have to be less or equivalent to
505 * @idx: index node (8-byte aligned because all node structures must be 8-byte
509 struct list_head list
;
511 union ubifs_key upper_key
;
512 struct ubifs_idx_node idx
__aligned(8);
516 * dbg_old_index_check_init - get information for the next old index check.
517 * @c: UBIFS file-system description object
518 * @zroot: root of the index
520 * This function records information about the index that will be needed for the
521 * next old index check i.e. 'dbg_check_old_index()'.
523 * This function returns %0 on success and a negative error code on failure.
525 int dbg_old_index_check_init(struct ubifs_info
*c
, struct ubifs_zbranch
*zroot
)
527 struct ubifs_idx_node
*idx
;
528 int lnum
, offs
, len
, err
= 0;
529 struct ubifs_debug_info
*d
= c
->dbg
;
531 d
->old_zroot
= *zroot
;
532 lnum
= d
->old_zroot
.lnum
;
533 offs
= d
->old_zroot
.offs
;
534 len
= d
->old_zroot
.len
;
536 idx
= kmalloc(c
->max_idx_node_sz
, GFP_NOFS
);
540 err
= ubifs_read_node(c
, idx
, UBIFS_IDX_NODE
, len
, lnum
, offs
);
544 d
->old_zroot_level
= le16_to_cpu(idx
->level
);
545 d
->old_zroot_sqnum
= le64_to_cpu(idx
->ch
.sqnum
);
552 * dbg_check_old_index - check the old copy of the index.
553 * @c: UBIFS file-system description object
554 * @zroot: root of the new index
556 * In order to be able to recover from an unclean unmount, a complete copy of
557 * the index must exist on flash. This is the "old" index. The commit process
558 * must write the "new" index to flash without overwriting or destroying any
559 * part of the old index. This function is run at commit end in order to check
560 * that the old index does indeed exist completely intact.
562 * This function returns %0 on success and a negative error code on failure.
564 int dbg_check_old_index(struct ubifs_info
*c
, struct ubifs_zbranch
*zroot
)
566 int lnum
, offs
, len
, err
= 0, last_level
, child_cnt
;
568 struct ubifs_debug_info
*d
= c
->dbg
;
569 union ubifs_key lower_key
, upper_key
, l_key
, u_key
;
570 unsigned long long last_sqnum
;
571 struct ubifs_idx_node
*idx
;
572 struct list_head list
;
576 if (!dbg_is_chk_index(c
))
579 INIT_LIST_HEAD(&list
);
581 sz
= sizeof(struct idx_node
) + ubifs_idx_node_sz(c
, c
->fanout
) -
584 /* Start at the old zroot */
585 lnum
= d
->old_zroot
.lnum
;
586 offs
= d
->old_zroot
.offs
;
587 len
= d
->old_zroot
.len
;
591 * Traverse the index tree preorder depth-first i.e. do a node and then
592 * its subtrees from left to right.
595 struct ubifs_branch
*br
;
597 /* Get the next index node */
598 i
= kmalloc(sz
, GFP_NOFS
);
604 /* Keep the index nodes on our path in a linked list */
605 list_add_tail(&i
->list
, &list
);
606 /* Read the index node */
608 err
= ubifs_read_node(c
, idx
, UBIFS_IDX_NODE
, len
, lnum
, offs
);
611 /* Validate index node */
612 child_cnt
= le16_to_cpu(idx
->child_cnt
);
613 if (child_cnt
< 1 || child_cnt
> c
->fanout
) {
619 /* Check root level and sqnum */
620 if (le16_to_cpu(idx
->level
) != d
->old_zroot_level
) {
624 if (le64_to_cpu(idx
->ch
.sqnum
) != d
->old_zroot_sqnum
) {
628 /* Set last values as though root had a parent */
629 last_level
= le16_to_cpu(idx
->level
) + 1;
630 last_sqnum
= le64_to_cpu(idx
->ch
.sqnum
) + 1;
631 key_read(c
, ubifs_idx_key(c
, idx
), &lower_key
);
632 highest_ino_key(c
, &upper_key
, INUM_WATERMARK
);
634 key_copy(c
, &upper_key
, &i
->upper_key
);
635 if (le16_to_cpu(idx
->level
) != last_level
- 1) {
640 * The index is always written bottom up hence a child's sqnum
641 * is always less than the parents.
643 if (le64_to_cpu(idx
->ch
.sqnum
) >= last_sqnum
) {
647 /* Check key range */
648 key_read(c
, ubifs_idx_key(c
, idx
), &l_key
);
649 br
= ubifs_idx_branch(c
, idx
, child_cnt
- 1);
650 key_read(c
, &br
->key
, &u_key
);
651 if (keys_cmp(c
, &lower_key
, &l_key
) > 0) {
655 if (keys_cmp(c
, &upper_key
, &u_key
) < 0) {
659 if (keys_cmp(c
, &upper_key
, &u_key
) == 0)
660 if (!is_hash_key(c
, &u_key
)) {
664 /* Go to next index node */
665 if (le16_to_cpu(idx
->level
) == 0) {
666 /* At the bottom, so go up until can go right */
668 /* Drop the bottom of the list */
671 /* No more list means we are done */
672 if (list_empty(&list
))
674 /* Look at the new bottom */
675 i
= list_entry(list
.prev
, struct idx_node
,
678 /* Can we go right */
679 if (iip
+ 1 < le16_to_cpu(idx
->child_cnt
)) {
683 /* Nope, so go up again */
690 * We have the parent in 'idx' and now we set up for reading the
691 * child pointed to by slot 'iip'.
693 last_level
= le16_to_cpu(idx
->level
);
694 last_sqnum
= le64_to_cpu(idx
->ch
.sqnum
);
695 br
= ubifs_idx_branch(c
, idx
, iip
);
696 lnum
= le32_to_cpu(br
->lnum
);
697 offs
= le32_to_cpu(br
->offs
);
698 len
= le32_to_cpu(br
->len
);
699 key_read(c
, &br
->key
, &lower_key
);
700 if (iip
+ 1 < le16_to_cpu(idx
->child_cnt
)) {
701 br
= ubifs_idx_branch(c
, idx
, iip
+ 1);
702 key_read(c
, &br
->key
, &upper_key
);
704 key_copy(c
, &i
->upper_key
, &upper_key
);
707 err
= dbg_old_index_check_init(c
, zroot
);
714 ubifs_err(c
, "dumping index node (iip=%d)", i
->iip
);
715 ubifs_dump_node(c
, idx
, ubifs_idx_node_sz(c
, c
->fanout
));
718 if (!list_empty(&list
)) {
719 i
= list_entry(list
.prev
, struct idx_node
, list
);
720 ubifs_err(c
, "dumping parent index node");
721 ubifs_dump_node(c
, &i
->idx
, ubifs_idx_node_sz(c
, c
->fanout
));
724 while (!list_empty(&list
)) {
725 i
= list_entry(list
.next
, struct idx_node
, list
);
729 ubifs_err(c
, "failed, error %d", err
);