fs/xfs/libxfs/xfs_btree_mem.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2021-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"
#include "xfs_ag.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"

/* Set the root of an in-memory btree. */
void
xfbtree_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);

	cur->bc_mem.xfbtree->root = *ptr;
	cur->bc_mem.xfbtree->nlevels += inc;
}

/* Initialize a pointer from the in-memory btree header. */
void
xfbtree_init_ptr_from_cur(
	struct xfs_btree_cur		*cur,
	union xfs_btree_ptr		*ptr)
{
	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);

	*ptr = cur->bc_mem.xfbtree->root;
}

/* Duplicate an in-memory btree cursor. */
struct xfs_btree_cur *
xfbtree_dup_cursor(
	struct xfs_btree_cur		*cur)
{
	struct xfs_btree_cur		*ncur;

	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);

	ncur = xfs_btree_alloc_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ops,
			cur->bc_maxlevels, cur->bc_cache);
	ncur->bc_flags = cur->bc_flags;
	ncur->bc_nlevels = cur->bc_nlevels;
	ncur->bc_mem.xfbtree = cur->bc_mem.xfbtree;
	if (cur->bc_group)
		ncur->bc_group = xfs_group_hold(cur->bc_group);
	return ncur;
}

/* Close the btree xfile and release all resources. */
void
xfbtree_destroy(
	struct xfbtree		*xfbt)
{
	xfs_buftarg_drain(xfbt->target);
}

/* Compute the number of bytes available for records. */
static inline unsigned int
xfbtree_rec_bytes(
	struct xfs_mount		*mp,
	const struct xfs_btree_ops	*ops)
{
	return XMBUF_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN;
}

/* Initialize an empty leaf block as the btree root. */
STATIC int
xfbtree_init_leaf_block(
	struct xfs_mount		*mp,
	struct xfbtree			*xfbt,
	const struct xfs_btree_ops	*ops)
{
	struct xfs_buf			*bp;
	xfbno_t				bno = xfbt->highest_bno++;
	int				error;

	error = xfs_buf_get(xfbt->target, xfbno_to_daddr(bno), XFBNO_BBSIZE,
			&bp);
	if (error)
		return error;

	trace_xfbtree_create_root_buf(xfbt, bp);

	bp->b_ops = ops->buf_ops;
	xfs_btree_init_buf(mp, bp, ops, 0, 0, xfbt->owner);
	xfs_buf_relse(bp);

	xfbt->root.l = cpu_to_be64(bno);
	return 0;
}

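/*
 * Editorial sketch, not from the original source: when this helper runs from
 * a fresh xfbtree_init() call, highest_bno is zero, so the root ends up at
 * xfbtree block 0 of the xmbuf and xfbt->root records that block number.
 * Assuming the two zero arguments to xfs_btree_init_buf() above are the block
 * level and record count, the result is an empty leaf (level 0, no records)
 * stamped with xfbt->owner.  xfbno_to_daddr() and XFBNO_BBSIZE come from the
 * in-memory btree headers and presumably translate an xfbtree block number
 * into 512-byte basic blocks within the xmbuf buftarg.
 */
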
/*
 * Create an in-memory btree root that can be used with the given xmbuf.
 * Callers must set xfbt->owner.
 */
int
xfbtree_init(
	struct xfs_mount		*mp,
	struct xfbtree			*xfbt,
	struct xfs_buftarg		*btp,
	const struct xfs_btree_ops	*ops)
{
	unsigned int			blocklen = xfbtree_rec_bytes(mp, ops);
	unsigned int			keyptr_len;
	int				error;

	/* Requires a long-format, CRC-enabled btree */
	if (!xfs_has_crc(mp)) {
		ASSERT(xfs_has_crc(mp));
		return -EINVAL;
	}
	if (ops->ptr_len != XFS_BTREE_LONG_PTR_LEN) {
		ASSERT(ops->ptr_len == XFS_BTREE_LONG_PTR_LEN);
		return -EINVAL;
	}

	memset(xfbt, 0, sizeof(*xfbt));
	xfbt->target = btp;

	/* Set up min/maxrecs for this btree. */
	keyptr_len = ops->key_len + sizeof(__be64);
	xfbt->maxrecs[0] = blocklen / ops->rec_len;
	xfbt->maxrecs[1] = blocklen / keyptr_len;
	xfbt->minrecs[0] = xfbt->maxrecs[0] / 2;
	xfbt->minrecs[1] = xfbt->maxrecs[1] / 2;
	xfbt->highest_bno = 0;
	xfbt->nlevels = 1;

	/* Initialize the empty btree. */
	error = xfbtree_init_leaf_block(mp, xfbt, ops);
	if (error)
		goto err_freesp;

	trace_xfbtree_init(mp, xfbt, ops);

	return 0;

err_freesp:
	xfs_buftarg_drain(xfbt->target);
	return error;
}

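/*
 * Worked example of the geometry setup above (illustrative only; the record
 * and key sizes are hypothetical, not taken from any particular btree type).
 * Assuming a 4096-byte XMBUF_BLOCKSIZE and a 72-byte long-format CRC block
 * header, blocklen is 4096 - 72 = 4024 bytes.  With ops->rec_len == 16 and
 * ops->key_len == 24:
 *
 *	keyptr_len = 24 + sizeof(__be64) = 32
 *	maxrecs[0] = 4024 / 16 = 251	(leaf blocks)
 *	maxrecs[1] = 4024 / 32 = 125	(node blocks)
 *	minrecs[0] = 251 / 2   = 125
 *	minrecs[1] = 125 / 2   = 62
 */
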
/* Allocate a block to our in-memory btree. */
int
xfbtree_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfbtree			*xfbt = cur->bc_mem.xfbtree;
	xfbno_t				bno = xfbt->highest_bno++;

	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);

	trace_xfbtree_alloc_block(xfbt, cur, bno);

	/* Fail if the block address exceeds the maximum for the buftarg. */
	if (!xfbtree_verify_bno(xfbt, bno)) {
		ASSERT(xfbtree_verify_bno(xfbt, bno));
		*stat = 0;
		return 0;
	}

	new->l = cpu_to_be64(bno);
	*stat = 1;
	return 0;
}

/* Free a block from our in-memory btree. */
int
xfbtree_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfbtree		*xfbt = cur->bc_mem.xfbtree;
	xfs_daddr_t		daddr = xfs_buf_daddr(bp);
	xfbno_t			bno = xfs_daddr_to_xfbno(daddr);

	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);

	trace_xfbtree_free_block(xfbt, cur, bno);

	if (bno + 1 == xfbt->highest_bno)
		xfbt->highest_bno--;

	return 0;
}

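/*
 * Editorial note on the two helpers above (a sketch, not from the original
 * source): xfbtree blocks come from a simple bump allocator, so only the most
 * recently allocated block can actually be reclaimed.  For example, starting
 * from highest_bno == 3:
 *
 *	alloc		-> bno 3, highest_bno becomes 4
 *	free bno 1	-> highest_bno stays 4 (the hole is not reused)
 *	free bno 3	-> 3 + 1 == highest_bno, so highest_bno drops to 3
 *
 * Leaked holes are tolerable because the whole backing xfile is thrown away
 * when the in-memory btree is destroyed.
 */
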
/* Return the minimum number of records for a btree block. */
int
xfbtree_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfbtree		*xfbt = cur->bc_mem.xfbtree;

	return xfbt->minrecs[level != 0];
}

/* Return the maximum number of records for a btree block. */
int
xfbtree_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfbtree		*xfbt = cur->bc_mem.xfbtree;

	return xfbt->maxrecs[level != 0];
}

/* If this log item is a buffer item that came from the xfbtree, return it. */
static inline struct xfs_buf *
xfbtree_buf_match(
	struct xfbtree			*xfbt,
	const struct xfs_log_item	*lip)
{
	const struct xfs_buf_log_item	*bli;
	struct xfs_buf			*bp;

	if (lip->li_type != XFS_LI_BUF)
		return NULL;

	bli = container_of(lip, struct xfs_buf_log_item, bli_item);
	bp = bli->bli_buf;
	if (bp->b_target != xfbt->target)
		return NULL;

	return bp;
}

/*
 * Commit changes to the incore btree immediately by writing all dirty xfbtree
 * buffers to the backing xfile.  This detaches all xfbtree buffers from the
 * transaction, even on failure.  The buffer locks are dropped between the
 * delwri queue and submit, so the caller must synchronize btree access.
 *
 * Normally we'd let the buffers commit with the transaction and get written to
 * the xfile via the log, but online repair stages ephemeral btrees in memory
 * and uses the btree_staging functions to write new btrees to disk atomically.
 * The in-memory btree (and its backing store) is discarded at the end of the
 * repair phase, which means that xfbtree buffers cannot commit with the rest
 * of a transaction.
 *
 * In other words, online repair only needs the transaction to collect buffer
 * pointers and to avoid buffer deadlocks, not to guarantee consistency of
 * updates.
 */
int
xfbtree_trans_commit(
	struct xfbtree		*xfbt,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip, *n;
	bool			tp_dirty = false;
	int			error = 0;

	/*
	 * For each xfbtree buffer attached to the transaction, write the dirty
	 * buffers to the xfile and release them.
	 */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		struct xfs_buf	*bp = xfbtree_buf_match(xfbt, lip);

		if (!bp) {
			if (test_bit(XFS_LI_DIRTY, &lip->li_flags))
				tp_dirty |= true;
			continue;
		}

		trace_xfbtree_trans_commit_buf(xfbt, bp);

		xmbuf_trans_bdetach(tp, bp);

		/*
		 * If the buffer fails verification, note the failure but
		 * continue walking the transaction items so that we remove all
		 * ephemeral btree buffers.
		 */
		if (!error)
			error = xmbuf_finalize(bp);

		xfs_buf_relse(bp);
	}

	/*
	 * Reset the transaction's dirty flag to reflect the dirty state of the
	 * log items that are still attached.
	 */
	tp->t_flags = (tp->t_flags & ~XFS_TRANS_DIRTY) |
			(tp_dirty ? XFS_TRANS_DIRTY : 0);

	return error;
}

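/*
 * Rough sketch of the intended calling pattern (illustrative only; the cursor
 * constructor is a hypothetical placeholder, not a function defined in this
 * file, and error handling is abbreviated):
 *
 *	error = xfbtree_init(mp, &xfbt, xmbuf_target, ops);
 *	...
 *	cur = example_mem_cursor(mp, tp, &xfbt);	// hypothetical helper
 *	error = xfs_btree_insert(cur, &stat);		// stage a new record
 *	xfs_btree_del_cursor(cur, error);
 *
 *	if (error)
 *		xfbtree_trans_cancel(&xfbt, tp);	// detach; btree is dead
 *	else
 *		error = xfbtree_trans_commit(&xfbt, tp);// flush to the xfile
 *	...
 *	xfbtree_destroy(&xfbt);
 */
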
/*
 * Cancel changes to the incore btree by detaching all the xfbtree buffers.
 * Changes are not undone, so callers must not access the btree ever again.
 */
void
xfbtree_trans_cancel(
	struct xfbtree		*xfbt,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip, *n;
	bool			tp_dirty = false;

	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		struct xfs_buf	*bp = xfbtree_buf_match(xfbt, lip);

		if (!bp) {
			if (test_bit(XFS_LI_DIRTY, &lip->li_flags))
				tp_dirty |= true;
			continue;
		}

		trace_xfbtree_trans_cancel_buf(xfbt, bp);

		xmbuf_trans_bdetach(tp, bp);
		xfs_buf_relse(bp);
	}

	/*
	 * Reset the transaction's dirty flag to reflect the dirty state of the
	 * log items that are still attached.
	 */
	tp->t_flags = (tp->t_flags & ~XFS_TRANS_DIRTY) |
			(tp_dirty ? XFS_TRANS_DIRTY : 0);
}