/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return -1;
	} else if (sa->hash > sb->hash) {
		return 1;
	} else {
		return sa->entno - sb->entno;
	}
}
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))

/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hashval and sort them before
 * we can begin returning them to the user.
 */
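/*
 * The sort below matters because a listing can span several calls: the
 * cursor records only a hashval and an offset within that hashval, so the
 * entries must be emitted in a deterministic (hashval, entno) order for a
 * later call to resume at the right place.
 */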
STATIC int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_sf_sort_t *sbuf, *sbp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_inode_t *dp;
	int sbsize, nsbuf, count, i;

	ASSERT(context != NULL);
	dp = context->dp;
	ASSERT(dp != NULL);
	ASSERT(dp->i_afp != NULL);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return 0;
	cursor = context->cursor;
	ASSERT(cursor != NULL);

	trace_xfs_attr_list_sf(context);
	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
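	/*
	 * Illustrative numbers for the check below: with 10 shortform
	 * entries occupying if_bytes == 200 bytes, the unsorted fast path
	 * is taken only when the caller's buffer exceeds 200 + 10 * 16 =
	 * 360 bytes; anything smaller falls through to the sort-and-resume
	 * path further down.
	 */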
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			context->put_listent(context,
					     sfe->flags,
					     sfe->nameval,
					     (int)sfe->namelen,
					     (int)sfe->valuelen);
			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;
			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return 0;
	}

	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;
	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe);
			kmem_free(sbuf);
			return -EFSCORRUPTED;
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		sbp++;
		nsbuf++;
	}
	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf) {
		kmem_free(sbuf);
		return 0;
	}
	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		context->put_listent(context,
				     sbp->flags,
				     sbp->name,
				     sbp->namelen,
				     sbp->valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}

	kmem_free(sbuf);
	return 0;
}
STATIC int
xfs_attr_node_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_leafblock_t *leaf;
	xfs_da_intnode_t *node;
	struct xfs_attr3_icleaf_hdr leafhdr;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int error, i;
	struct xfs_buf *bp;
	struct xfs_inode *dp = context->dp;
	struct xfs_mount *mp = dp->i_mount;

	trace_xfs_attr_node_list(context);

	cursor = context->cursor;
	cursor->initted = 1;
	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
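	/*
	 * Concretely: the saved blkno must still read back as an attr leaf
	 * block whose hashval range covers cursor->hashval.  A node magic
	 * number, a last-entry hashval below the cursor, or a first-entry
	 * hashval at or above it all mean the cursor is stale, so the
	 * buffer is released and the lookup restarts from the root below.
	 */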
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(context->tp, dp, cursor->blkno, -1,
					  &bp, XFS_ATTR_FORK);
		if ((error != 0) && (error != -EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
							     &leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
					    entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
					    entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
			}
		}
	}
	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		cursor->blkno = 0;
		for (;;) {
			uint16_t magic;

			error = xfs_da3_node_read(context->tp, dp,
						  cursor->blkno, -1, &bp,
						  XFS_ATTR_FORK);
			if (error)
				return error;
			node = bp->b_addr;
			magic = be16_to_cpu(node->hdr.info.magic);
			if (magic == XFS_ATTR_LEAF_MAGIC ||
			    magic == XFS_ATTR3_LEAF_MAGIC)
				break;
			if (magic != XFS_DA_NODE_MAGIC &&
			    magic != XFS_DA3_NODE_MAGIC) {
				XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
						     XFS_ERRLEVEL_LOW,
						     context->dp->i_mount,
						     node);
				xfs_trans_brelse(context->tp, bp);
				return -EFSCORRUPTED;
			}
			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			for (i = 0; i < nodehdr.count; btree++, i++) {
				if (cursor->hashval
				    <= be32_to_cpu(btree->hashval)) {
					cursor->blkno = be32_to_cpu(btree->before);
					trace_xfs_attr_list_node_descend(context,
									 btree);
					break;
				}
			}
			if (i == nodehdr.count) {
				xfs_trans_brelse(context->tp, bp);
				return 0;
			}
			xfs_trans_brelse(context->tp, bp);
		}
	}
	ASSERT(bp != NULL);
	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
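	/*
	 * Each iteration lists one leaf and then follows its forward
	 * sibling pointer (leafhdr.forw); the walk stops once put_listent
	 * reports the buffer is full (seen_enough) or there is no sibling.
	 */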
	for (;;) {
		leaf = bp->b_addr;
		xfs_attr3_leaf_list_int(bp, context);
		xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(context->tp, bp);
		error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno, -1, &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(context->tp, bp);
	return 0;
}
/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 */
void
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				i;
	struct xfs_mount		*mp = context->dp->i_mount;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor = context->cursor;
	cursor->initted = 1;
	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			trace_xfs_attr_list_notfound(context);
			return;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;
	/*
	 * We have found our place, start copying out the new attributes.
	 */
	for (; i < ichdr.count; entry++, i++) {
		char *name;
		int namelen, valuelen;

		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* skip incomplete entries */

		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc;

			name_loc = xfs_attr3_leaf_name_local(leaf, i);
			name = name_loc->nameval;
			namelen = name_loc->namelen;
			valuelen = be16_to_cpu(name_loc->valuelen);
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt;

			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			name = name_rmt->name;
			namelen = name_rmt->namelen;
			valuelen = be32_to_cpu(name_rmt->valuelen);
		}

		context->put_listent(context, entry->flags,
				     name, namelen, valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
}
/*
 * Copy out attribute entries for attr_list(), for leaf attribute lists.
 */
STATIC int
xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
	int error;
	struct xfs_buf *bp;

	trace_xfs_attr_leaf_list(context);

	context->cursor->blkno = 0;
	error = xfs_attr3_leaf_read(context->tp, context->dp, 0, -1, &bp);
	if (error)
		return error;

	xfs_attr3_leaf_list_int(bp, context);
	xfs_trans_brelse(context->tp, bp);
	return 0;
}
int
xfs_attr_list_int_ilocked(
	struct xfs_attr_list_context	*context)
{
	struct xfs_inode		*dp = context->dp;

	ASSERT(xfs_isilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (!xfs_inode_hasattr(dp))
		return 0;
	else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
		return xfs_attr_shortform_list(context);
	else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
		return xfs_attr_leaf_list(context);
	return xfs_attr_node_list(context);
}
int
xfs_attr_list_int(
	xfs_attr_list_context_t	*context)
{
	int error;
	xfs_inode_t *dp = context->dp;
	uint lock_mode;

	XFS_STATS_INC(dp->i_mount, xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	lock_mode = xfs_ilock_attr_map_shared(dp);
	error = xfs_attr_list_int_ilocked(context);
	xfs_iunlock(dp, lock_mode);
	return error;
}
#define	ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
	(((struct attrlist_ent *) 0)->a_name - (char *) 0)
#define	ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
	 & ~(sizeof(u_int32_t)-1))
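/*
 * Worked example, assuming the usual attrlist_ent layout where a_name
 * directly follows a single 32-bit a_valuelen field (ATTR_ENTBASESIZE == 4):
 * for a 5-byte name, ATTR_ENTSIZE(5) = (4 + 5 + 1 + 3) & ~3 = 12 bytes,
 * i.e. the value length, the name and its NUL terminator rounded up to the
 * next 4-byte boundary.
 */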
/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later;
 * we may be reading them directly out of a user buffer.
 */
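/*
 * The output buffer is filled from both ends: the attrlist header and its
 * growing al_offset[] array sit at the front, while each formatted entry is
 * placed at a decreasing firstu offset from the back.  When the two regions
 * would meet, the list is flagged as truncated (al_more) and seen_enough
 * stops the walk.
 */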
STATIC void
xfs_attr_put_listent(
	xfs_attr_list_context_t *context,
	int		flags,
	unsigned char	*name,
	int		namelen,
	int		valuelen)
{
	struct attrlist *alist = (struct attrlist *)context->alist;
	attrlist_ent_t *aep;
	int arraytop;

	ASSERT(!(context->flags & ATTR_KERNOVAL));
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);
	/*
	 * Only list entries in the right namespace.
	 */
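	/*
	 * The mismatch tests below require the secure/root bits of the
	 * request and of the on-disk entry to agree; if either bit differs,
	 * the entry belongs to a different attribute namespace and is
	 * skipped.
	 */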
	if (((context->flags & ATTR_SECURE) == 0) !=
	    ((flags & XFS_ATTR_SECURE) == 0))
		return;
	if (((context->flags & ATTR_ROOT) == 0) !=
	    ((flags & XFS_ATTR_ROOT) == 0))
		return;
	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);
	context->firstu -= ATTR_ENTSIZE(namelen);
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return;
	}

	aep = (attrlist_ent_t *)&context->alist[context->firstu];
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
	return;
}
/*
 * Generate a list of extended attribute names and optionally
 * also value lengths.  Positive return value follows the XFS
 * convention of being an error, zero or negative return code
 * is the length of the buffer returned (negated), indicating
 * success.
 */
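/*
 * A caller is expected to invoke this repeatedly with the same cursor:
 * each call fills the buffer with an attrlist whose al_offset[] entries
 * point at attrlist_ent records, and sets al_more when another call is
 * needed to drain the remaining attributes.
 */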
int
xfs_attr_list(
	xfs_inode_t	*dp,
	char		*buffer,
	int		bufsize,
	int		flags,
	attrlist_cursor_kern_t *cursor)
{
	xfs_attr_list_context_t context;
	struct attrlist *alist;
	int error;

	/*
	 * Validate the cursor.
	 */
	if (cursor->pad1 || cursor->pad2)
		return -EINVAL;
	if ((cursor->initted == 0) &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return -EINVAL;
	/*
	 * Check for a properly aligned buffer.
	 */
	if (((long)buffer) & (sizeof(int)-1))
		return -EFAULT;
	if (flags & ATTR_KERNOVAL)
		bufsize = 0;
	/*
	 * Initialize the output buffer.
	 */
	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.flags = flags;
	context.alist = buffer;
	context.bufsize = (bufsize & ~(sizeof(int)-1));  /* align */
	context.firstu = context.bufsize;
	context.put_listent = xfs_attr_put_listent;
	alist = (struct attrlist *)context.alist;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list_int(&context);
	if (error)
		error = -error;
	return error;
}