2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * Copyright (c) 2013 Red Hat, Inc.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it would be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "xfs_types.h"
24 #include "xfs_trans.h"
27 #include "xfs_mount.h"
28 #include "xfs_da_btree.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_alloc.h"
33 #include "xfs_btree.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_attr_remote.h"
36 #include "xfs_dinode.h"
37 #include "xfs_inode.h"
38 #include "xfs_inode_item.h"
41 #include "xfs_attr_leaf.h"
42 #include "xfs_error.h"
43 #include "xfs_trace.h"
44 #include "xfs_buf_item.h"
45 #include "xfs_cksum.h"
/*
 * Comparator for sorting xfs_attr_sf_sort_t records: primary key is
 * the attribute name hash, secondary key is the entry number (entno)
 * so equal-hash entries keep a deterministic relative order.
 * NOTE(review): this extraction elides interior lines -- the opening
 * brace and the return statements for the '<' and '>' branches are
 * not visible here; confirm their sign convention against the full
 * source before editing.
 */
48 xfs_attr_shortform_compare(const void *a
, const void *b
)
50 xfs_attr_sf_sort_t
*sa
, *sb
;
52 sa
= (xfs_attr_sf_sort_t
*)a
;
53 sb
= (xfs_attr_sf_sort_t
*)b
;
54 if (sa
->hash
< sb
->hash
) {
56 } else if (sa
->hash
> sb
->hash
) {
59 return(sa
->entno
- sb
->entno
);
/*
 * A cursor is in its pristine "reset" state (never used to walk an
 * attribute list) when all four of its resume fields are still zero.
 */
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	!((cursor)->blkno) && !((cursor)->offset))
/*
 * List the entries of a shortform (inline-in-inode) attribute fork.
 * Fast path: if bufsize is 0 (search callback) or the cursor is reset
 * and everything fits (16-byte-per-entry fudge factor), emit entries
 * in on-disk order directly.  Slow path: copy hash/name/len/flags of
 * every entry into a kmem_alloc'd sbuf, sort by hash then entno via
 * xfs_attr_shortform_compare, re-find the cursor position in the
 * sorted array, and resume emitting from there; out-of-bounds sfe
 * pointers trigger XFS_CORRUPTION_ERROR/EFSCORRUPTED.
 * NOTE(review): this extraction elides many interior lines
 * (declarations of dp/error, several returns, loop bodies, kmem_free
 * of sbuf) -- verify against the full source before editing.
 */
67 * Copy out entries of shortform attribute lists for attr_list().
68 * Shortform attribute lists are not stored in hashval sorted order.
69 * If the output buffer is not large enough to hold them all, then we
70 * we have to calculate each entries' hashvalue and sort them before
71 * we can begin returning them to the user.
74 xfs_attr_shortform_list(xfs_attr_list_context_t
*context
)
76 attrlist_cursor_kern_t
*cursor
;
77 xfs_attr_sf_sort_t
*sbuf
, *sbp
;
78 xfs_attr_shortform_t
*sf
;
79 xfs_attr_sf_entry_t
*sfe
;
81 int sbsize
, nsbuf
, count
, i
;
84 ASSERT(context
!= NULL
);
87 ASSERT(dp
->i_afp
!= NULL
);
88 sf
= (xfs_attr_shortform_t
*)dp
->i_afp
->if_u1
.if_data
;
92 cursor
= context
->cursor
;
93 ASSERT(cursor
!= NULL
);
95 trace_xfs_attr_list_sf(context
);
98 * If the buffer is large enough and the cursor is at the start,
99 * do not bother with sorting since we will return everything in
100 * one buffer and another call using the cursor won't need to be
102 * Note the generous fudge factor of 16 overhead bytes per entry.
103 * If bufsize is zero then put_listent must be a search function
104 * and can just scan through what we have.
106 if (context
->bufsize
== 0 ||
107 (XFS_ISRESET_CURSOR(cursor
) &&
108 (dp
->i_afp
->if_bytes
+ sf
->hdr
.count
* 16) < context
->bufsize
)) {
109 for (i
= 0, sfe
= &sf
->list
[0]; i
< sf
->hdr
.count
; i
++) {
110 error
= context
->put_listent(context
,
115 &sfe
->nameval
[sfe
->namelen
]);
118 * Either search callback finished early or
119 * didn't fit it all in the buffer after all.
121 if (context
->seen_enough
)
126 sfe
= XFS_ATTR_SF_NEXTENTRY(sfe
);
128 trace_xfs_attr_list_sf_all(context
);
132 /* do no more for a search callback */
133 if (context
->bufsize
== 0)
137 * It didn't all fit, so we have to sort everything on hashval.
139 sbsize
= sf
->hdr
.count
* sizeof(*sbuf
);
140 sbp
= sbuf
= kmem_alloc(sbsize
, KM_SLEEP
| KM_NOFS
);
143 * Scan the attribute list for the rest of the entries, storing
144 * the relevant info from only those that match into a buffer.
147 for (i
= 0, sfe
= &sf
->list
[0]; i
< sf
->hdr
.count
; i
++) {
149 ((char *)sfe
< (char *)sf
) ||
150 ((char *)sfe
>= ((char *)sf
+ dp
->i_afp
->if_bytes
)))) {
151 XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
153 context
->dp
->i_mount
, sfe
);
155 return XFS_ERROR(EFSCORRUPTED
);
159 sbp
->hash
= xfs_da_hashname(sfe
->nameval
, sfe
->namelen
);
160 sbp
->name
= sfe
->nameval
;
161 sbp
->namelen
= sfe
->namelen
;
162 /* These are bytes, and both on-disk, don't endian-flip */
163 sbp
->valuelen
= sfe
->valuelen
;
164 sbp
->flags
= sfe
->flags
;
165 sfe
= XFS_ATTR_SF_NEXTENTRY(sfe
);
171 * Sort the entries on hash then entno.
173 xfs_sort(sbuf
, nsbuf
, sizeof(*sbuf
), xfs_attr_shortform_compare
);
176 * Re-find our place IN THE SORTED LIST.
181 for (sbp
= sbuf
, i
= 0; i
< nsbuf
; i
++, sbp
++) {
182 if (sbp
->hash
== cursor
->hashval
) {
183 if (cursor
->offset
== count
) {
187 } else if (sbp
->hash
> cursor
->hashval
) {
197 * Loop putting entries into the user buffer.
199 for ( ; i
< nsbuf
; i
++, sbp
++) {
200 if (cursor
->hashval
!= sbp
->hash
) {
201 cursor
->hashval
= sbp
->hash
;
204 error
= context
->put_listent(context
,
209 &sbp
->name
[sbp
->namelen
]);
212 if (context
->seen_enough
)
/*
 * List attributes stored in a multi-block (node-format) attr btree.
 * First tries to resume at cursor->blkno: reads the block, switches
 * on its magic, and for a leaf block checks that cursor->hashval
 * falls within [entries[0].hashval, entries[count-1].hashval];
 * anything amiss releases the buffer (trace_xfs_attr_list_wrong_blk)
 * and falls back to descending from the btree root by hashval,
 * following btree->before pointers until a leaf is reached.  Then
 * iterates leaf blocks via xfs_attr3_leaf_list_int(), following
 * leafhdr.forw, until context->seen_enough or the chain ends.
 * NOTE(review): this extraction elides many interior lines
 * (declarations of bp/error/i/magic, returns, goto labels, loop
 * structure) -- verify against the full source before editing.
 */
222 xfs_attr_node_list(xfs_attr_list_context_t
*context
)
224 attrlist_cursor_kern_t
*cursor
;
225 xfs_attr_leafblock_t
*leaf
;
226 xfs_da_intnode_t
*node
;
227 struct xfs_attr3_icleaf_hdr leafhdr
;
228 struct xfs_da3_icnode_hdr nodehdr
;
229 struct xfs_da_node_entry
*btree
;
233 trace_xfs_attr_node_list(context
);
235 cursor
= context
->cursor
;
239 * Do all sorts of validation on the passed-in cursor structure.
240 * If anything is amiss, ignore the cursor and look up the hashval
241 * starting from the btree root.
244 if (cursor
->blkno
> 0) {
245 error
= xfs_da3_node_read(NULL
, context
->dp
, cursor
->blkno
, -1,
247 if ((error
!= 0) && (error
!= EFSCORRUPTED
))
250 struct xfs_attr_leaf_entry
*entries
;
253 switch (be16_to_cpu(node
->hdr
.info
.magic
)) {
254 case XFS_DA_NODE_MAGIC
:
255 case XFS_DA3_NODE_MAGIC
:
256 trace_xfs_attr_list_wrong_blk(context
);
257 xfs_trans_brelse(NULL
, bp
);
260 case XFS_ATTR_LEAF_MAGIC
:
261 case XFS_ATTR3_LEAF_MAGIC
:
263 xfs_attr3_leaf_hdr_from_disk(&leafhdr
, leaf
);
264 entries
= xfs_attr3_leaf_entryp(leaf
);
265 if (cursor
->hashval
> be32_to_cpu(
266 entries
[leafhdr
.count
- 1].hashval
)) {
267 trace_xfs_attr_list_wrong_blk(context
);
268 xfs_trans_brelse(NULL
, bp
);
270 } else if (cursor
->hashval
<= be32_to_cpu(
271 entries
[0].hashval
)) {
272 trace_xfs_attr_list_wrong_blk(context
);
273 xfs_trans_brelse(NULL
, bp
);
278 trace_xfs_attr_list_wrong_blk(context
);
279 xfs_trans_brelse(NULL
, bp
);
286 * We did not find what we expected given the cursor's contents,
287 * so we start from the top and work down based on the hash value.
288 * Note that start of node block is same as start of leaf block.
295 error
= xfs_da3_node_read(NULL
, context
->dp
,
296 cursor
->blkno
, -1, &bp
,
301 magic
= be16_to_cpu(node
->hdr
.info
.magic
);
302 if (magic
== XFS_ATTR_LEAF_MAGIC
||
303 magic
== XFS_ATTR3_LEAF_MAGIC
)
305 if (magic
!= XFS_DA_NODE_MAGIC
&&
306 magic
!= XFS_DA3_NODE_MAGIC
) {
307 XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
309 context
->dp
->i_mount
,
311 xfs_trans_brelse(NULL
, bp
);
312 return XFS_ERROR(EFSCORRUPTED
);
315 xfs_da3_node_hdr_from_disk(&nodehdr
, node
);
316 btree
= xfs_da3_node_tree_p(node
);
317 for (i
= 0; i
< nodehdr
.count
; btree
++, i
++) {
319 <= be32_to_cpu(btree
->hashval
)) {
320 cursor
->blkno
= be32_to_cpu(btree
->before
);
321 trace_xfs_attr_list_node_descend(context
,
326 if (i
== nodehdr
.count
) {
327 xfs_trans_brelse(NULL
, bp
);
330 xfs_trans_brelse(NULL
, bp
);
336 * Roll upward through the blocks, processing each leaf block in
337 * order. As long as there is space in the result buffer, keep
338 * adding the information.
342 error
= xfs_attr3_leaf_list_int(bp
, context
);
344 xfs_trans_brelse(NULL
, bp
);
347 xfs_attr3_leaf_hdr_from_disk(&leafhdr
, leaf
);
348 if (context
->seen_enough
|| leafhdr
.forw
== 0)
350 cursor
->blkno
= leafhdr
.forw
;
351 xfs_trans_brelse(NULL
, bp
);
352 error
= xfs_attr3_leaf_read(NULL
, context
->dp
, cursor
->blkno
, -1,
357 xfs_trans_brelse(NULL
, bp
);
/*
 * Emit the entries of one leaf block through context->put_listent.
 * When context->resynch is set, first re-find the position matching
 * cursor->hashval (with cursor->offset vs context->dupcnt
 * disambiguating duplicate hashes); then walk the remaining entries,
 * skipping XFS_ATTR_INCOMPLETE ones.  Local (XFS_ATTR_LOCAL) names
 * are emitted straight from the leaf; remote names either report
 * name_rmt->valuelen or, when context->put_value is set, fetch the
 * value via xfs_attr_rmtval_get() into a kmem_alloc'd buffer that is
 * kmem_free'd afterwards.  Stops early once context->seen_enough.
 * NOTE(review): this extraction elides interior lines (declarations
 * of retval/i/args, entry initialization, several put_listent
 * arguments, returns) -- verify against the full source before
 * editing.
 */
362 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
365 xfs_attr3_leaf_list_int(
367 struct xfs_attr_list_context
*context
)
369 struct attrlist_cursor_kern
*cursor
;
370 struct xfs_attr_leafblock
*leaf
;
371 struct xfs_attr3_icleaf_hdr ichdr
;
372 struct xfs_attr_leaf_entry
*entries
;
373 struct xfs_attr_leaf_entry
*entry
;
377 trace_xfs_attr_list_leaf(context
);
380 xfs_attr3_leaf_hdr_from_disk(&ichdr
, leaf
);
381 entries
= xfs_attr3_leaf_entryp(leaf
);
383 cursor
= context
->cursor
;
387 * Re-find our place in the leaf block if this is a new syscall.
389 if (context
->resynch
) {
391 for (i
= 0; i
< ichdr
.count
; entry
++, i
++) {
392 if (be32_to_cpu(entry
->hashval
) == cursor
->hashval
) {
393 if (cursor
->offset
== context
->dupcnt
) {
398 } else if (be32_to_cpu(entry
->hashval
) >
404 if (i
== ichdr
.count
) {
405 trace_xfs_attr_list_notfound(context
);
412 context
->resynch
= 0;
415 * We have found our place, start copying out the new attributes.
418 for (; i
< ichdr
.count
; entry
++, i
++) {
419 if (be32_to_cpu(entry
->hashval
) != cursor
->hashval
) {
420 cursor
->hashval
= be32_to_cpu(entry
->hashval
);
424 if (entry
->flags
& XFS_ATTR_INCOMPLETE
)
425 continue; /* skip incomplete entries */
427 if (entry
->flags
& XFS_ATTR_LOCAL
) {
428 xfs_attr_leaf_name_local_t
*name_loc
=
429 xfs_attr3_leaf_name_local(leaf
, i
);
431 retval
= context
->put_listent(context
,
434 (int)name_loc
->namelen
,
435 be16_to_cpu(name_loc
->valuelen
),
436 &name_loc
->nameval
[name_loc
->namelen
]);
440 xfs_attr_leaf_name_remote_t
*name_rmt
=
441 xfs_attr3_leaf_name_remote(leaf
, i
);
443 int valuelen
= be32_to_cpu(name_rmt
->valuelen
);
445 if (context
->put_value
) {
448 memset((char *)&args
, 0, sizeof(args
));
449 args
.dp
= context
->dp
;
450 args
.whichfork
= XFS_ATTR_FORK
;
451 args
.valuelen
= valuelen
;
452 args
.value
= kmem_alloc(valuelen
, KM_SLEEP
| KM_NOFS
);
453 args
.rmtblkno
= be32_to_cpu(name_rmt
->valueblk
);
454 args
.rmtblkcnt
= xfs_attr3_rmt_blocks(
455 args
.dp
->i_mount
, valuelen
);
456 retval
= xfs_attr_rmtval_get(&args
);
459 retval
= context
->put_listent(context
,
462 (int)name_rmt
->namelen
,
465 kmem_free(args
.value
);
467 retval
= context
->put_listent(context
,
470 (int)name_rmt
->namelen
,
477 if (context
->seen_enough
)
481 trace_xfs_attr_list_leaf_end(context
);
/*
 * List attributes for an inode whose attr fork is exactly one leaf
 * block: reset cursor->blkno to 0, read logical block 0 of the attr
 * fork, hand it to xfs_attr3_leaf_list_int(), then release the
 * buffer and return the (XFS_ERROR-wrapped) result.
 * NOTE(review): this extraction elides the declarations of bp/error
 * and the opening brace -- verify against the full source before
 * editing.
 */
486 * Copy out attribute entries for attr_list(), for leaf attribute lists.
489 xfs_attr_leaf_list(xfs_attr_list_context_t
*context
)
494 trace_xfs_attr_leaf_list(context
);
496 context
->cursor
->blkno
= 0;
497 error
= xfs_attr3_leaf_read(NULL
, context
->dp
, 0, -1, &bp
);
499 return XFS_ERROR(error
);
501 error
= xfs_attr3_leaf_list_int(bp
, context
);
502 xfs_trans_brelse(NULL
, bp
);
503 return XFS_ERROR(error
);
/*
 * Dispatch attribute listing based on the attr fork format: bail on
 * a forced shutdown, take XFS_ILOCK_SHARED, then call the shortform
 * (XFS_DINODE_FMT_LOCAL), single-leaf (xfs_bmap_one_block), or node
 * walker as appropriate, and drop the lock.
 * NOTE(review): the function's name/signature line is elided from
 * this extraction -- presumably xfs_attr_list_int given the caller
 * below; confirm against the full source.  The no-attr branch body
 * and the final return are also not visible.
 */
508 xfs_attr_list_context_t
*context
)
511 xfs_inode_t
*dp
= context
->dp
;
513 XFS_STATS_INC(xs_attr_list
);
515 if (XFS_FORCED_SHUTDOWN(dp
->i_mount
))
518 xfs_ilock(dp
, XFS_ILOCK_SHARED
);
521 * Decide on what work routines to call based on the inode size.
523 if (!xfs_inode_hasattr(dp
)) {
525 } else if (dp
->i_d
.di_aformat
== XFS_DINODE_FMT_LOCAL
) {
526 error
= xfs_attr_shortform_list(context
);
527 } else if (xfs_bmap_one_block(dp
, XFS_ATTR_FORK
)) {
528 error
= xfs_attr_leaf_list(context
);
530 error
= xfs_attr_node_list(context
);
533 xfs_iunlock(dp
, XFS_ILOCK_SHARED
);
/*
 * Minimum bytes used by one attrlist entry: the byte offset of the
 * a_name field within struct attrlist_ent.
 * NOTE(review): the historical null-pointer subtraction below is
 * formally undefined behavior; offsetof(struct attrlist_ent, a_name)
 * is the well-defined spelling -- confirm offsetof is in scope in
 * this build before switching.
 */
#define ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
	(((struct attrlist_ent *) 0)->a_name - (char *) 0)

/*
 * Actual bytes consumed by one attrlist entry: entry header plus the
 * name plus its trailing NUL, rounded up to u_int32_t alignment.
 */
#define ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
	& ~(sizeof(u_int32_t)-1))
/*
 * context->put_listent callback that formats one attribute into the
 * user-supplied attrlist buffer.  Entries matching the wrong
 * namespace (ATTR_SECURE/ATTR_ROOT vs the entry's XFS_ATTR_* flags)
 * are filtered out.  Entry payloads are packed downward from the end
 * of the buffer (context->firstu decreases by ATTR_ENTSIZE) while
 * the al_offset[] index array grows upward from the front; when the
 * two would collide, context->seen_enough is set and the entry is
 * not stored.  Each stored entry gets a NUL-terminated copy of the
 * name and its valuelen, and al_count is kept current.
 * NOTE(review): this extraction elides interior lines (the
 * name/namelen/valuelen/flags parameters, declarations of
 * aep/arraytop, the returns) -- verify against the full source
 * before editing.
 */
545 * Format an attribute and copy it out to the user's buffer.
546 * Take care to check values and protect against them changing later,
547 * we may be reading them directly out of a user buffer.
550 xfs_attr_put_listent(
551 xfs_attr_list_context_t
*context
,
556 unsigned char *value
)
558 struct attrlist
*alist
= (struct attrlist
*)context
->alist
;
562 ASSERT(!(context
->flags
& ATTR_KERNOVAL
));
563 ASSERT(context
->count
>= 0);
564 ASSERT(context
->count
< (ATTR_MAX_VALUELEN
/8));
565 ASSERT(context
->firstu
>= sizeof(*alist
));
566 ASSERT(context
->firstu
<= context
->bufsize
);
569 * Only list entries in the right namespace.
571 if (((context
->flags
& ATTR_SECURE
) == 0) !=
572 ((flags
& XFS_ATTR_SECURE
) == 0))
574 if (((context
->flags
& ATTR_ROOT
) == 0) !=
575 ((flags
& XFS_ATTR_ROOT
) == 0))
578 arraytop
= sizeof(*alist
) +
579 context
->count
* sizeof(alist
->al_offset
[0]);
580 context
->firstu
-= ATTR_ENTSIZE(namelen
);
581 if (context
->firstu
< arraytop
) {
582 trace_xfs_attr_list_full(context
);
584 context
->seen_enough
= 1;
588 aep
= (attrlist_ent_t
*)&context
->alist
[context
->firstu
];
589 aep
->a_valuelen
= valuelen
;
590 memcpy(aep
->a_name
, name
, namelen
);
591 aep
->a_name
[namelen
] = 0;
592 alist
->al_offset
[context
->count
++] = context
->firstu
;
593 alist
->al_count
= context
->count
;
594 trace_xfs_attr_list_add(context
);
/*
 * Top-level attribute list entry point (presumably xfs_attr_list --
 * its signature line is elided from this extraction).  Validates the
 * caller's cursor (pad fields clear; if not yet initted, all resume
 * fields must be zero) and the int-alignment of the output buffer,
 * zero-fills an xfs_attr_list_context_t, wires in
 * xfs_attr_put_listent as the output callback with firstu starting
 * at the (aligned-down) end of the buffer, primes al_offset[0], and
 * calls xfs_attr_list_int() to perform the walk.
 * NOTE(review): truncated extraction -- the parameter list, the
 * ATTR_KERNOVAL branch body, several context field assignments, and
 * the tail of this function are not visible here.
 */
599 * Generate a list of extended attribute names and optionally
600 * also value lengths. Positive return value follows the XFS
601 * convention of being an error, zero or negative return code
602 * is the length of the buffer returned (negated), indicating
611 attrlist_cursor_kern_t
*cursor
)
613 xfs_attr_list_context_t context
;
614 struct attrlist
*alist
;
618 * Validate the cursor.
620 if (cursor
->pad1
|| cursor
->pad2
)
621 return(XFS_ERROR(EINVAL
));
622 if ((cursor
->initted
== 0) &&
623 (cursor
->hashval
|| cursor
->blkno
|| cursor
->offset
))
624 return XFS_ERROR(EINVAL
);
627 * Check for a properly aligned buffer.
629 if (((long)buffer
) & (sizeof(int)-1))
630 return XFS_ERROR(EFAULT
);
631 if (flags
& ATTR_KERNOVAL
)
635 * Initialize the output buffer.
637 memset(&context
, 0, sizeof(context
));
639 context
.cursor
= cursor
;
641 context
.flags
= flags
;
642 context
.alist
= buffer
;
643 context
.bufsize
= (bufsize
& ~(sizeof(int)-1)); /* align */
644 context
.firstu
= context
.bufsize
;
645 context
.put_listent
= xfs_attr_put_listent
;
647 alist
= (struct attrlist
*)context
.alist
;
650 alist
->al_offset
[0] = context
.bufsize
;
652 error
= xfs_attr_list_int(&context
);