/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
#include "xfs_dinode.h"

STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return(-1);
	} else if (sa->hash > sb->hash) {
		return(1);
	} else {
		return(sa->entno - sb->entno);
	}
}
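
/*
 * Note: breaking ties on entno gives the sorted shortform list a stable
 * order for entries that share a name hash, which lets
 * xfs_attr_shortform_list() resume a partially copied listing from the
 * cursor's (hashval, offset) pair.
 */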

#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))
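
/*
 * XFS_ISRESET_CURSOR() above is true only for an all-zero cursor, i.e. the
 * caller is starting a fresh listing rather than resuming an earlier one.
 */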

/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hashval and sort them before
 * we can begin returning them to the user: the cursor used to resume
 * a later call is expressed in terms of hashvals.
 */
int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_sf_sort_t *sbuf, *sbp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_inode_t *dp;
	int sbsize, nsbuf, count, i;
	int error;

	ASSERT(context != NULL);
	dp = context->dp;
	ASSERT(dp != NULL);
	ASSERT(dp->i_afp != NULL);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return 0;
	cursor = context->cursor;
	ASSERT(cursor != NULL);

	trace_xfs_attr_list_sf(context);

	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			error = context->put_listent(context,
					   sfe->flags,
					   sfe->nameval,
					   (int)sfe->namelen,
					   (int)sfe->valuelen,
					   &sfe->nameval[sfe->namelen]);

			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;

			if (error)
				return error;
			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return 0;
	}

	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;

	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);

	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe);
			kmem_free(sbuf);
			return XFS_ERROR(EFSCORRUPTED);
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		sbp++;
		nsbuf++;
	}

	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);

	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf) {
		kmem_free(sbuf);
		return 0;
	}

	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		error = context->put_listent(context,
					sbp->flags,
					sbp->name,
					sbp->namelen,
					sbp->valuelen,
					&sbp->name[sbp->namelen]);
		if (error) {
			kmem_free(sbuf);
			return error;
		}
		if (context->seen_enough)
			break;
		cursor->offset++;
	}

	kmem_free(sbuf);
	return 0;
}

STATIC int
xfs_attr_node_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_leafblock_t *leaf;
	xfs_da_intnode_t *node;
	struct xfs_attr3_icleaf_hdr leafhdr;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int error, i;
	struct xfs_buf *bp;
	struct xfs_inode *dp = context->dp;

	trace_xfs_attr_node_list(context);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1,
					      &bp, XFS_ATTR_FORK);
		if ((error != 0) && (error != EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(NULL, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
					    entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(NULL, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
					    entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(NULL, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(NULL, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		cursor->blkno = 0;
		for (;;) {
			__uint16_t magic;

			error = xfs_da3_node_read(NULL, dp,
						      cursor->blkno, -1, &bp,
						      XFS_ATTR_FORK);
			if (error)
				return error;
			node = bp->b_addr;
			magic = be16_to_cpu(node->hdr.info.magic);
			if (magic == XFS_ATTR_LEAF_MAGIC ||
			    magic == XFS_ATTR3_LEAF_MAGIC)
				break;
			if (magic != XFS_DA_NODE_MAGIC &&
			    magic != XFS_DA3_NODE_MAGIC) {
				XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
						     XFS_ERRLEVEL_LOW,
						     context->dp->i_mount,
						     node);
				xfs_trans_brelse(NULL, bp);
				return XFS_ERROR(EFSCORRUPTED);
			}

			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			for (i = 0; i < nodehdr.count; btree++, i++) {
				if (cursor->hashval
						<= be32_to_cpu(btree->hashval)) {
					cursor->blkno = be32_to_cpu(btree->before);
					trace_xfs_attr_list_node_descend(context,
									 btree);
					break;
				}
			}
			if (i == nodehdr.count) {
				xfs_trans_brelse(NULL, bp);
				return 0;
			}
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
	for (;;) {
		leaf = bp->b_addr;
		error = xfs_attr3_leaf_list_int(bp, context);
		if (error) {
			xfs_trans_brelse(NULL, bp);
			return error;
		}
		xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(NULL, bp);
		error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(NULL, bp);
	return 0;
}

/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 */
int
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				retval;
	int				i;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			trace_xfs_attr_list_notfound(context);
			return 0;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;

	/*
	 * We have found our place, start copying out the new attributes.
	 */
	retval = 0;
	for (; i < ichdr.count; entry++, i++) {
		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* skip incomplete entries */

		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc =
				xfs_attr3_leaf_name_local(leaf, i);

			retval = context->put_listent(context,
						entry->flags,
						name_loc->nameval,
						(int)name_loc->namelen,
						be16_to_cpu(name_loc->valuelen),
						&name_loc->nameval[name_loc->namelen]);
			if (retval)
				return retval;
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt =
				xfs_attr3_leaf_name_remote(leaf, i);

			int valuelen = be32_to_cpu(name_rmt->valuelen);

			if (context->put_value) {
				xfs_da_args_t args;

				memset((char *)&args, 0, sizeof(args));
				args.dp = context->dp;
				args.whichfork = XFS_ATTR_FORK;
				args.valuelen = valuelen;
				args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
				args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
				args.rmtblkcnt = xfs_attr3_rmt_blocks(
							args.dp->i_mount, valuelen);
				retval = xfs_attr_rmtval_get(&args);
				if (retval) {
					kmem_free(args.value);
					return retval;
				}
				retval = context->put_listent(context,
						entry->flags,
						name_rmt->name,
						(int)name_rmt->namelen,
						valuelen,
						args.value);
				kmem_free(args.value);
			} else {
				retval = context->put_listent(context,
						entry->flags,
						name_rmt->name,
						(int)name_rmt->namelen,
						valuelen,
						NULL);
			}
			if (retval)
				return retval;
		}
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
	return retval;
}

/*
 * Copy out attribute entries for attr_list(), for leaf attribute lists.
 */
STATIC int
xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
	int error;
	struct xfs_buf *bp;

	trace_xfs_attr_leaf_list(context);

	context->cursor->blkno = 0;
	error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp);
	if (error)
		return XFS_ERROR(error);

	error = xfs_attr3_leaf_list_int(bp, context);
	xfs_trans_brelse(NULL, bp);
	return XFS_ERROR(error);
}

int
xfs_attr_list_int(
	xfs_attr_list_context_t *context)
{
	int error;
	xfs_inode_t *dp = context->dp;
	uint lock_mode;

	XFS_STATS_INC(xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return EIO;

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	lock_mode = xfs_ilock_attr_map_shared(dp);
	if (!xfs_inode_hasattr(dp)) {
		error = 0;
	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr_shortform_list(context);
	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_list(context);
	} else {
		error = xfs_attr_node_list(context);
	}
	xfs_iunlock(dp, lock_mode);
	return error;
}

#define	ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
	(((struct attrlist_ent *) 0)->a_name - (char *) 0)
#define	ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
	 & ~(sizeof(u_int32_t)-1))
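
/*
 * Layout sketch (editorial note, not in the original source): the user
 * buffer filled by xfs_attr_put_listent() below grows from both ends.
 * The attrlist header and al_offset[] array grow upward from offset 0,
 * while the variable-size attrlist_ent_t records are packed downward
 * from the end of the buffer, tracked by context->firstu:
 *
 *	+----------+---------------------+--------+------------------+
 *	| attrlist | al_offset[0..count] |  free  | entries (packed) |
 *	+----------+---------------------+--------+------------------+
 *	0                    arraytop -->         <-- firstu   bufsize
 *
 * Assuming the attrlist_ent layout from xfs_fs.h (a 4-byte a_valuelen
 * followed by the NUL-terminated name), ATTR_ENTSIZE(6) is
 * (4 + 6 + 1 + 3) & ~3 = 12 bytes.
 */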

/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later,
 * as we may be reading them directly out of a user buffer.
 */
STATIC int
xfs_attr_put_listent(
	xfs_attr_list_context_t *context,
	int		flags,
	unsigned char	*name,
	int		namelen,
	int		valuelen,
	unsigned char	*value)
{
	struct attrlist *alist = (struct attrlist *)context->alist;
	attrlist_ent_t *aep;
	int arraytop;

	ASSERT(!(context->flags & ATTR_KERNOVAL));
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (((context->flags & ATTR_SECURE) == 0) !=
	    ((flags & XFS_ATTR_SECURE) == 0))
		return 0;
	if (((context->flags & ATTR_ROOT) == 0) !=
	    ((flags & XFS_ATTR_ROOT) == 0))
		return 0;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);
	context->firstu -= ATTR_ENTSIZE(namelen);
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return 1;
	}

	aep = (attrlist_ent_t *)&context->alist[context->firstu];
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
	return 0;
}

/*
 * Generate a list of extended attribute names and optionally
 * also value lengths.  A positive return value follows the XFS
 * convention of being an error; a zero or negative return code
 * is the (negated) length of the buffer returned, indicating
 * success.
 */
int
xfs_attr_list(
	xfs_inode_t	*dp,
	char		*buffer,
	int		bufsize,
	int		flags,
	attrlist_cursor_kern_t *cursor)
{
	xfs_attr_list_context_t context;
	struct attrlist *alist;
	int error;

	/*
	 * Validate the cursor.
	 */
	if (cursor->pad1 || cursor->pad2)
		return(XFS_ERROR(EINVAL));
	if ((cursor->initted == 0) &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return XFS_ERROR(EINVAL);

	/*
	 * Check for a properly aligned buffer.
	 */
	if (((long)buffer) & (sizeof(int)-1))
		return XFS_ERROR(EFAULT);
	if (flags & ATTR_KERNOVAL)
		bufsize = 0;

	/*
	 * Initialize the output buffer.
	 */
	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.flags = flags;
	context.alist = buffer;
	context.bufsize = (bufsize & ~(sizeof(int)-1));	/* align */
	context.firstu = context.bufsize;
	context.put_listent = xfs_attr_put_listent;

	alist = (struct attrlist *)context.alist;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list_int(&context);
	return error;
}
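
/*
 * Illustrative usage sketch (editorial note, not from the original file):
 * callers of xfs_attr_list() zero the attrlist_cursor_kern_t once, then
 * call repeatedly with the same cursor.  Each call fills the buffer with
 * attrlist_ent_t records; when xfs_attr_put_listent() runs out of room it
 * sets al_more, and the next call with the unchanged cursor resumes the
 * listing where the previous one stopped.
 */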