/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return -1;
	} else if (sa->hash > sb->hash) {
		return 1;
	} else {
		return sa->entno - sb->entno;
	}
}
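
/*
 * A cursor is "reset" when every field is still zero, i.e. the caller is
 * starting a fresh listing rather than resuming a previous one.
 */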
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))
/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then we
 * have to calculate each entry's hash value and sort them before we
 * can begin returning them to the user.
 */
static int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_sf_sort_t *sbuf, *sbp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_inode_t *dp;
	int sbsize, nsbuf, count, i;
	int error;

	ASSERT(context != NULL);
	dp = context->dp;
	ASSERT(dp != NULL);
	ASSERT(dp->i_afp != NULL);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return 0;
	cursor = context->cursor;
	ASSERT(cursor != NULL);

	trace_xfs_attr_list_sf(context);

	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			error = context->put_listent(context,
					   sfe->flags,
					   sfe->nameval,
					   (int)sfe->namelen,
					   (int)sfe->valuelen,
					   &sfe->nameval[sfe->namelen]);

			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;

			if (error)
				return error;
			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return 0;
	}

	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;

	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);

	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe);
			kmem_free(sbuf);
			return -EFSCORRUPTED;
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		sbp++;
		nsbuf++;
	}

	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
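
	/*
	 * Several entries may share a hash value, so the cursor records the
	 * hash value plus how many entries with that hash (cursor->offset)
	 * have already been returned.
	 */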
	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf) {
		kmem_free(sbuf);
		return 0;
	}

	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		error = context->put_listent(context,
					sbp->flags,
					sbp->name,
					sbp->namelen,
					sbp->valuelen,
					&sbp->name[sbp->namelen]);
		if (error) {
			kmem_free(sbuf);
			return error;
		}
		if (context->seen_enough)
			break;
		cursor->offset++;
	}

	kmem_free(sbuf);
	return 0;
}
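
/*
 * Copy out attribute entries for attr_list() when the attribute fork is in
 * node (multi-block) format: validate the saved cursor, descend the da-btree
 * to the right leaf block, then walk the leaf blocks in hash order.
 */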
STATIC int
xfs_attr_node_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_leafblock_t *leaf;
	xfs_da_intnode_t *node;
	struct xfs_attr3_icleaf_hdr leafhdr;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int error, i;
	struct xfs_buf *bp;
	struct xfs_inode *dp = context->dp;
	struct xfs_mount *mp = dp->i_mount;

	trace_xfs_attr_node_list(context);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1,
					      &bp, XFS_ATTR_FORK);
		if ((error != 0) && (error != -EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(NULL, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
							     &leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
						entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(NULL, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
						entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(NULL, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(NULL, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		cursor->blkno = 0;
		for (;;) {
			__uint16_t magic;

			error = xfs_da3_node_read(NULL, dp,
						      cursor->blkno, -1, &bp,
						      XFS_ATTR_FORK);
			if (error)
				return error;
			node = bp->b_addr;
			magic = be16_to_cpu(node->hdr.info.magic);
			if (magic == XFS_ATTR_LEAF_MAGIC ||
			    magic == XFS_ATTR3_LEAF_MAGIC)
				break;
			if (magic != XFS_DA_NODE_MAGIC &&
			    magic != XFS_DA3_NODE_MAGIC) {
				XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
						     XFS_ERRLEVEL_LOW,
						     context->dp->i_mount,
						     node);
				xfs_trans_brelse(NULL, bp);
				return -EFSCORRUPTED;
			}

			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
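
			/*
			 * Descend to the child block covering the cursor's
			 * hash value: pick the first entry whose hashval is
			 * not below it.
			 */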
			for (i = 0; i < nodehdr.count; btree++, i++) {
				if (cursor->hashval
						<= be32_to_cpu(btree->hashval)) {
					cursor->blkno = be32_to_cpu(btree->before);
					trace_xfs_attr_list_node_descend(context,
									 btree);
					break;
				}
			}
			if (i == nodehdr.count) {
				xfs_trans_brelse(NULL, bp);
				return 0;
			}
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
	for (;;) {
		leaf = bp->b_addr;
		error = xfs_attr3_leaf_list_int(bp, context);
		if (error) {
			xfs_trans_brelse(NULL, bp);
			return error;
		}
		xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(NULL, bp);
		error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(NULL, bp);
	return 0;
}

/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 */
int
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				retval;
	int				i;
	struct xfs_mount		*mp = context->dp->i_mount;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			trace_xfs_attr_list_notfound(context);
			return 0;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;

	/*
	 * We have found our place, start copying out the new attributes.
	 */
	retval = 0;
	for (; i < ichdr.count; entry++, i++) {
		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* skip incomplete entries */
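
		/*
		 * Local attribute values live inside the leaf block itself;
		 * remote values are stored in separate blocks and may have to
		 * be read in if the caller asked for values as well as names.
		 */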
		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc =
				xfs_attr3_leaf_name_local(leaf, i);

			retval = context->put_listent(context,
						entry->flags,
						name_loc->nameval,
						(int)name_loc->namelen,
						be16_to_cpu(name_loc->valuelen),
						&name_loc->nameval[name_loc->namelen]);
			if (retval)
				return retval;
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt =
				xfs_attr3_leaf_name_remote(leaf, i);

			int valuelen = be32_to_cpu(name_rmt->valuelen);

			if (context->put_value) {
				xfs_da_args_t args;

				memset((char *)&args, 0, sizeof(args));
				args.geo = context->dp->i_mount->m_attr_geo;
				args.dp = context->dp;
				args.whichfork = XFS_ATTR_FORK;
				args.valuelen = valuelen;
				args.rmtvaluelen = valuelen;
				args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
				args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
				args.rmtblkcnt = xfs_attr3_rmt_blocks(
							args.dp->i_mount, valuelen);
				retval = xfs_attr_rmtval_get(&args);
				if (!retval)
					retval = context->put_listent(context,
							entry->flags,
							name_rmt->name,
							(int)name_rmt->namelen,
							valuelen,
							args.value);
				kmem_free(args.value);
			} else {
				retval = context->put_listent(context,
						entry->flags,
						name_rmt->name,
						(int)name_rmt->namelen,
						valuelen,
						NULL);
			}
			if (retval)
				return retval;
		}
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
	return retval;
}

/*
 * Copy out attribute entries for attr_list(), for leaf attribute lists.
 */
STATIC int
xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
	int error;
	struct xfs_buf *bp;

	trace_xfs_attr_leaf_list(context);

	context->cursor->blkno = 0;
	error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp);
	if (error)
		return error;

	error = xfs_attr3_leaf_list_int(bp, context);
	xfs_trans_brelse(NULL, bp);
	return error;
}

int
xfs_attr_list_int(
	xfs_attr_list_context_t	*context)
{
	int error;
	xfs_inode_t *dp = context->dp;
	uint lock_mode;

	XFS_STATS_INC(dp->i_mount, xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	lock_mode = xfs_ilock_attr_map_shared(dp);
	if (!xfs_inode_hasattr(dp)) {
		error = 0;
	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr_shortform_list(context);
	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_list(context);
	} else {
		error = xfs_attr_node_list(context);
	}
	xfs_iunlock(dp, lock_mode);
	return error;
}

#define	ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
	(((struct attrlist_ent *) 0)->a_name - (char *) 0)
#define	ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
	 & ~(sizeof(u_int32_t)-1))
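
/*
 * The attrlist buffer is filled from both ends: attrlist_ent_t records are
 * packed downward from the end of the buffer (tracked by context->firstu),
 * while the al_offset[] index array grows upward after the attrlist header.
 * Listing stops once the two regions would collide.
 */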

/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later,
 * we may be reading them directly out of a user buffer.
 */
STATIC int
xfs_attr_put_listent(
	xfs_attr_list_context_t	*context,
	int		flags,
	unsigned char	*name,
	int		namelen,
	int		valuelen,
	unsigned char	*value)
{
	struct attrlist *alist = (struct attrlist *)context->alist;
	attrlist_ent_t *aep;
	int arraytop;

	ASSERT(!(context->flags & ATTR_KERNOVAL));
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (((context->flags & ATTR_SECURE) == 0) !=
	    ((flags & XFS_ATTR_SECURE) == 0))
		return 0;
	if (((context->flags & ATTR_ROOT) == 0) !=
	    ((flags & XFS_ATTR_ROOT) == 0))
		return 0;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);
	context->firstu -= ATTR_ENTSIZE(namelen);
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return 0;
	}

	aep = (attrlist_ent_t *)&context->alist[context->firstu];
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
	return 0;
}

/*
 * Generate a list of extended attribute names and optionally
 * also value lengths.  Positive return value follows the XFS
 * convention of being an error, zero or negative return code
 * is the length of the buffer returned (negated), indicating
 * success.
 */
int
xfs_attr_list(
	xfs_inode_t	*dp,
	char		*buffer,
	int		bufsize,
	int		flags,
	attrlist_cursor_kern_t *cursor)
{
	xfs_attr_list_context_t context;
	struct attrlist *alist;
	int error;

	/*
	 * Validate the cursor.
	 */
	if (cursor->pad1 || cursor->pad2)
		return -EINVAL;
	if ((cursor->initted == 0) &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return -EINVAL;

	/*
	 * Check for a properly aligned buffer.
	 */
	if (((long)buffer) & (sizeof(int)-1))
		return -EFAULT;
	if (flags & ATTR_KERNOVAL)
		bufsize = 0;

	/*
	 * Initialize the output buffer.
	 */
	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.flags = flags;
	context.alist = buffer;
	context.bufsize = (bufsize & ~(sizeof(int)-1));  /* align */
	context.firstu = context.bufsize;
	context.put_listent = xfs_attr_put_listent;

	alist = (struct attrlist *)context.alist;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list_int(&context);