/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M% %I% %E% SMI"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <thread.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/systeminfo.h>
#include <exacct_impl.h>
#include <sys/exacct_impl.h>

#define	EXACCT_HDR_STR	"exacct"
#define	EXACCT_HDR_LEN	7

#define	DEFAULT_ENTRIES	4
#define	SYSINFO_BUFSIZE	256

static thread_key_t errkey = THR_ONCE_KEY;
static int exacct_errval = 0;
/*
 * extended accounting file access routines
 *
 * exacct_ops.c implements the library-specific routines of libexacct: the
 * operations associated with file access and record traversal.  (The
 * complementary routines which permit hierarchy building and record packing
 * are provided in exacct_core.c, which is used by both libexacct and the
 * kernel.)  At its heart are the unpack, get, and next routines, which
 * navigate the packed records produced by ea_pack_object().
 */
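
/*
 * For orientation, a minimal read-only consumer of this interface looks
 * roughly like the sketch below (illustrative only: the path is a
 * placeholder and error handling is abbreviated):
 *
 *	ea_file_t ef;
 *	ea_object_t obj;
 *
 *	if (ea_open(&ef, "/var/adm/exacct/task", NULL,
 *	    EO_HEAD | EO_VALID_HDR, O_RDONLY, 0) == -1)
 *		return (ea_error());
 *	while (ea_get_object(&ef, &obj) != EO_ERROR) {
 *		... examine obj, then release any allocated item data ...
 *		(void) ea_free_item(&obj, EUP_ALLOC);
 *	}
 *	(void) ea_close(&ef);
 */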
/*
 * Group stack manipulation code.  As groups can be nested, we need a
 * mechanism for saving and restoring the current position within the outer
 * groups.  This state stack is stored within the ea_file_impl_t structure,
 * in the ef_depth, ef_ndeep and ef_mxdeep members.  On error all these
 * functions set exacct_error and return -1.
 */
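
/*
 * To illustrate (a sketch added for clarity, not part of the original
 * commentary): while positioned on the second of three items in a group
 * which is itself the first member of an enclosing group, the stack would
 * hold, roughly,
 *
 *	ef_ndeep = 1
 *	ef_depth[0] = { efd_obj = 0, efd_nobjs = <members in outer group> }
 *	ef_depth[1] = { efd_obj = 1, efd_nobjs = 3 }
 */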
/*
 * If the stack is NULL, create and initialise it.
 * If it is not NULL, check it still has space - if not, double its size.
 */
static int
stack_check(ea_file_impl_t *f)
{
	if (f->ef_depth == NULL) {
		if ((f->ef_depth =
		    ea_alloc(DEFAULT_ENTRIES * sizeof (ea_file_depth_t)))
		    == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		bzero(f->ef_depth, DEFAULT_ENTRIES * sizeof (ea_file_depth_t));
		f->ef_mxdeep = DEFAULT_ENTRIES;
		f->ef_ndeep = -1;
	} else if (f->ef_ndeep + 1 >= f->ef_mxdeep) {
		ea_file_depth_t *newstack;

		if ((newstack =
		    ea_alloc(f->ef_mxdeep * 2 * sizeof (ea_file_depth_t)))
		    == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		bcopy(f->ef_depth, newstack,
		    f->ef_mxdeep * sizeof (ea_file_depth_t));
		bzero(newstack + f->ef_mxdeep,
		    f->ef_mxdeep * sizeof (ea_file_depth_t));
		ea_free(f->ef_depth, f->ef_mxdeep * sizeof (ea_file_depth_t));
		f->ef_mxdeep *= 2;
		f->ef_depth = newstack;
	}
	return (0);
}
static void
stack_free(ea_file_impl_t *f)
{
	if (f->ef_depth != NULL) {
		ea_free(f->ef_depth, f->ef_mxdeep * sizeof (ea_file_depth_t));
		f->ef_depth = NULL;
	}
}
/*
 * Add a new group onto the stack, pushing down one frame.  nobjs is the
 * number of items in the group.  We have to read this many objects before
 * popping back up to an enclosing group - see stack_next_object() and
 * stack_previous_object() below.
 */
static int
stack_new_group(ea_file_impl_t *f, int nobjs)
{
	if (stack_check(f) != 0) {
		/* exacct_errno set above. */
		return (-1);
	}

	f->ef_ndeep++;
	f->ef_depth[f->ef_ndeep].efd_obj = 0;
	f->ef_depth[f->ef_ndeep].efd_nobjs = nobjs;
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
/*
 * Step forwards along the objects within the current group.  If we are still
 * within a group, return 1.  If we have reached the end of the current group,
 * unwind the stack back up to the nearest enclosing group that still has
 * unprocessed objects and return 0.  On EOF or error, set exacct_error
 * accordingly and return -1.  The xread() argument is required so that this
 * function can work on either files or memory buffers.
 */
static int
stack_next_object(ea_file_impl_t *f,
    size_t (*xread)(ea_file_impl_t *, void *, size_t))
{
	uint32_t scratch32;

	/*
	 * If the stack is empty we are not in a group, so there will be no
	 * stack manipulation to do and no large backskips to step over.
	 */
	if (f->ef_ndeep < 0) {
		return (0);
	}

	/*
	 * Otherwise we must be in a group.  If there are objects left in the
	 * group, move onto the next one in the group and return.
	 */
	if (++f->ef_depth[f->ef_ndeep].efd_obj <
	    f->ef_depth[f->ef_ndeep].efd_nobjs) {
		return (1);
	}

	/*
	 * If we are at the end of a group we need to move backwards up the
	 * stack, consuming the large backskips as we go, until we find a group
	 * that still contains unprocessed items, or until we have unwound back
	 * off the bottom of the stack (i.e. out of all the groups).
	 */
	while (f->ef_ndeep >= 0 &&
	    ++f->ef_depth[f->ef_ndeep].efd_obj >=
	    f->ef_depth[f->ef_ndeep].efd_nobjs) {
		/* Read the large backskip. */
		if (xread(f, &scratch32, sizeof (scratch32)) !=
		    sizeof (scratch32)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (-1);
		}
		f->ef_ndeep--;
	}
	return (0);
}
/*
 * Step backwards along the objects within the current group.  If we are still
 * within a group, return 1.  If we have reached the start of the current
 * group, unwind the stack back up to the enclosing group and return 0.
 */
static int
stack_previous_object(ea_file_impl_t *f)
{
	/*
	 * If the stack is empty we are not in a group, so there will be no
	 * stack manipulation to do.
	 */
	if (f->ef_ndeep < 0) {
		return (0);
	}

	/*
	 * Otherwise we must be in a group.  If there are objects left in the
	 * group, move onto the previous one in the group and return.
	 */
	if (--f->ef_depth[f->ef_ndeep].efd_obj >= 0) {
		return (1);
	}

	/* Otherwise, step one level back up the group stack. */
	f->ef_ndeep--;
	return (0);
}
/*
 * read/seek/pos virtualisation wrappers.  Because objects can come either
 * from a file or a memory buffer, the read/seek/pos functions need to be
 * wrapped to allow them to be used on either a file handle or a memory
 * buffer.
 */
static size_t
fread_wrapper(ea_file_impl_t *f, void *buf, size_t sz)
{
	size_t retval;

	retval = fread(buf, 1, sz, f->ef_fp);
	if (retval == 0 && ferror(f->ef_fp)) {
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
	}
	return (retval);
}
static size_t
bufread_wrapper(ea_file_impl_t *f, void *buf, size_t sz)
{
	if (f->ef_bufsize == 0 && sz != 0)
		return ((size_t)0);

	if (f->ef_bufsize < sz)
		sz = f->ef_bufsize;

	bcopy(f->ef_buf, buf, sz);
	f->ef_buf += sz;
	f->ef_bufsize -= sz;

	return (sz);
}
static off_t
fseek_wrapper(ea_file_impl_t *f, off_t adv)
{
	return (fseeko(f->ef_fp, adv, SEEK_CUR));
}
static off_t
bufseek_wrapper(ea_file_impl_t *f, off_t adv)
{
	if (f->ef_bufsize == 0 && adv != 0)
		return ((off_t)-1);

	if (f->ef_bufsize < adv)
		adv = f->ef_bufsize;

	f->ef_buf += adv;
	f->ef_bufsize -= adv;

	return (0);
}
static void *
fpos_wrapper(ea_file_impl_t *f)
{
	return (NULL);
}

static void *
bufpos_wrapper(ea_file_impl_t *f)
{
	return (f->ef_buf);
}
void
exacct_seterr(int errval)
{
	if (thr_main()) {
		exacct_errval = errval;
		return;
	}
	(void) thr_keycreate_once(&errkey, 0);
	(void) thr_setspecific(errkey, (void *)(intptr_t)errval);
}

int
ea_error(void)
{
	if (thr_main())
		return (exacct_errval);
	if (errkey == THR_ONCE_KEY)
		return (EXR_OK);
	return ((int)(uintptr_t)pthread_getspecific(errkey));
}
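
/*
 * Usage note (added for clarity): the routines below report failure through
 * their return value and record the detailed reason via exacct_seterr(); a
 * consumer typically retrieves it as in this sketch:
 *
 *	if (ea_get_object(&ef, &obj) == EO_ERROR &&
 *	    ea_error() == EXR_EOF)
 *		... end of file reached, not an error ...
 */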
/*
 * ea_next_object(), ea_previous_object(), and ea_get_object() are written
 * such that the file cursor is always located on an object boundary.
 */
ea_object_type_t
ea_next_object(ea_file_t *ef, ea_object_t *obj)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;
	ea_size_t len;
	off_t backup;
	size_t ret;

	/*
	 * If ef_advance is zero, then we are executing after a get or previous
	 * operation and do not move to the next or previous object.  Otherwise,
	 * advance to the next available item.  Note that ef_advance does NOT
	 * include the large backskip at the end of an object, this being dealt
	 * with by the depth stack handling in stack_next_object().
	 */
	if (f->ef_advance != 0) {
		if (fseeko(f->ef_fp, (off_t)f->ef_advance, SEEK_CUR) == -1) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			return (EO_ERROR);
		}
		if (stack_next_object(f, fread_wrapper) == -1) {
			/* exacct_error set above. */
			return (EO_ERROR);
		}
	}
	f->ef_advance = 0;

	/* Read the catalog tag. */
	ret = fread(&obj->eo_catalog, 1, sizeof (ea_catalog_t), f->ef_fp);
	if (ret == 0) {
		EXACCT_SET_ERR(EXR_EOF);
		return (EO_ERROR);
	} else if (ret < sizeof (ea_catalog_t)) {
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}
	exacct_order32(&obj->eo_catalog);

	backup = sizeof (ea_catalog_t);
	obj->eo_type = EO_ITEM;

	/* Figure out the offset to just before the large backskip. */
	switch (obj->eo_catalog & EXT_TYPE_MASK) {
	case EXT_GROUP:
		obj->eo_type = EO_GROUP;
		f->ef_advance = sizeof (uint32_t);
		break;
	case EXT_STRING:
	case EXT_EXACCT_OBJECT:
	case EXT_RAW:
		if (fread(&len, 1, sizeof (ea_size_t), f->ef_fp)
		    < sizeof (ea_size_t)) {
			obj->eo_type = EO_NONE;
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order64(&len);
		/* Note: len already includes the size of the backskip. */
		f->ef_advance += sizeof (ea_catalog_t) +
		    sizeof (ea_size_t) + len;
		backup += sizeof (ea_size_t);
		break;
	case EXT_UINT8:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (uint8_t) +
		    sizeof (uint32_t);
		break;
	case EXT_UINT16:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (uint16_t) +
		    sizeof (uint32_t);
		break;
	case EXT_UINT32:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (uint32_t) +
		    sizeof (uint32_t);
		break;
	case EXT_UINT64:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (uint64_t) +
		    sizeof (uint32_t);
		break;
	case EXT_DOUBLE:
		f->ef_advance = sizeof (ea_catalog_t) + sizeof (double) +
		    sizeof (uint32_t);
		break;
	default:
		obj->eo_type = EO_NONE;
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}

	/* Reposition to the start of this object. */
	if (fseeko(f->ef_fp, -backup, SEEK_CUR) == -1) {
		obj->eo_type = EO_NONE;
		f->ef_advance = 0;
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		return (EO_ERROR);
	}

	EXACCT_SET_ERR(EXR_OK);
	return (obj->eo_type);
}
ea_object_type_t
ea_previous_object(ea_file_t *ef, ea_object_t *obj)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;
	uint32_t bkskip;
	size_t r;

	if (fseeko(f->ef_fp, -((off_t)sizeof (uint32_t)), SEEK_CUR) == -1) {
		if (errno == EINVAL) {
			EXACCT_SET_ERR(EXR_EOF);
		} else {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		}
		return (EO_ERROR);
	}

	if ((r = fread(&bkskip, 1, sizeof (uint32_t), f->ef_fp)) !=
	    sizeof (uint32_t)) {
		if (r == 0) {
			EXACCT_SET_ERR(EXR_EOF);
		} else {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		}
		return (EO_ERROR);
	}

	exacct_order32(&bkskip);

	/*
	 * A backskip of 0 means that the current record can't be skipped over.
	 * This will be true for the header record, and for records longer than
	 * the backskip can represent.
	 */
	if (bkskip == 0) {
		EXACCT_SET_ERR(EXR_EOF);
		return (EO_ERROR);
	}

	(void) stack_previous_object(f);

	if (fseeko(f->ef_fp, -((off_t)bkskip), SEEK_CUR) == -1) {
		if (errno == EINVAL) {
			/*
			 * If we attempted to seek past BOF, then the file was
			 * corrupt, as we can only trust the backskip we read.
			 */
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		} else {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		}
		return (EO_ERROR);
	}

	f->ef_advance = 0;
	return (ea_next_object(ef, obj));
}
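
/*
 * Illustrative sketch (hypothetical consumer code, added for clarity): a
 * lastcomm-style reader opens the file positioned at EOF and walks
 * backwards, e.g.
 *
 *	if (ea_open(&ef, path, NULL, EO_TAIL | EO_VALID_HDR,
 *	    O_RDONLY, 0) == 0) {
 *		while (ea_previous_object(&ef, &obj) != EO_ERROR)
 *			... examine obj ...
 *		(void) ea_close(&ef);
 *	}
 */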
/*
 * xget_object() contains the logic for extracting an individual object from a
 * packed buffer, which it consumes using the xread() and xseek() operations
 * provided by the caller.  flags may be set to either EUP_ALLOC, in which
 * case new memory is allocated for the variable-length items unpacked, or
 * EUP_NOALLOC, in which case item data pointers indicate locations within the
 * buffer, using the provided xpos() function.  EUP_NOALLOC is generally not
 * useful for callers representing interaction with actual file streams, and
 * should not be specified by them.
 */
static ea_object_type_t
xget_object(ea_file_impl_t *f, ea_object_t *obj,
    size_t (*xread)(ea_file_impl_t *, void *, size_t),
    off_t (*xseek)(ea_file_impl_t *, off_t),
    void *(*xpos)(ea_file_impl_t *),
    int flags)
{
	ea_size_t sz;
	uint32_t gp_backskip, scratch32;
	void *buf;
	size_t r;

	/* Read the catalog tag. */
	if ((r = xread(f, &obj->eo_catalog, sizeof (ea_catalog_t))) == 0) {
		EXACCT_SET_ERR(EXR_EOF);
		return (EO_ERROR);
	} else if (r != sizeof (ea_catalog_t)) {
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}
	exacct_order32(&obj->eo_catalog);

	/*
	 * If this is a record group, we treat it separately: only record
	 * groups cause us to allocate new depth frames.
	 */
	if ((obj->eo_catalog & EXT_TYPE_MASK) == EXT_GROUP) {
		obj->eo_type = EO_GROUP;

		/* Read size field, and number of objects. */
		if (xread(f, &sz, sizeof (ea_size_t)) != sizeof (ea_size_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		if (xread(f, &obj->eo_group.eg_nobjs, sizeof (uint32_t)) !=
		    sizeof (uint32_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order32(&obj->eo_group.eg_nobjs);

		/* Now read the group's small backskip. */
		if (xread(f, &gp_backskip, sizeof (uint32_t)) !=
		    sizeof (uint32_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}

		/* Push a new depth stack frame. */
		if (stack_new_group(f, obj->eo_group.eg_nobjs) != 0) {
			/* exacct_error set above */
			return (EO_ERROR);
		}

		/*
		 * If the group has no items, we now need to position to the
		 * end of the group, because there will be no subsequent calls
		 * to process the group, it being empty.
		 */
		if (obj->eo_group.eg_nobjs == 0) {
			if (stack_next_object(f, xread) == -1) {
				/* exacct_error set above. */
				return (EO_ERROR);
			}
		}

		EXACCT_SET_ERR(EXR_OK);
		return (obj->eo_type);
	}

	/*
	 * Otherwise we are reading an item.
	 */
	obj->eo_type = EO_ITEM;
	switch (obj->eo_catalog & EXT_TYPE_MASK) {
	case EXT_STRING:
	case EXT_EXACCT_OBJECT:
	case EXT_RAW:
		if (xread(f, &sz, sizeof (ea_size_t)) != sizeof (ea_size_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order64(&sz);

		/*
		 * Subtract backskip value from size.
		 */
		sz -= sizeof (uint32_t);
		if ((flags & EUP_ALLOC_MASK) == EUP_NOALLOC) {
			obj->eo_item.ei_string = xpos(f);
			if (xseek(f, sz) == -1) {
				EXACCT_SET_ERR(EXR_CORRUPT_FILE);
				return (EO_ERROR);
			}
		} else {
			if ((buf = ea_alloc(sz)) == NULL)
				/* exacct_error set above. */
				return (EO_ERROR);
			if (xread(f, buf, sz) != sz) {
				ea_free(buf, sz);
				EXACCT_SET_ERR(EXR_CORRUPT_FILE);
				return (EO_ERROR);
			}
			obj->eo_item.ei_string = buf;
		}
		/*
		 * Maintain our consistent convention that string lengths
		 * include the terminating NULL character.
		 */
		obj->eo_item.ei_size = sz;
		break;
	case EXT_UINT8:
		if (xread(f, &obj->eo_item.ei_uint8, sizeof (uint8_t)) !=
		    sizeof (uint8_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		obj->eo_item.ei_size = sizeof (uint8_t);
		break;
	case EXT_UINT16:
		if (xread(f, &obj->eo_item.ei_uint16, sizeof (uint16_t)) !=
		    sizeof (uint16_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order16(&obj->eo_item.ei_uint16);
		obj->eo_item.ei_size = sizeof (uint16_t);
		break;
	case EXT_UINT32:
		if (xread(f, &obj->eo_item.ei_uint32, sizeof (uint32_t)) !=
		    sizeof (uint32_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order32(&obj->eo_item.ei_uint32);
		obj->eo_item.ei_size = sizeof (uint32_t);
		break;
	case EXT_UINT64:
		if (xread(f, &obj->eo_item.ei_uint64, sizeof (uint64_t)) !=
		    sizeof (uint64_t)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order64(&obj->eo_item.ei_uint64);
		obj->eo_item.ei_size = sizeof (uint64_t);
		break;
	case EXT_DOUBLE:
		if (xread(f, &obj->eo_item.ei_double, sizeof (double)) !=
		    sizeof (double)) {
			EXACCT_SET_ERR(EXR_CORRUPT_FILE);
			return (EO_ERROR);
		}
		exacct_order64((uint64_t *)&obj->eo_item.ei_double);
		obj->eo_item.ei_size = sizeof (double);
		break;
	default:
		/*
		 * We've encountered an unknown type value.  Flag the error and
		 * exit.
		 */
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}

	/*
	 * Advance over current large backskip value,
	 * and position at the start of the next object.
	 */
	if (xread(f, &scratch32, sizeof (scratch32)) != sizeof (scratch32)) {
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (EO_ERROR);
	}
	if (stack_next_object(f, xread) == -1) {
		/* exacct_error set above. */
		return (EO_ERROR);
	}

	EXACCT_SET_ERR(EXR_OK);
	return (obj->eo_type);
}
ea_object_type_t
ea_get_object(ea_file_t *ef, ea_object_t *obj)
{
	return (xget_object((ea_file_impl_t *)ef, obj, fread_wrapper,
	    fseek_wrapper, fpos_wrapper, EUP_ALLOC));
}
/*
 * unpack_group() recursively unpacks record groups from the buffer tucked
 * within the passed ea_file, and attaches them to grp.
 */
static int
unpack_group(ea_file_impl_t *f, ea_object_t *grp, int flag)
{
	ea_object_t *obj;
	uint_t nobjs = grp->eo_group.eg_nobjs;
	uint_t i;

	/*
	 * Set the group's object count to zero, as we will rebuild it via the
	 * individual object attachments.
	 */
	grp->eo_group.eg_nobjs = 0;
	grp->eo_group.eg_objs = NULL;

	for (i = 0; i < nobjs; i++) {
		if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		bzero(obj, sizeof (ea_object_t));
		if (xget_object(f, obj, bufread_wrapper, bufseek_wrapper,
		    bufpos_wrapper, flag) == -1) {
			ea_free(obj, sizeof (ea_object_t));
			/* exacct_errno set above. */
			return (-1);
		}
		(void) ea_attach_to_group(grp, obj);
		if (obj->eo_type == EO_GROUP &&
		    unpack_group(f, obj, flag) == -1) {
			/* exacct_errno set above. */
			return (-1);
		}
	}

	if (nobjs != grp->eo_group.eg_nobjs) {
		EXACCT_SET_ERR(EXR_CORRUPT_FILE);
		return (-1);
	}
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
/*
 * ea_unpack_object() can be considered as a finite series of get operations on
 * a given buffer, that rebuilds the hierarchy of objects compacted by a pack
 * operation.  Because there is complex state associated with the group depth,
 * ea_unpack_object() must complete as one operation on a given buffer.
 */
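
/*
 * Illustrative round trip (a sketch only; error checks are omitted and the
 * variable names are placeholders):
 *
 *	sz = ea_pack_object(obj, NULL, 0);
 *	buf = ea_alloc(sz);
 *	(void) ea_pack_object(obj, buf, sz);
 *	(void) ea_unpack_object(&unpacked, EUP_ALLOC, buf, sz);
 */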
ea_object_type_t
ea_unpack_object(ea_object_t **objp, int flag, void *buf, size_t bufsize)
{
	ea_file_impl_t fake;
	ea_object_t *obj;
	ea_object_type_t first_obj_type;

	if (buf == NULL) {
		EXACCT_SET_ERR(EXR_INVALID_BUF);
		return (EO_ERROR);
	}

	/* Set up the structures needed for unpacking */
	bzero(&fake, sizeof (ea_file_impl_t));
	if (stack_check(&fake) == -1) {
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	fake.ef_buf = buf;
	fake.ef_bufsize = bufsize;

	/* Unpack the first object in the buffer - this should succeed. */
	if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	bzero(obj, sizeof (ea_object_t));
	if ((first_obj_type = xget_object(&fake, obj, bufread_wrapper,
	    bufseek_wrapper, bufpos_wrapper, flag)) == -1) {
		ea_free(obj, sizeof (ea_object_t));
		/* exacct_errno set above. */
		return (EO_ERROR);
	}

	if (obj->eo_type == EO_GROUP && unpack_group(&fake, obj, flag) == -1) {
		ea_free_object(obj, flag);
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	*objp = obj;

	/*
	 * There may be other objects in the buffer - if so, chain them onto
	 * the end of the list.  We have reached the end of the list when
	 * xget_object() returns -1 with exacct_error set to EXR_EOF.
	 */
	for (;;) {
		if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
			ea_free_object(*objp, flag);
			/* exacct_errno set above. */
			return (EO_ERROR);
		}
		bzero(obj, sizeof (ea_object_t));
		if (xget_object(&fake, obj, bufread_wrapper, bufseek_wrapper,
		    bufpos_wrapper, flag) == -1) {
			ea_free(obj, sizeof (ea_object_t));
			if (ea_error() == EXR_EOF) {
				EXACCT_SET_ERR(EXR_OK);
				return (first_obj_type);
			}
			ea_free_object(*objp, flag);
			/* exacct_error set above. */
			return (EO_ERROR);
		}

		(void) ea_attach_to_object(*objp, obj);

		if (obj->eo_type == EO_GROUP &&
		    unpack_group(&fake, obj, flag) == -1) {
			ea_free(obj, sizeof (ea_object_t));
			ea_free_object(*objp, flag);
			/* exacct_errno set above. */
			return (EO_ERROR);
		}
	}
	/*NOTREACHED*/
}
int
ea_write_object(ea_file_t *ef, ea_object_t *obj)
{
	size_t sz;
	void *buf;
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	/*
	 * If we weren't opened for writing, this call fails.
	 */
	if ((f->ef_oflags & O_RDWR) == 0 &&
	    (f->ef_oflags & O_WRONLY) == 0) {
		EXACCT_SET_ERR(EXR_NOTSUPP);
		return (-1);
	}

	/* Pack with a null buffer to get the size. */
	sz = ea_pack_object(obj, NULL, 0);
	if (sz == (size_t)-1 || (buf = ea_alloc(sz)) == NULL) {
		/* exacct_error set above. */
		return (-1);
	}
	if (ea_pack_object(obj, buf, sz) == (size_t)-1) {
		ea_free(buf, sz);
		/* exacct_error set above. */
		return (-1);
	}
	if (fwrite(buf, sizeof (char), sz, f->ef_fp) != sz) {
		ea_free(buf, sz);
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		return (-1);
	}
	ea_free(buf, sz);
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
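
/*
 * A writer typically builds a record with the core routines and hands it to
 * ea_write_object(); a minimal sketch (EXD_MYDATA is a hypothetical catalog
 * ID and error checks are omitted):
 *
 *	ea_object_t grp, item;
 *
 *	(void) ea_set_group(&grp, EXT_GROUP | EXC_DEFAULT | EXD_MYDATA);
 *	(void) ea_set_item(&item, EXT_UINT32 | EXC_DEFAULT | EXD_MYDATA,
 *	    &value, 0);
 *	(void) ea_attach_to_group(&grp, &item);
 *	(void) ea_write_object(ef, &grp);
 */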
/*
 * validate_header() must be kept in sync with write_header(), given below, and
 * with exacct_create_header(), in kernel/os/exacct.c.
 */
static int
validate_header(ea_file_t *ef, const char *creator)
{
	ea_object_t hdr_grp;
	ea_object_t scratch_obj;
	int error = EXR_CORRUPT_FILE;
	int saw_version = 0;
	int saw_type = 0;
	int saw_creator = 0;
	int saw_hostname = 0;
	uint_t n;
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	bzero(&hdr_grp, sizeof (ea_object_t));
	bzero(&scratch_obj, sizeof (ea_object_t));

	if (ea_get_object(ef, &hdr_grp) != EO_GROUP) {
		error = ea_error();
		goto error_case;
	}
	if (hdr_grp.eo_catalog !=
	    (EXT_GROUP | EXC_DEFAULT | EXD_GROUP_HEADER)) {
		error = EXR_CORRUPT_FILE;
		goto error_case;
	}

	for (n = 0; n < hdr_grp.eo_group.eg_nobjs; n++) {
		bzero(&scratch_obj, sizeof (ea_object_t));
		if (ea_get_object(ef, &scratch_obj) == -1) {
			error = ea_error();
			goto error_case;
		}

		switch (scratch_obj.eo_catalog) {
		case EXT_UINT32 | EXC_DEFAULT | EXD_VERSION:
			if (scratch_obj.eo_item.ei_uint32 != EXACCT_VERSION) {
				error = EXR_UNKN_VERSION;
				goto error_case;
			}
			saw_version++;
			break;
		case EXT_STRING | EXC_DEFAULT | EXD_FILETYPE:
			if (strcmp(scratch_obj.eo_item.ei_string,
			    EXACCT_HDR_STR) != 0) {
				error = EXR_CORRUPT_FILE;
				goto error_case;
			}
			saw_type++;
			break;
		case EXT_STRING | EXC_DEFAULT | EXD_CREATOR:
			f->ef_creator =
			    ea_strdup(scratch_obj.eo_item.ei_string);
			if (f->ef_creator == NULL) {
				error = ea_error();
				goto error_case;
			}
			saw_creator++;
			break;
		/* The hostname is an optional field. */
		case EXT_STRING | EXC_DEFAULT | EXD_HOSTNAME:
			f->ef_hostname =
			    ea_strdup(scratch_obj.eo_item.ei_string);
			if (f->ef_hostname == NULL) {
				error = ea_error();
				goto error_case;
			}
			saw_hostname++;
			break;
		default:
			/* ignore unrecognized header members */
			break;
		}
		(void) ea_free_item(&scratch_obj, EUP_ALLOC);
	}

	if (saw_version && saw_type && saw_creator) {
		if (creator && strcmp(f->ef_creator, creator) != 0) {
			error = EXR_NO_CREATOR;
			goto error_case;
		}
		EXACCT_SET_ERR(EXR_OK);
		return (0);
	}

error_case:
	(void) ea_free_item(&scratch_obj, EUP_ALLOC);
	if (f->ef_hostname != NULL)
		ea_strfree(f->ef_hostname);
	if (f->ef_creator != NULL)
		ea_strfree(f->ef_creator);
	EXACCT_SET_ERR(error);
	return (-1);
}
static int
write_header(ea_file_t *ef)
{
	ea_object_t hdr_grp;
	ea_object_t vers_obj;
	ea_object_t creator_obj;
	ea_object_t filetype_obj;
	ea_object_t hostname_obj;
	uint32_t bskip;
	const uint32_t version = EXACCT_VERSION;
	ea_file_impl_t *f = (ea_file_impl_t *)ef;
	void *buf;
	size_t bufsize;
	char hostbuf[SYSINFO_BUFSIZE];
	int error = EXR_OK;

	bzero(&hdr_grp, sizeof (ea_object_t));
	bzero(&vers_obj, sizeof (ea_object_t));
	bzero(&creator_obj, sizeof (ea_object_t));
	bzero(&filetype_obj, sizeof (ea_object_t));
	bzero(&hostname_obj, sizeof (ea_object_t));
	bzero(hostbuf, SYSINFO_BUFSIZE);

	(void) sysinfo(SI_HOSTNAME, hostbuf, SYSINFO_BUFSIZE);

	if (ea_set_item(&vers_obj, EXT_UINT32 | EXC_DEFAULT | EXD_VERSION,
	    (void *)&version, 0) == -1 ||
	    ea_set_item(&creator_obj, EXT_STRING | EXC_DEFAULT | EXD_CREATOR,
	    f->ef_creator, strlen(f->ef_creator)) == -1 ||
	    ea_set_item(&filetype_obj, EXT_STRING | EXC_DEFAULT | EXD_FILETYPE,
	    EXACCT_HDR_STR, strlen(EXACCT_HDR_STR)) == -1 ||
	    ea_set_item(&hostname_obj, EXT_STRING | EXC_DEFAULT | EXD_HOSTNAME,
	    hostbuf, strlen(hostbuf)) == -1) {
		/* exacct_error set above. */
		return (-1);
	}

	(void) ea_set_group(&hdr_grp,
	    EXT_GROUP | EXC_DEFAULT | EXD_GROUP_HEADER);
	(void) ea_attach_to_group(&hdr_grp, &vers_obj);
	(void) ea_attach_to_group(&hdr_grp, &creator_obj);
	(void) ea_attach_to_group(&hdr_grp, &filetype_obj);
	(void) ea_attach_to_group(&hdr_grp, &hostname_obj);

	/* Get the required size by passing a null buffer. */
	bufsize = ea_pack_object(&hdr_grp, NULL, 0);
	if ((buf = ea_alloc(bufsize)) == NULL) {
		/* exacct_error set above. */
		return (-1);
	}

	if (ea_pack_object(&hdr_grp, buf, bufsize) == (size_t)-1) {
		error = ea_error();
	} else {
		/*
		 * To prevent reading the header when reading the file
		 * backwards, set the large backskip of the header group to 0
		 * (last 4 bytes).
		 */
		bskip = 0;
		exacct_order32(&bskip);
		bcopy(&bskip, (char *)buf + bufsize - sizeof (bskip),
		    sizeof (bskip));

		if (fwrite(buf, sizeof (char), bufsize, f->ef_fp) != bufsize ||
		    fflush(f->ef_fp) == EOF) {
			error = EXR_SYSCALL_FAIL;
		}
	}

	ea_free(buf, bufsize);
	(void) ea_free_item(&vers_obj, EUP_ALLOC);
	(void) ea_free_item(&creator_obj, EUP_ALLOC);
	(void) ea_free_item(&filetype_obj, EUP_ALLOC);
	(void) ea_free_item(&hostname_obj, EUP_ALLOC);
	EXACCT_SET_ERR(error);
	return (error == EXR_OK ? 0 : -1);
}
const char *
ea_get_creator(ea_file_t *ef)
{
	return ((const char *)((ea_file_impl_t *)ef)->ef_creator);
}

const char *
ea_get_hostname(ea_file_t *ef)
{
	return ((const char *)((ea_file_impl_t *)ef)->ef_hostname);
}
int
ea_fdopen(ea_file_t *ef, int fd, const char *creator, int aflags, int oflags)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	bzero(f, sizeof (*f));
	f->ef_oflags = oflags;
	f->ef_fd = fd;
	f->ef_ndeep = -1;

	/* Initialize depth stack. */
	if (stack_check(f) == -1) {
		/* exacct_error set above. */
		return (-1);
	}

	/*
	 * 1. If we are O_CREAT, then we will need to write a header
	 * after opening name.
	 */
	if (oflags & O_CREAT) {
		if (creator == NULL) {
			EXACCT_SET_ERR(EXR_NO_CREATOR);
			goto error1;
		}
		if ((f->ef_creator = ea_strdup(creator)) == NULL) {
			/* exacct_error set above. */
			goto error1;
		}
		if ((f->ef_fp = fdopen(f->ef_fd, "w")) == NULL) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error2;
		}
		if (write_header(ef) == -1) {
			/* exacct_error set above. */
			goto error2;
		}

	/*
	 * 2. If we are not O_CREAT, but are RDWR or WRONLY, we need to
	 * seek to EOF so that appends will succeed.
	 */
	} else if (oflags & O_RDWR || oflags & O_WRONLY) {
		if ((f->ef_fp = fdopen(f->ef_fd, "r+")) == NULL) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error1;
		}

		if ((aflags & EO_VALIDATE_MSK) == EO_VALID_HDR) {
			if (validate_header(ef, creator) < 0) {
				/* exacct_error set above. */
				goto error1;
			}
		}

		if (fseeko(f->ef_fp, 0, SEEK_END) == -1) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error1;
		}

	/*
	 * 3. This is an undefined manner for opening an exacct file.
	 */
	} else if (oflags != O_RDONLY) {
		EXACCT_SET_ERR(EXR_NOTSUPP);
		goto error1;

	/*
	 * 4a. If we are RDONLY, then we are in a position such that
	 * either a ea_get_object or an ea_next_object will succeed.  If
	 * aflags was set to EO_TAIL, seek to the end of the file.
	 */
	} else {
		if ((f->ef_fp = fdopen(f->ef_fd, "r")) == NULL) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error1;
		}

		if ((aflags & EO_VALIDATE_MSK) == EO_VALID_HDR) {
			if (validate_header(ef, creator) == -1) {
				/* exacct_error set above. */
				goto error1;
			}
		}

		/*
		 * 4b. Handle the "open at end" option, for consumers who want
		 * to go backwards through the file (i.e. lastcomm).
		 */
		if ((aflags & EO_POSN_MSK) == EO_TAIL) {
			if (fseeko(f->ef_fp, 0, SEEK_END) < 0) {
				EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
				goto error1;
			}
		}
	}

	EXACCT_SET_ERR(EXR_OK);
	return (0);

	/* Error cleanup code */
error2:
	ea_strfree(f->ef_creator);
error1:
	stack_free(f);
	bzero(f, sizeof (*f));
	return (-1);
}
int
ea_open(ea_file_t *ef, const char *name, const char *creator,
    int aflags, int oflags, mode_t mode)
{
	int fd;

	/*
	 * If overwriting an existing file, make sure to truncate it
	 * to prevent the file being created corrupt.
	 */
	if (oflags & O_CREAT)
		oflags |= O_TRUNC;

	if ((fd = open(name, oflags, mode)) == -1) {
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		return (-1);
	}

	if (ea_fdopen(ef, fd, creator, aflags, oflags) == -1) {
		(void) close(fd);
		return (-1);
	}

	return (0);
}
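
/*
 * For example (a sketch; the path and creator string are placeholders):
 *
 *	if (ea_open(&ef, "/tmp/example.exacct", "mydaemon", 0,
 *	    O_CREAT | O_WRONLY, 0644) == 0) {
 *		... ea_write_object(&ef, ...) ...
 *		(void) ea_close(&ef);
 *	}
 */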
/*
 * ea_close() performs all appropriate close operations on the open exacct
 * file, including releasing any memory allocated while parsing the file.
 */
int
ea_close(ea_file_t *ef)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	if (f->ef_creator != NULL)
		ea_strfree(f->ef_creator);
	if (f->ef_hostname != NULL)
		ea_strfree(f->ef_hostname);

	ea_free(f->ef_depth, f->ef_mxdeep * sizeof (ea_file_depth_t));

	if (fclose(f->ef_fp)) {
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
		return (-1);
	}

	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
/*
 * Empty the input buffer and clear any EOF or error bits set on the
 * underlying FILE.  This can be used by any library clients who wish to
 * handle files that are in motion or who wish to seek the underlying file
 * descriptor.
 */
void
ea_clear(ea_file_t *ef)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	(void) fflush(f->ef_fp);
	clearerr(f->ef_fp);
}
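
/*
 * For example, a consumer tailing a file that is still being written might
 * (sketch only, error handling abbreviated) do
 *
 *	while (ea_get_object(&ef, &obj) != EO_ERROR)
 *		... process obj ...
 *	if (ea_error() == EXR_EOF) {
 *		ea_clear(&ef);
 *		... sleep and retry ...
 *	}
 */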
/*
 * Copy an ea_object_t.  Note that in the case of a group, just the group
 * object will be copied, and not its list of members.  To recursively copy
 * a group or a list of items use ea_copy_object_tree().
 */
ea_object_t *
ea_copy_object(const ea_object_t *src)
{
	ea_object_t *dst;

	/* Allocate a new object and copy to it. */
	if ((dst = ea_alloc(sizeof (ea_object_t))) == NULL) {
		/* exacct_error set above. */
		return (NULL);
	}
	bcopy(src, dst, sizeof (ea_object_t));
	dst->eo_next = NULL;

	switch (src->eo_type) {
	case EO_GROUP:
		dst->eo_group.eg_nobjs = 0;
		dst->eo_group.eg_objs = NULL;
		break;
	case EO_ITEM:
		/* Items containing pointers need special treatment. */
		switch (src->eo_catalog & EXT_TYPE_MASK) {
		case EXT_STRING:
			if (src->eo_item.ei_string != NULL) {
				dst->eo_item.ei_string =
				    ea_strdup(src->eo_item.ei_string);
				if (dst->eo_item.ei_string == NULL) {
					ea_free_object(dst, EUP_ALLOC);
					/* exacct_error set above. */
					return (NULL);
				}
			}
			break;
		case EXT_RAW:
			if (src->eo_item.ei_raw != NULL) {
				dst->eo_item.ei_raw =
				    ea_alloc(src->eo_item.ei_size);
				if (dst->eo_item.ei_raw == NULL) {
					ea_free_object(dst, EUP_ALLOC);
					/* exacct_error set above. */
					return (NULL);
				}
				bcopy(src->eo_item.ei_raw, dst->eo_item.ei_raw,
				    (size_t)src->eo_item.ei_size);
			}
			break;
		case EXT_EXACCT_OBJECT:
			if (src->eo_item.ei_object != NULL) {
				dst->eo_item.ei_object =
				    ea_alloc(src->eo_item.ei_size);
				if (dst->eo_item.ei_object == NULL) {
					ea_free_object(dst, EUP_ALLOC);
					/* exacct_error set above. */
					return (NULL);
				}
				bcopy(src->eo_item.ei_raw, dst->eo_item.ei_raw,
				    (size_t)src->eo_item.ei_size);
			}
			break;
		default:
			/* Other item types require no special handling. */
			break;
		}
		break;
	default:
		ea_free_object(dst, EUP_ALLOC);
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (NULL);
	}

	EXACCT_SET_ERR(EXR_OK);
	return (dst);
}
/*
 * Recursively copy a list of ea_object_t.  All the elements in the eo_next
 * list will be copied, and any group objects will be recursively copied.
 */
ea_object_t *
ea_copy_object_tree(const ea_object_t *src)
{
	ea_object_t *ret_obj, *dst, *last;

	for (ret_obj = last = NULL; src != NULL;
	    last = dst, src = src->eo_next) {

		/* Allocate a new object and copy to it. */
		if ((dst = ea_copy_object(src)) == NULL) {
			ea_free_object(ret_obj, EUP_ALLOC);
			return (NULL);
		}

		/* Groups need the object list copying. */
		if (src->eo_type == EO_GROUP) {
			dst->eo_group.eg_objs =
			    ea_copy_object_tree(src->eo_group.eg_objs);
			if (dst->eo_group.eg_objs == NULL) {
				ea_free_object(ret_obj, EUP_ALLOC);
				return (NULL);
			}
			dst->eo_group.eg_nobjs = src->eo_group.eg_nobjs;
		}

		/* Remember the list head the first time round. */
		if (ret_obj == NULL) {
			ret_obj = dst;
		}

		/* Link together if not at the list head. */
		if (last != NULL) {
			last->eo_next = dst;
		}
	}

	EXACCT_SET_ERR(EXR_OK);
	return (ret_obj);
}
/*
 * Read in the specified number of objects, returning the same data structure
 * that would have originally been passed to ea_write_object().
 */
ea_object_t *
ea_get_object_tree(ea_file_t *ef, uint32_t nobj)
{
	ea_object_t *first_obj, *prev_obj, *obj;

	first_obj = prev_obj = NULL;
	while (nobj--) {
		/* Allocate space for the new object. */
		obj = ea_alloc(sizeof (ea_object_t));
		bzero(obj, sizeof (*obj));

		if (ea_get_object(ef, obj) == -1) {
			ea_free(obj, sizeof (ea_object_t));
			if (first_obj != NULL) {
				ea_free_object(first_obj, EUP_ALLOC);
			}
			/* exacct_error set above. */
			return (NULL);
		}

		/* Link it into the list. */
		if (first_obj == NULL) {
			first_obj = obj;
		}
		if (prev_obj != NULL) {
			prev_obj->eo_next = obj;
		}
		prev_obj = obj;

		/* Recurse if the object is a group with contents. */
		if (obj->eo_type == EO_GROUP && obj->eo_group.eg_nobjs > 0) {
			if ((obj->eo_group.eg_objs = ea_get_object_tree(ef,
			    obj->eo_group.eg_nobjs)) == NULL) {
				/* exacct_error set above. */
				ea_free_object(first_obj, EUP_ALLOC);
				return (NULL);
			}
		}
	}

	EXACCT_SET_ERR(EXR_OK);
	return (first_obj);
}