// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor functions for unpacking policy loaded from
 * userspace.
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 *
 * AppArmor uses a serialized binary format for loading policy. To find
 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
 * All policy is validated before it is used.
 */
#include <asm/unaligned.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zlib.h>

#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_unpack.h"
#define K_ABI_MASK 0x3ff
#define FORCE_COMPLAIN_FLAG 0x800
#define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
#define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))

#define v5	5	/* base version */
#define v6	6	/* per entry policydb mediation check */
#define v7	7
#define v8	8	/* full network masking */
/*
 * The AppArmor interface treats data as a type byte followed by the
 * actual data.  The interface has the notion of a named entry
 * which has a name (AA_NAME typecode followed by name string) followed by
 * the entry's typecode and data.  Named types allow for optional
 * elements and extensions to be added and tested for without breaking
 * backwards compatibility.
 */
enum aa_code {
	AA_U8,
	AA_U16,
	AA_U32,
	AA_U64,
	AA_NAME,	/* same as string except it is the item's name */
	AA_STRING,
	AA_BLOB,
	AA_STRUCT,
	AA_STRUCTEND,
	AA_LIST,
	AA_LISTEND,
	AA_ARRAY,
	AA_ARRAYEND,
};
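/*
 * Illustrative encoding of a named element (a sketch derived from the
 * unpack helpers below, not a normative format description): an AA_NAME
 * byte, a little-endian u16 length, the NUL-terminated tag name, then the
 * element's own typecode and payload, e.g. AA_U32 followed by a
 * little-endian 32-bit value.  unpack_nameX() consumes the name header and
 * typecode; unpack_u32() and friends consume the payload.
 */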
/*
 * aa_ext is the read head of the buffer containing the serialized profile.
 * The data is copied into a kernel buffer in apparmorfs and then handed off
 * to the unpack routines.
 */
struct aa_ext {
	void *start;
	void *end;
	void *pos;	/* pointer to current position in the buffer */
	u32 version;
};
/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
	struct common_audit_data *sa = va;

	if (aad(sa)->iface.ns) {
		audit_log_format(ab, " ns=");
		audit_log_untrustedstring(ab, aad(sa)->iface.ns);
	}
	if (aad(sa)->name) {
		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, aad(sa)->name);
	}
	if (aad(sa)->iface.pos)
		audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
}
/**
 * audit_iface - do audit message for policy unpacking/load/replace/remove
 * @new: profile if it has been allocated (MAYBE NULL)
 * @ns_name: name of the ns the profile is to be loaded to (MAYBE NULL)
 * @name: name of the profile being manipulated (MAYBE NULL)
 * @info: any extra info about the failure (MAYBE NULL)
 * @e: buffer position info
 * @error: error code
 *
 * Returns: %0 or error
 */
static int audit_iface(struct aa_profile *new, const char *ns_name,
		       const char *name, const char *info, struct aa_ext *e,
		       int error)
{
	struct aa_profile *profile = labels_profile(aa_current_raw_label());
	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);

	if (e)
		aad(&sa)->iface.pos = e->pos - e->start;
	aad(&sa)->iface.ns = ns_name;
	if (new)
		aad(&sa)->name = new->base.hname;
	else
		aad(&sa)->name = name;
	aad(&sa)->info = info;
	aad(&sa)->error = error;

	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
}
void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
	AA_BUG(!data);
	AA_BUG(!data->ns);
	AA_BUG(!data->dents[AAFS_LOADDATA_REVISION]);
	AA_BUG(!mutex_is_locked(&data->ns->lock));
	AA_BUG(data->revision > revision);

	data->revision = revision;
	d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
		current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
	d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
		current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
}
bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
	if (l->size != r->size)
		return false;
	if (l->compressed_size != r->compressed_size)
		return false;
	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
		return false;
	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}
/*
 * need to take the ns mutex lock which is NOT safe most places that
 * put_loaddata is called, so we have to delay freeing it
 */
static void do_loaddata_free(struct work_struct *work)
{
	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
	struct aa_ns *ns = aa_get_ns(d->ns);

	if (ns) {
		mutex_lock_nested(&ns->lock, ns->level);
		__aa_fs_remove_rawdata(d);
		mutex_unlock(&ns->lock);
		aa_put_ns(ns);
	}

	kfree(d->hash);
	kfree(d->name);
	kvfree(d->data);
	kfree(d);
}
void aa_loaddata_kref(struct kref *kref)
{
	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);

	if (d) {
		INIT_WORK(&d->work, do_loaddata_free);
		schedule_work(&d->work);
	}
}
struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
	struct aa_loaddata *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (d == NULL)
		return ERR_PTR(-ENOMEM);
	d->data = kvzalloc(size, GFP_KERNEL);
	if (!d->data) {
		kfree(d);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&d->count);
	INIT_LIST_HEAD(&d->list);

	return d;
}
/* test if read will be in packed data bounds */
static bool inbounds(struct aa_ext *e, size_t size)
{
	return (size <= e->end - e->pos);
}
static void *kvmemdup(const void *src, size_t len)
{
	void *p = kvmalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, src, len);
	return p;
}
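/*
 * Note: kvmemdup() mirrors kmemdup() but uses kvmalloc()-backed memory so
 * that large blobs (e.g. the key/value data table payloads copied in
 * unpack_profile() below) do not require physically contiguous pages.
 */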
/**
 * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
 * @e: serialized data read head (NOT NULL)
 * @chunk: start address for chunk of data (NOT NULL)
 *
 * Returns: the size of chunk found with the read head at the end of the chunk.
 */
static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
	size_t size = 0;
	void *pos = e->pos;

	if (!inbounds(e, sizeof(u16)))
		goto fail;
	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
	e->pos += sizeof(__le16);
	if (!inbounds(e, size))
		goto fail;
	*chunk = e->pos;
	e->pos += size;
	return size;

fail:
	e->pos = pos;
	return 0;
}
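/*
 * A u16 chunk is a little-endian 16-bit length immediately followed by that
 * many bytes of payload.  On any bounds failure the read head is rewound to
 * where it started; all of the unpack_* helpers below follow the same
 * convention so callers can treat elements as optional.
 */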
/* unpack control byte */
static bool unpack_X(struct aa_ext *e, enum aa_code code)
{
	if (!inbounds(e, 1))
		return false;
	if (*(u8 *) e->pos != code)
		return false;
	e->pos++;
	return true;
}
/**
 * unpack_nameX - check if the next element is of type X with a name of @name
 * @e: serialized data extent information (NOT NULL)
 * @code: type code
 * @name: name to match to the serialized element.  (MAYBE NULL)
 *
 * check that the next serialized data element is of type X and has a tag
 * name @name.  If @name is specified then there must be a matching
 * name element in the stream.  If @name is NULL any name element will be
 * skipped and only the typecode will be tested.
 *
 * Returns 1 on success (both type code and name tests match) and the read
 * head is advanced past the headers
 *
 * Returns: 0 if either match fails, the read head does not move
 */
static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
	/*
	 * May need to reset pos if name or type doesn't match
	 */
	void *pos = e->pos;
	/*
	 * Check for presence of a tagname, and if present name size
	 * AA_NAME tag value is a u16.
	 */
	if (unpack_X(e, AA_NAME)) {
		char *tag = NULL;
		size_t size = unpack_u16_chunk(e, &tag);
		/* if a name is specified it must match. otherwise skip tag */
		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
			goto fail;
	} else if (name) {
		/* if a name is specified and there is no name tag fail */
		goto fail;
	}

	/* now check if type code matches */
	if (unpack_X(e, code))
		return true;

fail:
	e->pos = pos;
	return false;
}
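/*
 * Example use (taken from unpack_profile() below): a profile is wrapped as
 *
 *	if (!unpack_nameX(e, AA_STRUCT, "profile"))
 *		goto fail;
 *	...
 *	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
 *		goto fail;
 *
 * i.e. a named AA_STRUCT opens the container and an unnamed AA_STRUCTEND
 * closes it; optional members are probed by name and simply treated as
 * absent when the probe fails, because the read head does not move.
 */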
static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U8, name)) {
		if (!inbounds(e, sizeof(u8)))
			goto fail;
		if (data)
			*data = get_unaligned((u8 *)e->pos);
		e->pos += sizeof(u8);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U32, name)) {
		if (!inbounds(e, sizeof(u32)))
			goto fail;
		if (data)
			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U64, name)) {
		if (!inbounds(e, sizeof(u64)))
			goto fail;
		if (data)
			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
		e->pos += sizeof(u64);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
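/*
 * unpack_u8/u32/u64 all share the same shape: match an (optionally named)
 * typecode with unpack_nameX(), bounds check the fixed-size payload, convert
 * from little-endian, and advance the read head; on failure the head is
 * restored so the element can be treated as absent.
 */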
static size_t unpack_array(struct aa_ext *e, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_ARRAY, name)) {
		int size;

		if (!inbounds(e, sizeof(u16)))
			goto fail;
		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
		e->pos += sizeof(u16);
		return size;
	}

fail:
	e->pos = pos;
	return 0;
}
static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_BLOB, name)) {
		u32 size;

		if (!inbounds(e, sizeof(u32)))
			goto fail;
		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		if (inbounds(e, (size_t) size)) {
			*blob = e->pos;
			e->pos += size;
			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
static int unpack_str(struct aa_ext *e, const char **string, const char *name)
{
	char *src_str;
	size_t size = 0;
	void *pos = e->pos;

	if (unpack_nameX(e, AA_STRING, name)) {
		size = unpack_u16_chunk(e, &src_str);
		if (size) {
			/* strings are null terminated, length is size - 1 */
			if (src_str[size - 1] != 0)
				goto fail;
			*string = src_str;

			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
{
	const char *tmp;
	void *pos = e->pos;
	int res = unpack_str(e, &tmp, name);

	*string = NULL;

	if (!res)
		return 0;

	*string = kmemdup(tmp, res, GFP_KERNEL);
	if (!*string) {
		e->pos = pos;
		return 0;
	}

	return res;
}
/**
 * unpack_dfa - unpack a file rule dfa
 * @e: serialized data extent information (NOT NULL)
 *
 * returns dfa or ERR_PTR or NULL if no dfa
 */
static struct aa_dfa *unpack_dfa(struct aa_ext *e)
{
	char *blob = NULL;
	size_t size;
	struct aa_dfa *dfa = NULL;

	size = unpack_blob(e, &blob, "aadfa");
	if (size) {
		/*
		 * The dfa is aligned within the blob to 8 bytes
		 * from the beginning of the stream.
		 * alignment adjust needed by dfa unpack
		 */
		size_t sz = blob - (char *) e->start -
			((e->pos - e->start) & 7);
		size_t pad = ALIGN(sz, 8) - sz;
		int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
			TO_ACCEPT2_FLAG(YYTD_DATA32) | DFA_FLAG_VERIFY_STATES;
		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);

		if (IS_ERR(dfa))
			return dfa;
	}

	return dfa;
}
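/*
 * The dfa unpack code expects its tables 8-byte aligned relative to the
 * start of the stream, while the AA_BLOB payload starts wherever the length
 * field left the read head.  The sz/pad arithmetic above (a description of
 * the code, see aa_dfa_unpack() for the consumer) computes how many leading
 * pad bytes the policy compiler inserted so that blob + pad is the real
 * start of the dfa tables.
 */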
/**
 * unpack_trans_table - unpack a profile transition table
 * @e: serialized data extent information (NOT NULL)
 * @profile: profile to add the accept table to (NOT NULL)
 *
 * Returns: 1 if table successfully unpacked
 */
static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
{
	void *saved_pos = e->pos;

	/* exec table is optional */
	if (unpack_nameX(e, AA_STRUCT, "xtable")) {
		int i, size;

		size = unpack_array(e, NULL);
		/* currently 4 exec bits and entries 0-3 are reserved iupcx */
		if (size > 16 - 4)
			goto fail;
		profile->file.trans.table = kcalloc(size, sizeof(char *),
						    GFP_KERNEL);
		if (!profile->file.trans.table)
			goto fail;

		profile->file.trans.size = size;
		for (i = 0; i < size; i++) {
			char *str;
			int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
			/* unpack_strdup verifies that the last character is
			 * null termination byte.
			 */
			if (!size2)
				goto fail;
			profile->file.trans.table[i] = str;
			/* verify that name doesn't start with space */
			if (isspace(*str))
				goto fail;

			/* count # of internal \0 */
			for (c = j = 0; j < size2 - 1; j++) {
				if (!str[j]) {
					pos = j;
					c++;
				}
			}
			if (*str == ':') {
				/* first character after : must be valid */
				if (!str[1])
					goto fail;
				/* beginning with : requires an embedded \0,
				 * verify that exactly 1 internal \0 exists
				 * trailing \0 already verified by unpack_strdup
				 *
				 * convert \0 back to : for label_parse
				 */
				if (c == 1)
					str[pos] = ':';
				else if (c > 1)
					goto fail;
			} else if (c)
				/* fail - all other cases with embedded \0 */
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	aa_free_domain_entries(&profile->file.trans);
	e->pos = saved_pos;
	return false;
}
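/*
 * Transition table entries that begin with ':' name a target of the form
 * ":ns:profile".  On the wire the separator between the ns and profile
 * parts is carried as an embedded '\0'; the loop above verifies exactly one
 * such embedded '\0' and converts it back to ':' so the label parsing done
 * later (label_parse, per the comment above) sees the usual syntax.
 */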
static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
		int i, size;

		size = unpack_array(e, NULL);
		profile->xattr_count = size;
		profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!profile->xattrs)
			goto fail;
		for (i = 0; i < size; i++) {
			if (!unpack_strdup(e, &profile->xattrs[i], NULL))
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	e->pos = pos;
	return false;
}
static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;
	int i, size;

	if (unpack_nameX(e, AA_STRUCT, "secmark")) {
		size = unpack_array(e, NULL);

		profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
					   GFP_KERNEL);
		if (!profile->secmark)
			goto fail;

		profile->secmark_count = size;

		for (i = 0; i < size; i++) {
			if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
				goto fail;
			if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
				goto fail;
			if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	if (profile->secmark) {
		for (i = 0; i < size; i++)
			kfree(profile->secmark[i].label);
		kfree(profile->secmark);
		profile->secmark_count = 0;
		profile->secmark = NULL;
	}

	e->pos = pos;
	return false;
}
static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	/* rlimits are optional */
	if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
		int i, size;
		u32 tmp = 0;

		if (!unpack_u32(e, &tmp, NULL))
			goto fail;
		profile->rlimits.mask = tmp;

		size = unpack_array(e, NULL);
		if (size > RLIM_NLIMITS)
			goto fail;
		for (i = 0; i < size; i++) {
			u64 tmp2 = 0;
			int a = aa_map_resource(i);

			if (!unpack_u64(e, &tmp2, NULL))
				goto fail;
			profile->rlimits.limits[a].rlim_max = tmp2;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	e->pos = pos;
	return false;
}
static u32 strhash(const void *data, u32 len, u32 seed)
{
	const char * const *key = data;

	return jhash(*key, strlen(*key), seed);
}

static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct aa_data *data = obj;
	const char * const *key = arg->key;

	return strcmp(data->key, *key);
}
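/*
 * strhash() and datacmp() back the per-profile key/value rhashtable built in
 * unpack_profile(): entries are struct aa_data keyed by their NUL-terminated
 * key string (see params.key_offset/head_offset below), hashed with jhash
 * over the string contents and compared with strcmp.
 */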
/**
 * unpack_profile - unpack a serialized profile
 * @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - name of the ns the profile is in (NOT NULL)
 *
 * NOTE: unpack profile sets audit struct if there is a failure
 */
static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
{
	struct aa_profile *profile = NULL;
	const char *tmpname, *tmpns = NULL, *name = NULL;
	const char *info = "failed to unpack profile";
	size_t ns_len;
	struct rhashtable_params params = { 0 };
	char *key = NULL;
	struct aa_data *data;
	int i, error = -EPROTO;
	kernel_cap_t tmpcap;
	u32 tmp;

	*ns_name = NULL;

	/* check that we have the right struct being passed */
	if (!unpack_nameX(e, AA_STRUCT, "profile"))
		goto fail;
	if (!unpack_str(e, &name, NULL))
		goto fail;

	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
	if (tmpns) {
		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
		if (!*ns_name) {
			info = "out of memory";
			goto fail;
		}
		name = tmpname;
	}

	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
	if (!profile)
		return ERR_PTR(-ENOMEM);

	/* profile renaming is optional */
	(void) unpack_str(e, &profile->rename, "rename");

	/* attachment string is optional */
	(void) unpack_str(e, &profile->attach, "attach");

	/* xmatch is optional and may be NULL */
	profile->xmatch = unpack_dfa(e);
	if (IS_ERR(profile->xmatch)) {
		error = PTR_ERR(profile->xmatch);
		profile->xmatch = NULL;
		info = "bad xmatch";
		goto fail;
	}
	/* xmatch_len is not optional if xmatch is set */
	if (profile->xmatch) {
		if (!unpack_u32(e, &tmp, NULL)) {
			info = "missing xmatch len";
			goto fail;
		}
		profile->xmatch_len = tmp;
	}

	/* disconnected attachment string is optional */
	(void) unpack_str(e, &profile->disconnected, "disconnected");

	/* per profile debug flags (complain, audit) */
	if (!unpack_nameX(e, AA_STRUCT, "flags")) {
		info = "profile missing flags";
		goto fail;
	}
	info = "failed to unpack profile flags";
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp & PACKED_FLAG_HAT)
		profile->label.flags |= FLAG_HAT;
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
		profile->mode = APPARMOR_COMPLAIN;
	else if (tmp == PACKED_MODE_KILL)
		profile->mode = APPARMOR_KILL;
	else if (tmp == PACKED_MODE_UNCONFINED)
		profile->mode = APPARMOR_UNCONFINED;
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp)
		profile->audit = AUDIT_ALL;

	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
		goto fail;

	/* path_flags is optional */
	if (unpack_u32(e, &profile->path_flags, "path_flags"))
		profile->path_flags |= profile->label.flags &
			PATH_MEDIATE_DELETED;
	else
		/* set a default value if path_flags field is not present */
		profile->path_flags = PATH_MEDIATE_DELETED;

	info = "failed to unpack profile capabilities";
	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &tmpcap.cap[0], NULL))
		goto fail;

	info = "failed to unpack upper profile capabilities";
	if (unpack_nameX(e, AA_STRUCT, "caps64")) {
		/* optional upper half of 64 bit caps */
		if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	info = "failed to unpack extended profile capabilities";
	if (unpack_nameX(e, AA_STRUCT, "capsx")) {
		/* optional extended caps mediation mask */
		if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	if (!unpack_xattrs(e, profile)) {
		info = "failed to unpack profile xattrs";
		goto fail;
	}

	if (!unpack_rlimits(e, profile)) {
		info = "failed to unpack profile rlimits";
		goto fail;
	}

	if (!unpack_secmark(e, profile)) {
		info = "failed to unpack profile secmark rules";
		goto fail;
	}

	if (unpack_nameX(e, AA_STRUCT, "policydb")) {
		/* generic policy dfa - optional and may be NULL */
		info = "failed to unpack policydb";
		profile->policy.dfa = unpack_dfa(e);
		if (IS_ERR(profile->policy.dfa)) {
			error = PTR_ERR(profile->policy.dfa);
			profile->policy.dfa = NULL;
			goto fail;
		} else if (!profile->policy.dfa) {
			error = -EPROTO;
			goto fail;
		}
		if (!unpack_u32(e, &profile->policy.start[0], "start"))
			/* default start state */
			profile->policy.start[0] = DFA_START;
		/* setup class index */
		for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
			profile->policy.start[i] =
				aa_dfa_next(profile->policy.dfa,
					    profile->policy.start[0],
					    i);
		}
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	} else
		profile->policy.dfa = aa_get_dfa(nulldfa);

	profile->file.dfa = unpack_dfa(e);
	if (IS_ERR(profile->file.dfa)) {
		error = PTR_ERR(profile->file.dfa);
		profile->file.dfa = NULL;
		info = "failed to unpack profile file rules";
		goto fail;
	} else if (profile->file.dfa) {
		if (!unpack_u32(e, &profile->file.start, "dfa_start"))
			/* default start state */
			profile->file.start = DFA_START;
	} else if (profile->policy.dfa &&
		   profile->policy.start[AA_CLASS_FILE]) {
		profile->file.dfa = aa_get_dfa(profile->policy.dfa);
		profile->file.start = profile->policy.start[AA_CLASS_FILE];
	} else
		profile->file.dfa = aa_get_dfa(nulldfa);

	if (!unpack_trans_table(e, profile)) {
		info = "failed to unpack profile transition table";
		goto fail;
	}

	if (unpack_nameX(e, AA_STRUCT, "data")) {
		info = "out of memory";
		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
		if (!profile->data)
			goto fail;

		params.nelem_hint = 3;
		params.key_len = sizeof(void *);
		params.key_offset = offsetof(struct aa_data, key);
		params.head_offset = offsetof(struct aa_data, head);
		params.hashfn = strhash;
		params.obj_cmpfn = datacmp;

		if (rhashtable_init(profile->data, &params)) {
			info = "failed to init key, value hash table";
			goto fail;
		}

		while (unpack_strdup(e, &key, NULL)) {
			data = kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				kfree_sensitive(key);
				goto fail;
			}

			data->key = key;
			data->size = unpack_blob(e, &data->data, NULL);
			data->data = kvmemdup(data->data, data->size);
			if (data->size && !data->data) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				goto fail;
			}

			rhashtable_insert_fast(profile->data, &data->head,
					       profile->data->p);
		}

		if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
			info = "failed to unpack end of key, value data table";
			goto fail;
		}
	}

	if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
		info = "failed to unpack end of profile";
		goto fail;
	}

	return profile;

fail:
	if (profile)
		name = NULL;
	else if (!name)
		name = "unknown";
	audit_iface(profile, NULL, name, info, e, error);
	aa_free_profile(profile);

	return ERR_PTR(error);
}
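/*
 * Rough order of sections consumed by unpack_profile() above: name, optional
 * rename/attach strings, optional xmatch dfa + xmatch_len, the "flags"
 * struct, optional path_flags, capability sets (plus optional "caps64" and
 * "capsx"), xattrs, rlimits, secmark, optional "policydb", the file dfa, the
 * transition table, and an optional "data" key/value table.  Any failure
 * funnels through the fail: label, which audits the error via audit_iface()
 * and frees the partially built profile.
 */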
/**
 * verify_header - unpack serialized stream header
 * @e: serialized data read head (NOT NULL)
 * @required: whether the header is required or optional
 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
 *
 * Returns: error or 0 if header is good
 */
static int verify_header(struct aa_ext *e, int required, const char **ns)
{
	int error = -EPROTONOSUPPORT;
	const char *name = NULL;
	*ns = NULL;

	/* get the interface version */
	if (!unpack_u32(e, &e->version, "version")) {
		if (required) {
			audit_iface(NULL, NULL, NULL, "invalid profile format",
				    e, error);
			return error;
		}
	}

	/* Check that the interface version is currently supported.
	 * if not specified use previous version
	 * Mask off everything that is not kernel abi version
	 */
	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
		audit_iface(NULL, NULL, NULL, "unsupported interface version",
			    e, error);
		return error;
	}

	/* read the namespace if present */
	if (unpack_str(e, &name, "namespace")) {
		if (*name == '\0') {
			audit_iface(NULL, NULL, NULL, "invalid namespace name",
				    e, error);
			return -EPROTO;
		}
		if (*ns && strcmp(*ns, name)) {
			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
				    error);
			return -EPROTO;
		} else if (!*ns) {
			*ns = kstrdup(name, GFP_KERNEL);
			if (!*ns)
				return -ENOMEM;
		}
	}

	return 0;
}
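/*
 * Note: a single load blob may carry several profiles back to back;
 * aa_unpack() below calls verify_header() with required set only for the
 * first profile (e.pos == e.start), so later profiles may omit the
 * version/namespace header and keep the values already read.
 */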
static bool verify_xindex(int xindex, int table_size)
{
	int index, xtype;

	xtype = xindex & AA_X_TYPE_MASK;
	index = xindex & AA_X_INDEX_MASK;
	if (xtype == AA_X_TABLE && index >= table_size)
		return false;
	return true;
}
/* verify dfa xindexes are in range of transition tables */
static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
{
	int i;

	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
		if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
			return false;
		if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
			return false;
	}
	return true;
}
/**
 * verify_profile - Do post unpack analysis to verify profile consistency
 * @profile: profile to verify (NOT NULL)
 *
 * Returns: 0 if passes verification else error
 */
static int verify_profile(struct aa_profile *profile)
{
	if (profile->file.dfa &&
	    !verify_dfa_xindex(profile->file.dfa,
			       profile->file.trans.size)) {
		audit_iface(profile, NULL, NULL, "Invalid named transition",
			    NULL, -EPROTO);
		return -EPROTO;
	}

	return 0;
}
void aa_load_ent_free(struct aa_load_ent *ent)
{
	if (ent) {
		aa_put_profile(ent->rename);
		aa_put_profile(ent->old);
		aa_put_profile(ent->new);
		kfree(ent->ns_name);
		kfree_sensitive(ent);
	}
}
struct aa_load_ent *aa_load_ent_alloc(void)
{
	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);

	if (ent)
		INIT_LIST_HEAD(&ent->list);
	return ent;
}
static int deflate_compress(const char *src, size_t slen, char **dst,
			    size_t *dlen)
{
	int error;
	struct z_stream_s strm;
	void *stgbuf, *dstbuf;
	size_t stglen = deflateBound(slen);

	memset(&strm, 0, sizeof(strm));

	if (stglen < slen)
		return -EFBIG;

	strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
							     MAX_MEM_LEVEL),
				  GFP_KERNEL);
	if (!strm.workspace)
		return -ENOMEM;

	error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
	if (error != Z_OK) {
		error = -ENOMEM;
		goto fail_deflate_init;
	}

	stgbuf = kvzalloc(stglen, GFP_KERNEL);
	if (!stgbuf) {
		error = -ENOMEM;
		goto fail_stg_alloc;
	}

	strm.next_in = src;
	strm.avail_in = slen;
	strm.next_out = stgbuf;
	strm.avail_out = stglen;

	error = zlib_deflate(&strm, Z_FINISH);
	if (error != Z_STREAM_END) {
		error = -EINVAL;
		goto fail_deflate;
	}
	error = 0;

	if (is_vmalloc_addr(stgbuf)) {
		dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
		if (dstbuf) {
			memcpy(dstbuf, stgbuf, strm.total_out);
			kvfree(stgbuf);
		}
	} else {
		/*
		 * If the staging buffer was kmalloc'd, then using krealloc is
		 * probably going to be faster. The destination buffer will
		 * always be smaller, so it's just shrunk, avoiding a memcpy
		 */
		dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);
	}

	if (!dstbuf)
		goto fail_deflate;

	*dst = dstbuf;
	*dlen = strm.total_out;

fail_stg_alloc:
	zlib_deflateEnd(&strm);
fail_deflate_init:
	kvfree(strm.workspace);
	return error;

fail_deflate:
	kvfree(stgbuf);
	error = -ENOMEM;
	goto fail_stg_alloc;
}
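/*
 * deflate_compress() stages output in a buffer sized by deflateBound(), then
 * shrinks it to strm.total_out: via krealloc() when the staging buffer was
 * kmalloc'd, or by copying into a fresh allocation when kvzalloc() fell back
 * to vmalloc (the is_vmalloc_addr() check above).
 */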
static int compress_loaddata(struct aa_loaddata *data)
{
	AA_BUG(data->compressed_size > 0);

	/*
	 * Shortcut the no compression case, else we increase the amount of
	 * storage required by a small amount
	 */
	if (aa_g_rawdata_compression_level != 0) {
		void *udata = data->data;
		int error = deflate_compress(udata, data->size, &data->data,
					     &data->compressed_size);
		if (error)
			return error;

		kvfree(udata);
	} else
		data->compressed_size = data->size;

	return 0;
}
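/*
 * When compression is disabled (aa_g_rawdata_compression_level == 0) the raw
 * policy is kept as-is and compressed_size is simply set equal to size;
 * otherwise data->data is replaced by the deflated buffer and
 * compressed_size records its length (see deflate_compress() above).
 */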
/**
 * aa_unpack - unpack packed binary profile(s) data loaded from user space
 * @udata: user data copied to kmem  (NOT NULL)
 * @lh: list to place unpacked profiles in an aa_repl_ws
 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
 *
 * Unpack user data and return refcounted allocated profile(s) stored in
 * @lh in order of discovery, with the list chain stored in base.list
 *
 * Returns: profile(s) on @lh else error pointer if fails to unpack
 */
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
	      const char **ns)
{
	struct aa_load_ent *tmp, *ent;
	struct aa_profile *profile = NULL;
	int error;
	struct aa_ext e = {
		.start = udata->data,
		.end = udata->data + udata->size,
		.pos = udata->data,
	};

	*ns = NULL;
	while (e.pos < e.end) {
		void *start;
		char *ns_name = NULL;

		error = verify_header(&e, e.pos == e.start, ns);
		if (error)
			goto fail;

		start = e.pos;
		profile = unpack_profile(&e, &ns_name);
		if (IS_ERR(profile)) {
			error = PTR_ERR(profile);
			goto fail;
		}

		error = verify_profile(profile);
		if (error)
			goto fail_profile;

		if (aa_g_hash_policy)
			error = aa_calc_profile_hash(profile, e.version, start,
						     e.pos - start);
		if (error)
			goto fail_profile;

		ent = aa_load_ent_alloc();
		if (!ent) {
			error = -ENOMEM;
			goto fail_profile;
		}

		ent->new = profile;
		ent->ns_name = ns_name;
		list_add_tail(&ent->list, lh);
	}
	udata->abi = e.version & K_ABI_MASK;
	if (aa_g_hash_policy) {
		udata->hash = aa_calc_hash(udata->data, udata->size);
		if (IS_ERR(udata->hash)) {
			error = PTR_ERR(udata->hash);
			udata->hash = NULL;
			goto fail;
		}
	}

	error = compress_loaddata(udata);
	if (error)
		goto fail;
	return 0;

fail_profile:
	aa_put_profile(profile);

fail:
	list_for_each_entry_safe(ent, tmp, lh, list) {
		list_del_init(&ent->list);
		aa_load_ent_free(ent);
	}

	return error;
}
#ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
#include "policy_unpack_test.c"
#endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */