// SPDX-License-Identifier: GPL-2.0-or-later
/* Keyring handling
 *
 * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/user_namespace.h>
#include <linux/nsproxy.h>
#include <keys/keyring-type.h>
#include <keys/user-type.h>
#include <linux/assoc_array_priv.h>
#include <linux/uaccess.h>
#include <net/net_namespace.h>
#include "internal.h"
/*
 * When plumbing the depths of the key tree, this sets a hard limit on how
 * deep we're willing to go.
 */
#define KEYRING_SEARCH_MAX_DEPTH 6

/*
 * We mark pointers we pass to the associative array with bit 1 set if
 * they're keyrings and clear otherwise.
 */
#define KEYRING_PTR_SUBTYPE	0x2UL
static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
{
	return (unsigned long)x & KEYRING_PTR_SUBTYPE;
}
static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
{
	void *object = assoc_array_ptr_to_leaf(x);
	return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
}
static inline void *keyring_key_to_ptr(struct key *key)
{
	if (key->type == &key_type_keyring)
		return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
	return key;
}
static DEFINE_RWLOCK(keyring_name_lock);

/*
 * Clean up the bits of user_namespace that belong to us.
 */
void key_free_user_ns(struct user_namespace *ns)
{
	write_lock(&keyring_name_lock);
	list_del_init(&ns->keyring_name_list);
	write_unlock(&keyring_name_lock);

	key_put(ns->user_keyring_register);
#ifdef CONFIG_PERSISTENT_KEYRINGS
	key_put(ns->persistent_keyring_register);
#endif
}
/*
 * The keyring key type definition.  Keyrings are simply keys of this type and
 * can be treated as ordinary keys in addition to having their own special
 * operations.
 */
static int keyring_preparse(struct key_preparsed_payload *prep);
static void keyring_free_preparse(struct key_preparsed_payload *prep);
static int keyring_instantiate(struct key *keyring,
			       struct key_preparsed_payload *prep);
static void keyring_revoke(struct key *keyring);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
static long keyring_read(const struct key *keyring,
			 char __user *buffer, size_t buflen);
struct key_type key_type_keyring = {
	.name		= "keyring",
	.def_datalen	= 0,
	.preparse	= keyring_preparse,
	.free_preparse	= keyring_free_preparse,
	.instantiate	= keyring_instantiate,
	.revoke		= keyring_revoke,
	.destroy	= keyring_destroy,
	.describe	= keyring_describe,
	.read		= keyring_read,
};
EXPORT_SYMBOL(key_type_keyring);
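/*
 * Illustrative sketch: a key_type definition is normally filled in the same
 * way as key_type_keyring above.  This hypothetical "example" type is
 * assembled from the user-defined-key helpers declared in <keys/user-type.h>;
 * the name and the definition itself are assumptions made purely for
 * illustration and are not part of the keyring implementation.
 */
static struct key_type key_type_example_sketch __maybe_unused = {
	.name		= "example",
	.preparse	= user_preparse,
	.free_preparse	= user_free_preparse,
	.instantiate	= generic_key_instantiate,
	.destroy	= user_destroy,
	.describe	= user_describe,
	.read		= user_read,
};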
/*
 * Mutex to serialise link/link calls to prevent two link calls in parallel
 * introducing a cycle.
 */
static DEFINE_MUTEX(keyring_serialise_link_lock);
/*
 * Publish the name of a keyring so that it can be found by name (if it has
 * one and it doesn't begin with a dot).
 */
static void keyring_publish_name(struct key *keyring)
{
	struct user_namespace *ns = current_user_ns();

	if (keyring->description &&
	    keyring->description[0] &&
	    keyring->description[0] != '.') {
		write_lock(&keyring_name_lock);
		list_add_tail(&keyring->name_link, &ns->keyring_name_list);
		write_unlock(&keyring_name_lock);
	}
}
/*
 * Preparse a keyring payload
 */
static int keyring_preparse(struct key_preparsed_payload *prep)
{
	return prep->datalen != 0 ? -EINVAL : 0;
}

/*
 * Free a preparsed keyring payload
 */
static void keyring_free_preparse(struct key_preparsed_payload *prep)
{
}
/*
 * Initialise a keyring.
 *
 * Returns 0 on success, -EINVAL if given any data.
 */
static int keyring_instantiate(struct key *keyring,
			       struct key_preparsed_payload *prep)
{
	assoc_array_init(&keyring->keys);
	/* make the keyring available by name if it has one */
	keyring_publish_name(keyring);
	return 0;
}
/*
 * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit.  Ideally we'd
 * fold the carry back too, but that requires inline asm.
 */
static u64 mult_64x32_and_fold(u64 x, u32 y)
{
	u64 hi = (u64)(u32)(x >> 32) * y;
	u64 lo = (u64)(u32)(x) * y;
	return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
}
/*
 * Hash a key type and description.
 */
static void hash_key_type_and_desc(struct keyring_index_key *index_key)
{
	const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
	const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
	const char *description = index_key->description;
	unsigned long hash, type;
	u32 piece;
	u64 acc;
	int n, desc_len = index_key->desc_len;

	type = (unsigned long)index_key->type;
	acc = mult_64x32_and_fold(type, desc_len + 13);
	acc = mult_64x32_and_fold(acc, 9207);
	piece = (unsigned long)index_key->domain_tag;
	acc = mult_64x32_and_fold(acc, piece);
	acc = mult_64x32_and_fold(acc, 9207);

	for (;;) {
		n = desc_len;
		if (n <= 0)
			break;
		if (n > 4)
			n = 4;
		piece = 0;
		memcpy(&piece, description, n);
		description += n;
		desc_len -= n;
		acc = mult_64x32_and_fold(acc, piece);
		acc = mult_64x32_and_fold(acc, 9207);
	}

	/* Fold the hash down to 32 bits if need be. */
	hash = acc;
	if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
		hash ^= acc >> 32;

	/* Squidge all the keyrings into a separate part of the tree from
	 * ordinary keys by making sure the lowest level segment in the hash is
	 * zero for keyrings and non-zero otherwise.
	 */
	if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
		hash |= (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
	else if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
		hash = (hash + (hash << level_shift)) & ~fan_mask;
	index_key->hash = hash;
}
/*
 * Finalise an index key to include a part of the description actually in the
 * index key, to set the domain tag and to calculate the hash.
 */
void key_set_index_key(struct keyring_index_key *index_key)
{
	static struct key_tag default_domain_tag = { .usage = REFCOUNT_INIT(1), };
	size_t n = min_t(size_t, index_key->desc_len, sizeof(index_key->desc));

	memcpy(index_key->desc, index_key->description, n);

	if (!index_key->domain_tag) {
		if (index_key->type->flags & KEY_TYPE_NET_DOMAIN)
			index_key->domain_tag = current->nsproxy->net_ns->key_domain;
		else
			index_key->domain_tag = &default_domain_tag;
	}

	hash_key_type_and_desc(index_key);
}
/**
 * key_put_tag - Release a ref on a tag.
 * @tag: The tag to release.
 *
 * This releases a reference on the given tag and returns true if that ref was
 * the last one.
 */
bool key_put_tag(struct key_tag *tag)
{
	if (refcount_dec_and_test(&tag->usage)) {
		kfree_rcu(tag, rcu);
		return true;
	}

	return false;
}
/**
 * key_remove_domain - Kill off a key domain and gc its keys
 * @domain_tag: The domain tag to release.
 *
 * This marks a domain tag as being dead and releases a ref on it.  If that
 * wasn't the last reference, the garbage collector is poked to try and delete
 * all keys that were in the domain.
 */
void key_remove_domain(struct key_tag *domain_tag)
{
	domain_tag->removed = true;
	if (!key_put_tag(domain_tag))
		key_schedule_gc_links();
}
/*
 * Build the next index key chunk.
 *
 * We return it one word-sized chunk at a time.
 */
static unsigned long keyring_get_key_chunk(const void *data, int level)
{
	const struct keyring_index_key *index_key = data;
	unsigned long chunk = 0;
	const u8 *d;
	int desc_len = index_key->desc_len, n = sizeof(chunk);

	level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
	switch (level) {
	case 0:
		return index_key->hash;
	case 1:
		return index_key->x;
	case 2:
		return (unsigned long)index_key->type;
	case 3:
		return (unsigned long)index_key->domain_tag;
	default:
		level -= 4;
		if (desc_len <= sizeof(index_key->desc))
			return 0;

		d = index_key->description + sizeof(index_key->desc);
		d += level * sizeof(long);
		desc_len -= sizeof(index_key->desc);
		if (desc_len > n)
			desc_len = n;
		do {
			chunk <<= 8;
			chunk |= *d++;
		} while (--desc_len > 0);
		return chunk;
	}
}
/* Extract a chunk of an object's index key for the index tree. */
static unsigned long keyring_get_object_key_chunk(const void *object, int level)
{
	const struct key *key = keyring_ptr_to_key(object);
	return keyring_get_key_chunk(&key->index_key, level);
}
/* See if an object's index key matches the one being searched for. */
static bool keyring_compare_object(const void *object, const void *data)
{
	const struct keyring_index_key *index_key = data;
	const struct key *key = keyring_ptr_to_key(object);

	return key->index_key.type == index_key->type &&
		key->index_key.domain_tag == index_key->domain_tag &&
		key->index_key.desc_len == index_key->desc_len &&
		memcmp(key->index_key.description, index_key->description,
		       index_key->desc_len) == 0;
}
/*
 * Compare the index keys of a pair of objects and determine the bit position
 * at which they differ - if they differ.
 */
static int keyring_diff_objects(const void *object, const void *data)
{
	const struct key *key_a = keyring_ptr_to_key(object);
	const struct keyring_index_key *a = &key_a->index_key;
	const struct keyring_index_key *b = data;
	unsigned long seg_a, seg_b;
	int level, i;

	level = 0;
	seg_a = a->hash;
	seg_b = b->hash;
	if ((seg_a ^ seg_b) != 0)
		goto differ;
	level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;

	/* The number of bits contributed by the hash is controlled by a
	 * constant in the assoc_array headers.  Everything else thereafter we
	 * can deal with as being machine word-size dependent.
	 */
	seg_a = a->x;
	seg_b = b->x;
	if ((seg_a ^ seg_b) != 0)
		goto differ;
	level += sizeof(unsigned long);

	/* The next bit may not work on big endian */
	seg_a = (unsigned long)a->type;
	seg_b = (unsigned long)b->type;
	if ((seg_a ^ seg_b) != 0)
		goto differ;
	level += sizeof(unsigned long);

	seg_a = (unsigned long)a->domain_tag;
	seg_b = (unsigned long)b->domain_tag;
	if ((seg_a ^ seg_b) != 0)
		goto differ;
	level += sizeof(unsigned long);

	i = sizeof(a->desc);
	if (a->desc_len <= i)
		goto same;

	for (; i < a->desc_len; i++) {
		seg_a = *(unsigned char *)(a->description + i);
		seg_b = *(unsigned char *)(b->description + i);
		if ((seg_a ^ seg_b) != 0)
			goto differ_plus_i;
	}

same:
	return -1;

differ_plus_i:
	level += i;
differ:
	i = level * 8 + __ffs(seg_a ^ seg_b);
	return i;
}
/*
 * Free an object after stripping the keyring flag off of the pointer.
 */
static void keyring_free_object(void *object)
{
	key_put(keyring_ptr_to_key(object));
}
/*
 * Operations for keyring management by the index-tree routines.
 */
static const struct assoc_array_ops keyring_assoc_array_ops = {
	.get_key_chunk		= keyring_get_key_chunk,
	.get_object_key_chunk	= keyring_get_object_key_chunk,
	.compare_object		= keyring_compare_object,
	.diff_objects		= keyring_diff_objects,
	.free_object		= keyring_free_object,
};
/*
 * Clean up a keyring when it is destroyed.  Unpublish its name if it had one
 * and dispose of its data.
 *
 * The garbage collector detects the final key_put(), removes the keyring from
 * the serial number tree and then does RCU synchronisation before coming here,
 * so we shouldn't need to worry about code poking around here with the RCU
 * readlock held by this time.
 */
static void keyring_destroy(struct key *keyring)
{
	if (keyring->description) {
		write_lock(&keyring_name_lock);

		if (keyring->name_link.next != NULL &&
		    !list_empty(&keyring->name_link))
			list_del(&keyring->name_link);

		write_unlock(&keyring_name_lock);
	}

	if (keyring->restrict_link) {
		struct key_restriction *keyres = keyring->restrict_link;

		key_put(keyres->key);
		kfree(keyres);
	}

	assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
}
/*
 * Describe a keyring for /proc.
 */
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
	if (keyring->description)
		seq_puts(m, keyring->description);
	else
		seq_puts(m, "[anon]");

	if (key_is_positive(keyring)) {
		if (keyring->keys.nr_leaves_on_tree != 0)
			seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
		else
			seq_puts(m, ": empty");
	}
}
struct keyring_read_iterator_context {
	size_t			buflen;
	size_t			count;
	key_serial_t __user	*buffer;
};

static int keyring_read_iterator(const void *object, void *data)
{
	struct keyring_read_iterator_context *ctx = data;
	const struct key *key = keyring_ptr_to_key(object);
	int ret;

	kenter("{%s,%d},,{%zu/%zu}",
	       key->type->name, key->serial, ctx->count, ctx->buflen);

	if (ctx->count >= ctx->buflen)
		return 1;

	ret = put_user(key->serial, ctx->buffer);
	if (ret < 0)
		return ret;
	ctx->buffer++;
	ctx->count += sizeof(key->serial);
	return 0;
}
/*
 * Read a list of key IDs from the keyring's contents in binary form
 *
 * The keyring's semaphore is read-locked by the caller.  This prevents someone
 * from modifying it under us - which could cause us to read key IDs multiple
 * times.
 */
static long keyring_read(const struct key *keyring,
			 char __user *buffer, size_t buflen)
{
	struct keyring_read_iterator_context ctx;
	long ret;

	kenter("{%d},,%zu", key_serial(keyring), buflen);

	if (buflen & (sizeof(key_serial_t) - 1))
		return -EINVAL;

	/* Copy as many key IDs as fit into the buffer */
	if (buffer && buflen) {
		ctx.buffer = (key_serial_t __user *)buffer;
		ctx.buflen = buflen;
		ctx.count = 0;
		ret = assoc_array_iterate(&keyring->keys,
					  keyring_read_iterator, &ctx);
		if (ret < 0) {
			kleave(" = %ld [iterate]", ret);
			return ret;
		}
	}

	/* Return the size of the buffer needed */
	ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
	if (ret <= buflen)
		kleave("= %ld [ok]", ret);
	else
		kleave("= %ld [buffer too small]", ret);
	return ret;
}
/*
 * Allocate a keyring and link into the destination keyring.
 */
struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
			  const struct cred *cred, key_perm_t perm,
			  unsigned long flags,
			  struct key_restriction *restrict_link,
			  struct key *dest)
{
	struct key *keyring;
	int ret;

	keyring = key_alloc(&key_type_keyring, description,
			    uid, gid, cred, perm, flags, restrict_link);
	if (!IS_ERR(keyring)) {
		ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
		if (ret < 0) {
			key_put(keyring);
			keyring = ERR_PTR(ret);
		}
	}

	return keyring;
}
EXPORT_SYMBOL(keyring_alloc);
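/*
 * Illustrative sketch: how a caller might allocate an unrestricted keyring
 * owned by root, not linked into any destination keyring and not charged to
 * quota.  The helper, its name and the permission mask chosen here are
 * hypothetical and are shown only to demonstrate the keyring_alloc() call
 * defined above.
 */
static struct key * __maybe_unused example_alloc_keyring_sketch(void)
{
	/* No restriction, no destination keyring, don't charge quota. */
	return keyring_alloc(".example_keyring",
			     GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
			     current_cred(),
			     (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
			     KEY_ALLOC_NOT_IN_QUOTA,
			     NULL, NULL);
}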
/**
 * restrict_link_reject - Give -EPERM to restrict link
 * @keyring: The keyring being added to.
 * @type: The type of key being added.
 * @payload: The payload of the key intended to be added.
 * @restriction_key: Keys providing additional data for evaluating restriction.
 *
 * Reject the addition of any links to a keyring.  It can be overridden by
 * passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
 * adding a key to a keyring.
 *
 * This is meant to be stored in a key_restriction structure which is passed
 * in the restrict_link parameter to keyring_alloc().
 */
int restrict_link_reject(struct key *keyring,
			 const struct key_type *type,
			 const union key_payload *payload,
			 struct key *restriction_key)
{
	return -EPERM;
}
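/*
 * Illustrative sketch: restrict_link_reject() is normally wired up through a
 * struct key_restriction handed to keyring_alloc(), which makes the new
 * keyring refuse all key_link() calls.  The helper below and its name are
 * hypothetical and shown only for illustration.
 */
static struct key * __maybe_unused example_alloc_locked_down_keyring_sketch(void)
{
	struct key_restriction *restriction;
	struct key *keyring;

	/* The keyring takes ownership of the restriction and kfree()s it in
	 * keyring_destroy(), so it must be heap-allocated.
	 */
	restriction = kzalloc(sizeof(*restriction), GFP_KERNEL);
	if (!restriction)
		return ERR_PTR(-ENOMEM);
	restriction->check = restrict_link_reject;

	keyring = keyring_alloc(".example_locked", GLOBAL_ROOT_UID,
				GLOBAL_ROOT_GID, current_cred(),
				KEY_POS_ALL | KEY_USR_VIEW,
				KEY_ALLOC_NOT_IN_QUOTA, restriction, NULL);
	if (IS_ERR(keyring))
		kfree(restriction);
	return keyring;
}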
/*
 * By default, keys are matched by an exact comparison of their descriptions.
 */
bool key_default_cmp(const struct key *key,
		     const struct key_match_data *match_data)
{
	return strcmp(key->description, match_data->raw_data) == 0;
}
/*
 * Iteration function to consider each key found.
 */
static int keyring_search_iterator(const void *object, void *iterator_data)
{
	struct keyring_search_context *ctx = iterator_data;
	const struct key *key = keyring_ptr_to_key(object);
	unsigned long kflags = READ_ONCE(key->flags);
	short state = READ_ONCE(key->state);

	kenter("{%d}", key->serial);

	/* ignore keys not of this type */
	if (key->type != ctx->index_key.type) {
		kleave(" = 0 [!type]");
		return 0;
	}

	/* skip invalidated, revoked and expired keys */
	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
		time64_t expiry = READ_ONCE(key->expiry);

		if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
			      (1 << KEY_FLAG_REVOKED))) {
			ctx->result = ERR_PTR(-EKEYREVOKED);
			kleave(" = %d [invrev]", ctx->skipped_ret);
			goto skipped;
		}

		if (expiry && ctx->now >= expiry) {
			if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
				ctx->result = ERR_PTR(-EKEYEXPIRED);
			kleave(" = %d [expire]", ctx->skipped_ret);
			goto skipped;
		}
	}

	/* keys that don't match */
	if (!ctx->match_data.cmp(key, &ctx->match_data)) {
		kleave(" = 0 [!match]");
		return 0;
	}

	/* key must have search permissions */
	if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
	    key_task_permission(make_key_ref(key, ctx->possessed),
				ctx->cred, KEY_NEED_SEARCH) < 0) {
		ctx->result = ERR_PTR(-EACCES);
		kleave(" = %d [!perm]", ctx->skipped_ret);
		goto skipped;
	}

	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
		/* we set a different error code if we pass a negative key */
		if (state < 0) {
			ctx->result = ERR_PTR(state);
			kleave(" = %d [neg]", ctx->skipped_ret);
			goto skipped;
		}
	}

	/* Found */
	ctx->result = make_key_ref(key, ctx->possessed);
	kleave(" = 1 [found]");
	return 1;

skipped:
	return ctx->skipped_ret;
}
/*
 * Search inside a keyring for a key.  We can search by walking to it
 * directly based on its index-key or we can iterate over the entire
 * tree looking for it, based on the match function.
 */
static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
{
	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) {
		const void *object;

		object = assoc_array_find(&keyring->keys,
					  &keyring_assoc_array_ops,
					  &ctx->index_key);
		return object ? ctx->iterator(object, ctx) : 0;
	}
	return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
}
/*
 * Search a tree of keyrings that point to other keyrings up to the maximum
 * depth.
 */
static bool search_nested_keyrings(struct key *keyring,
				   struct keyring_search_context *ctx)
{
	struct {
		struct key *keyring;
		struct assoc_array_node *node;
		int slot;
	} stack[KEYRING_SEARCH_MAX_DEPTH];

	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *ptr;
	struct key *key;
	int sp = 0, slot;

	kenter("{%d},{%s,%s}",
	       keyring->serial,
	       ctx->index_key.type->name,
	       ctx->index_key.description);

#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
	BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
	       (ctx->flags & STATE_CHECKS) == STATE_CHECKS);

	if (ctx->index_key.description)
		key_set_index_key(&ctx->index_key);

	/* Check to see if this top-level keyring is what we are looking for
	 * and whether it is valid or not.
	 */
	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
	    keyring_compare_object(keyring, &ctx->index_key)) {
		ctx->skipped_ret = 2;
		switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
		case 1:
			goto found;
		case 2:
			return false;
		default:
			break;
		}
	}

	ctx->skipped_ret = 0;

	/* Start processing a new keyring */
descend_to_keyring:
	kdebug("descend to %d", keyring->serial);
	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
			      (1 << KEY_FLAG_REVOKED)))
		goto not_this_keyring;

	/* Search through the keys in this keyring before searching its
	 * subtrees.
	 */
	if (search_keyring(keyring, ctx))
		goto found;

	/* Then manually iterate through the keyrings nested in this one.
	 *
	 * Start from the root node of the index tree.  Because of the way the
	 * hash function has been set up, keyrings cluster on the leftmost
	 * branch of the root node (root slot 0) or in the root node itself.
	 * Non-keyrings avoid the leftmost branch of the root entirely (root
	 * slots 1-15).
	 */
	if (!(ctx->flags & KEYRING_SEARCH_RECURSE))
		goto not_this_keyring;

	ptr = READ_ONCE(keyring->keys.root);
	if (!ptr)
		goto not_this_keyring;

	if (assoc_array_ptr_is_shortcut(ptr)) {
		/* If the root is a shortcut, either the keyring only contains
		 * keyring pointers (everything clusters behind root slot 0) or
		 * doesn't contain any keyring pointers.
		 */
		shortcut = assoc_array_ptr_to_shortcut(ptr);
		if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
			goto not_this_keyring;

		ptr = READ_ONCE(shortcut->next_node);
		node = assoc_array_ptr_to_node(ptr);
		goto begin_node;
	}

	node = assoc_array_ptr_to_node(ptr);
	ptr = node->slots[0];
	if (!assoc_array_ptr_is_meta(ptr))
		goto begin_node;

descend_to_node:
	/* Descend to a more distal node in this keyring's content tree and go
	 * through that.
	 */
	kdebug("descend");
	if (assoc_array_ptr_is_shortcut(ptr)) {
		shortcut = assoc_array_ptr_to_shortcut(ptr);
		ptr = READ_ONCE(shortcut->next_node);
		BUG_ON(!assoc_array_ptr_is_node(ptr));
	}
	node = assoc_array_ptr_to_node(ptr);

begin_node:
	kdebug("begin_node");
	slot = 0;
ascend_to_node:
	/* Go through the slots in a node */
	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		ptr = READ_ONCE(node->slots[slot]);

		if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
			goto descend_to_node;

		if (!keyring_ptr_is_keyring(ptr))
			continue;

		key = keyring_ptr_to_key(ptr);

		if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
			if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
				ctx->result = ERR_PTR(-ELOOP);
				return false;
			}
			goto not_this_keyring;
		}

		/* Search a nested keyring */
		if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
		    key_task_permission(make_key_ref(key, ctx->possessed),
					ctx->cred, KEY_NEED_SEARCH) < 0)
			continue;

		/* stack the current position */
		stack[sp].keyring = keyring;
		stack[sp].node = node;
		stack[sp].slot = slot;
		sp++;

		/* begin again with the new keyring */
		keyring = key;
		goto descend_to_keyring;
	}

	/* We've dealt with all the slots in the current node, so now we need
	 * to ascend to the parent and continue processing there.
	 */
	ptr = READ_ONCE(node->back_pointer);
	slot = node->parent_slot;

	if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
		shortcut = assoc_array_ptr_to_shortcut(ptr);
		ptr = READ_ONCE(shortcut->back_pointer);
		slot = shortcut->parent_slot;
	}
	if (!ptr)
		goto not_this_keyring;
	node = assoc_array_ptr_to_node(ptr);
	slot++;

	/* If we've ascended to the root (zero backpointer), we must have just
	 * finished processing the leftmost branch rather than the root slots -
	 * so there can't be any more keyrings for us to find.
	 */
	if (node->back_pointer) {
		kdebug("ascend %d", slot);
		goto ascend_to_node;
	}

	/* The keyring we're looking at was disqualified or didn't contain a
	 * matching key.
	 */
not_this_keyring:
	kdebug("not_this_keyring %d", sp);
	if (sp <= 0) {
		kleave(" = false");
		return false;
	}

	/* Resume the processing of a keyring higher up in the tree */
	sp--;
	keyring = stack[sp].keyring;
	node = stack[sp].node;
	slot = stack[sp].slot + 1;
	kdebug("ascend to %d [%d]", keyring->serial, slot);
	goto ascend_to_node;

	/* We found a viable match */
found:
	key = key_ref_to_ptr(ctx->result);
	key_check(key);
	if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
		key->last_used_at = ctx->now;
		keyring->last_used_at = ctx->now;
		while (sp > 0)
			stack[--sp].keyring->last_used_at = ctx->now;
	}
	kleave(" = true");
	return true;
}
/**
 * keyring_search_rcu - Search a keyring tree for a matching key under RCU
 * @keyring_ref: A pointer to the keyring with possession indicator.
 * @ctx: The keyring search context.
 *
 * Search the supplied keyring tree for a key that matches the criteria given.
 * The root keyring and any linked keyrings must grant Search permission to the
 * caller to be searchable and keys can only be found if they too grant Search
 * to the caller.  The possession flag on the root keyring pointer controls use
 * of the possessor bits in permissions checking of the entire tree.  In
 * addition, the LSM gets to forbid keyring searches and key matches.
 *
 * The search is performed as a breadth-then-depth search up to the prescribed
 * limit (KEYRING_SEARCH_MAX_DEPTH).  The caller must hold the RCU read lock to
 * prevent keyrings from being destroyed or rearranged whilst they are being
 * searched.
 *
 * Keys are matched to the type provided and are then filtered by the match
 * function, which is given the description to use in any way it sees fit.  The
 * match function may use any attributes of a key that it wishes when
 * determining the match.  Normally the match function from the key type would
 * be used.
 *
 * RCU can be used to prevent the keyring key lists from disappearing without
 * the need to take lots of locks.
 *
 * Returns a pointer to the found key and increments the key usage count if
 * successful; -EAGAIN if no matching keys were found, or if expired or revoked
 * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
 * specified keyring wasn't a keyring.
 *
 * In the case of a successful return, the possession attribute from
 * @keyring_ref is propagated to the returned key reference.
 */
key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
			     struct keyring_search_context *ctx)
{
	struct key *keyring;
	long err;

	ctx->iterator = keyring_search_iterator;
	ctx->possessed = is_key_possessed(keyring_ref);
	ctx->result = ERR_PTR(-EAGAIN);

	keyring = key_ref_to_ptr(keyring_ref);
	key_check(keyring);

	if (keyring->type != &key_type_keyring)
		return ERR_PTR(-ENOTDIR);

	if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
		err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH);
		if (err < 0)
			return ERR_PTR(err);
	}

	ctx->now = ktime_get_real_seconds();
	if (search_nested_keyrings(keyring, ctx))
		__key_get(key_ref_to_ptr(ctx->result));
	return ctx->result;
}
/**
 * keyring_search - Search the supplied keyring tree for a matching key
 * @keyring: The root of the keyring tree to be searched.
 * @type: The type of key we want to find.
 * @description: The description of the key we want to find.
 * @recurse: True to search the children of @keyring also
 *
 * As keyring_search_rcu() above, but using the current task's credentials and
 * type's default matching function and preferred search method.
 */
key_ref_t keyring_search(key_ref_t keyring,
			 struct key_type *type,
			 const char *description,
			 bool recurse)
{
	struct keyring_search_context ctx = {
		.index_key.type		= type,
		.index_key.description	= description,
		.index_key.desc_len	= strlen(description),
		.cred			= current_cred(),
		.match_data.cmp		= key_default_cmp,
		.match_data.raw_data	= description,
		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
		.flags			= KEYRING_SEARCH_DO_STATE_CHECK,
	};
	key_ref_t key;
	int ret;

	if (recurse)
		ctx.flags |= KEYRING_SEARCH_RECURSE;
	if (type->match_preparse) {
		ret = type->match_preparse(&ctx.match_data);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	rcu_read_lock();
	key = keyring_search_rcu(keyring, &ctx);
	rcu_read_unlock();

	if (type->match_free)
		type->match_free(&ctx.match_data);
	return key;
}
EXPORT_SYMBOL(keyring_search);
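/*
 * Illustrative sketch: how a caller holding a possessed reference to a
 * keyring might use keyring_search() to look up a user-defined key by
 * description.  The helper, its name and the use of key_type_user are
 * hypothetical and shown only to demonstrate the call defined above.
 */
static struct key * __maybe_unused example_find_user_key_sketch(struct key *keyring,
								const char *desc)
{
	key_ref_t kref;

	/* Search @keyring and, because recurse is true, any keyrings linked
	 * into it, using key_type_user's default match function.
	 */
	kref = keyring_search(make_key_ref(keyring, true),
			      &key_type_user, desc, true);
	if (IS_ERR(kref))
		return ERR_CAST(kref);

	/* The search took a reference; the caller must key_put() it. */
	return key_ref_to_ptr(kref);
}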
static struct key_restriction *keyring_restriction_alloc(
	key_restrict_link_func_t check)
{
	struct key_restriction *keyres =
		kzalloc(sizeof(struct key_restriction), GFP_KERNEL);

	if (!keyres)
		return ERR_PTR(-ENOMEM);

	keyres->check = check;

	return keyres;
}
/*
 * Semaphore to serialise restriction setup to prevent reference count
 * cycles through restriction key pointers.
 */
static DECLARE_RWSEM(keyring_serialise_restrict_sem);

/*
 * Check for restriction cycles that would prevent keyring garbage collection.
 * keyring_serialise_restrict_sem must be held.
 */
static bool keyring_detect_restriction_cycle(const struct key *dest_keyring,
					     struct key_restriction *keyres)
{
	while (keyres && keyres->key &&
	       keyres->key->type == &key_type_keyring) {
		if (keyres->key == dest_keyring)
			return true;

		keyres = keyres->key->restrict_link;
	}

	return false;
}
/**
 * keyring_restrict - Look up and apply a restriction to a keyring
 * @keyring_ref: The keyring to be restricted
 * @type: The key type that will provide the restriction checker.
 * @restriction: The restriction options to apply to the keyring
 *
 * Look up a keyring and apply a restriction to it.  The restriction is managed
 * by the specific key type, but can be configured by the options specified in
 * the restriction string.
 */
int keyring_restrict(key_ref_t keyring_ref, const char *type,
		     const char *restriction)
{
	struct key *keyring;
	struct key_type *restrict_type = NULL;
	struct key_restriction *restrict_link;
	int ret = 0;

	keyring = key_ref_to_ptr(keyring_ref);
	key_check(keyring);

	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	if (!type) {
		restrict_link = keyring_restriction_alloc(restrict_link_reject);
	} else {
		restrict_type = key_type_lookup(type);

		if (IS_ERR(restrict_type))
			return PTR_ERR(restrict_type);

		if (!restrict_type->lookup_restriction) {
			ret = -ENOENT;
			goto error;
		}

		restrict_link = restrict_type->lookup_restriction(restriction);
	}

	if (IS_ERR(restrict_link)) {
		ret = PTR_ERR(restrict_link);
		goto error;
	}

	down_write(&keyring->sem);
	down_write(&keyring_serialise_restrict_sem);

	if (keyring->restrict_link)
		ret = -EEXIST;
	else if (keyring_detect_restriction_cycle(keyring, restrict_link))
		ret = -EDEADLK;
	else
		keyring->restrict_link = restrict_link;

	up_write(&keyring_serialise_restrict_sem);
	up_write(&keyring->sem);

	if (ret < 0) {
		key_put(restrict_link->key);
		kfree(restrict_link);
	}

error:
	if (restrict_type)
		key_type_put(restrict_type);

	return ret;
}
EXPORT_SYMBOL(keyring_restrict);
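/*
 * Illustrative sketch: keyring_restrict() is what KEYCTL_RESTRICT_KEYRING
 * ends up calling.  Passing a NULL type applies the reject-everything
 * restriction; passing a type name and an option string asks that type's
 * ->lookup_restriction() to build the checker.  The helper below and its
 * name are hypothetical and shown only for illustration.
 */
static int __maybe_unused example_restrict_keyring_sketch(struct key *keyring)
{
	/* Lock the keyring against all further additions. */
	return keyring_restrict(make_key_ref(keyring, true), NULL, NULL);
}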
/*
 * Search the given keyring for a key that might be updated.
 *
 * The caller must guarantee that the keyring is a keyring and that the
 * permission is granted to modify the keyring as no check is made here.  The
 * caller must also hold a lock on the keyring semaphore.
 *
 * Returns a pointer to the found key with usage count incremented if
 * successful and returns NULL if not found.  Revoked and invalidated keys are
 * skipped over.
 *
 * If successful, the possession indicator is propagated from the keyring ref
 * to the returned key reference.
 */
key_ref_t find_key_to_update(key_ref_t keyring_ref,
			     const struct keyring_index_key *index_key)
{
	struct key *keyring, *key;
	const void *object;

	keyring = key_ref_to_ptr(keyring_ref);

	kenter("{%d},{%s,%s}",
	       keyring->serial, index_key->type->name, index_key->description);

	object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
				  index_key);

	if (object)
		goto found;

	kleave(" = NULL [not found]");
	return NULL;

found:
	key = keyring_ptr_to_key(object);
	if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
			  (1 << KEY_FLAG_REVOKED))) {
		kleave(" = NULL [x]");
		return NULL;
	}
	__key_get(key);
	kleave(" = {%d}", key->serial);
	return make_key_ref(key, is_key_possessed(keyring_ref));
}
/*
 * Find a keyring with the specified name.
 *
 * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
 * user in the current user namespace are considered.  If @uid_keyring is %true,
 * the keyring additionally must have been allocated as a user or user session
 * keyring; otherwise, it must grant Search permission directly to the caller.
 *
 * Returns a pointer to the keyring with the keyring's refcount having been
 * incremented on success.  -ENOKEY is returned if a key could not be found.
 */
struct key *find_keyring_by_name(const char *name, bool uid_keyring)
{
	struct user_namespace *ns = current_user_ns();
	struct key *keyring;

	if (!name)
		return ERR_PTR(-EINVAL);

	read_lock(&keyring_name_lock);

	/* Search the list of published keyring names for one with a matching
	 * name that grants Search permission and that hasn't been revoked
	 */
	list_for_each_entry(keyring, &ns->keyring_name_list, name_link) {
		if (!kuid_has_mapping(ns, keyring->user->uid))
			continue;

		if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
			continue;

		if (strcmp(keyring->description, name) != 0)
			continue;

		if (uid_keyring) {
			if (!test_bit(KEY_FLAG_UID_KEYRING,
				      &keyring->flags))
				continue;
		} else {
			if (key_permission(make_key_ref(keyring, 0),
					   KEY_NEED_SEARCH) < 0)
				continue;
		}

		/* we've got a match but we might end up racing with
		 * key_cleanup() if the keyring is currently 'dead'
		 * (ie. it has a zero usage count) */
		if (!refcount_inc_not_zero(&keyring->usage))
			continue;
		keyring->last_used_at = ktime_get_real_seconds();
		goto out;
	}

	keyring = ERR_PTR(-ENOKEY);
out:
	read_unlock(&keyring_name_lock);
	return keyring;
}
static int keyring_detect_cycle_iterator(const void *object,
					 void *iterator_data)
{
	struct keyring_search_context *ctx = iterator_data;
	const struct key *key = keyring_ptr_to_key(object);

	kenter("{%d}", key->serial);

	/* We might get a keyring with matching index-key that is nonetheless a
	 * different keyring. */
	if (key != ctx->match_data.raw_data)
		return 0;

	ctx->result = ERR_PTR(-EDEADLK);
	return 1;
}

/*
 * See if a cycle will be created by inserting acyclic tree B in acyclic
 * tree A at the topmost level (ie: as a direct child of A).
 *
 * Since we are adding B to A at the top level, checking for cycles should just
 * be a matter of seeing if node A is somewhere in tree B.
 */
static int keyring_detect_cycle(struct key *A, struct key *B)
{
	struct keyring_search_context ctx = {
		.index_key		= A->index_key,
		.match_data.raw_data	= A,
		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
		.iterator		= keyring_detect_cycle_iterator,
		.flags			= (KEYRING_SEARCH_NO_STATE_CHECK |
					   KEYRING_SEARCH_NO_UPDATE_TIME |
					   KEYRING_SEARCH_NO_CHECK_PERM |
					   KEYRING_SEARCH_DETECT_TOO_DEEP |
					   KEYRING_SEARCH_RECURSE),
	};

	rcu_read_lock();
	search_nested_keyrings(B, &ctx);
	rcu_read_unlock();
	return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
}
/*
 * Lock keyring for link.
 */
int __key_link_lock(struct key *keyring,
		    const struct keyring_index_key *index_key)
	__acquires(&keyring->sem)
	__acquires(&keyring_serialise_link_lock)
{
	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	down_write(&keyring->sem);

	/* Serialise link/link calls to prevent parallel calls causing a cycle
	 * when linking two keyrings in opposite orders.
	 */
	if (index_key->type == &key_type_keyring)
		mutex_lock(&keyring_serialise_link_lock);

	return 0;
}
/*
 * Lock keyrings for move (link/unlink combination).
 */
int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
		    const struct keyring_index_key *index_key)
	__acquires(&l_keyring->sem)
	__acquires(&u_keyring->sem)
	__acquires(&keyring_serialise_link_lock)
{
	if (l_keyring->type != &key_type_keyring ||
	    u_keyring->type != &key_type_keyring)
		return -ENOTDIR;

	/* We have to be very careful here to take the keyring locks in the
	 * right order, lest we open ourselves to deadlocking against another
	 * move operation.
	 */
	if (l_keyring < u_keyring) {
		down_write(&l_keyring->sem);
		down_write_nested(&u_keyring->sem, 1);
	} else {
		down_write(&u_keyring->sem);
		down_write_nested(&l_keyring->sem, 1);
	}

	/* Serialise link/link calls to prevent parallel calls causing a cycle
	 * when linking two keyrings in opposite orders.
	 */
	if (index_key->type == &key_type_keyring)
		mutex_lock(&keyring_serialise_link_lock);

	return 0;
}
/*
 * Preallocate memory so that a key can be linked into a keyring.
 */
int __key_link_begin(struct key *keyring,
		     const struct keyring_index_key *index_key,
		     struct assoc_array_edit **_edit)
{
	struct assoc_array_edit *edit;
	int ret;

	kenter("%d,%s,%s,",
	       keyring->serial, index_key->type->name, index_key->description);

	BUG_ON(index_key->desc_len == 0);
	BUG_ON(*_edit != NULL);

	*_edit = NULL;

	ret = -EKEYREVOKED;
	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
		goto error;

	/* Create an edit script that will insert/replace the key in the
	 * keyring tree.
	 */
	edit = assoc_array_insert(&keyring->keys,
				  &keyring_assoc_array_ops,
				  index_key,
				  NULL);
	if (IS_ERR(edit)) {
		ret = PTR_ERR(edit);
		goto error;
	}

	/* If we're not replacing a link in-place then we're going to need some
	 * extra quota.
	 */
	if (!edit->dead_leaf) {
		ret = key_payload_reserve(keyring,
					  keyring->datalen + KEYQUOTA_LINK_BYTES);
		if (ret < 0)
			goto error_cancel;
	}

	*_edit = edit;
	kleave(" = 0");
	return 0;

error_cancel:
	assoc_array_cancel_edit(edit);
error:
	kleave(" = %d", ret);
	return ret;
}
/*
 * Check already instantiated keys aren't going to be a problem.
 *
 * The caller must have called __key_link_begin().  Don't need to call this for
 * keys that were created since __key_link_begin() was called.
 */
int __key_link_check_live_key(struct key *keyring, struct key *key)
{
	if (key->type == &key_type_keyring)
		/* check that we aren't going to create a cycle by linking one
		 * keyring to another */
		return keyring_detect_cycle(keyring, key);
	return 0;
}
/*
 * Link a key into a keyring.
 *
 * Must be called with __key_link_begin() having been called.  Discards any
 * already extant link to matching key if there is one, so that each keyring
 * holds at most one link to any given key of a particular type+description
 * combination.
 */
void __key_link(struct key *key, struct assoc_array_edit **_edit)
{
	__key_get(key);
	assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
	assoc_array_apply_edit(*_edit);
	*_edit = NULL;
}
/*
 * Finish linking a key into a keyring.
 *
 * Must be called with __key_link_begin() having been called.
 */
void __key_link_end(struct key *keyring,
		    const struct keyring_index_key *index_key,
		    struct assoc_array_edit *edit)
	__releases(&keyring->sem)
	__releases(&keyring_serialise_link_lock)
{
	BUG_ON(index_key->type == NULL);
	kenter("%d,%s,", keyring->serial, index_key->type->name);

	if (edit) {
		if (!edit->dead_leaf) {
			key_payload_reserve(keyring,
				keyring->datalen - KEYQUOTA_LINK_BYTES);
		}
		assoc_array_cancel_edit(edit);
	}
	up_write(&keyring->sem);

	if (index_key->type == &key_type_keyring)
		mutex_unlock(&keyring_serialise_link_lock);
}
/*
 * Check addition of keys to restricted keyrings.
 */
static int __key_link_check_restriction(struct key *keyring, struct key *key)
{
	if (!keyring->restrict_link || !keyring->restrict_link->check)
		return 0;
	return keyring->restrict_link->check(keyring, key->type, &key->payload,
					     keyring->restrict_link->key);
}
/**
 * key_link - Link a key to a keyring
 * @keyring: The keyring to make the link in.
 * @key: The key to link to.
 *
 * Make a link in a keyring to a key, such that the keyring holds a reference
 * on that key and the key can potentially be found by searching that keyring.
 *
 * This function will write-lock the keyring's semaphore and will consume some
 * of the user's key data quota to hold the link.
 *
 * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
 * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
 * full, -EDQUOT if there is insufficient key data quota remaining to add
 * another link or -ENOMEM if there's insufficient memory.
 *
 * It is assumed that the caller has checked that it is permitted for a link to
 * be made (the keyring should have Write permission and the key Link
 * permission).
 */
int key_link(struct key *keyring, struct key *key)
{
	struct assoc_array_edit *edit = NULL;
	int ret;

	kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage));

	key_check(keyring);
	key_check(key);

	ret = __key_link_lock(keyring, &key->index_key);
	if (ret < 0)
		goto error;

	ret = __key_link_begin(keyring, &key->index_key, &edit);
	if (ret < 0)
		goto error_end;

	kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
	ret = __key_link_check_restriction(keyring, key);
	if (ret == 0)
		ret = __key_link_check_live_key(keyring, key);
	if (ret == 0)
		__key_link(key, &edit);

error_end:
	__key_link_end(keyring, &key->index_key, edit);
error:
	kleave(" = %d {%d,%d}", ret, keyring->serial, refcount_read(&keyring->usage));
	return ret;
}
EXPORT_SYMBOL(key_link);
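/*
 * Illustrative sketch: key_link() and key_unlink() are the usual pairing for
 * temporarily exposing a key through a keyring.  The helper and its name are
 * hypothetical and shown only for illustration; real callers must also check
 * that they hold Write permission on the keyring and Link permission on the
 * key.
 */
static int __maybe_unused example_link_then_unlink_sketch(struct key *keyring,
							   struct key *key)
{
	int ret;

	ret = key_link(keyring, key);	/* keyring now holds a ref on key */
	if (ret < 0)
		return ret;

	/* ... use the key while it is findable via the keyring ... */

	return key_unlink(keyring, key);	/* drop the link again */
}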
/*
 * Lock a keyring for unlink.
 */
static int __key_unlink_lock(struct key *keyring)
	__acquires(&keyring->sem)
{
	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	down_write(&keyring->sem);
	return 0;
}
/*
 * Begin the process of unlinking a key from a keyring.
 */
static int __key_unlink_begin(struct key *keyring, struct key *key,
			      struct assoc_array_edit **_edit)
{
	struct assoc_array_edit *edit;

	BUG_ON(*_edit != NULL);

	edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
				  &key->index_key);
	if (IS_ERR(edit))
		return PTR_ERR(edit);

	if (!edit)
		return -ENOENT;

	*_edit = edit;
	return 0;
}
/*
 * Apply an unlink change.
 */
static void __key_unlink(struct key *keyring, struct key *key,
			 struct assoc_array_edit **_edit)
{
	assoc_array_apply_edit(*_edit);
	*_edit = NULL;
	key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
}
/*
 * Finish unlinking a key from a keyring.
 */
static void __key_unlink_end(struct key *keyring,
			     struct key *key,
			     struct assoc_array_edit *edit)
	__releases(&keyring->sem)
{
	if (edit)
		assoc_array_cancel_edit(edit);
	up_write(&keyring->sem);
}
/**
 * key_unlink - Unlink the first link to a key from a keyring.
 * @keyring: The keyring to remove the link from.
 * @key: The key the link is to.
 *
 * Remove a link from a keyring to a key.
 *
 * This function will write-lock the keyring's semaphore.
 *
 * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
 * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
 * memory.
 *
 * It is assumed that the caller has checked that it is permitted for a link to
 * be removed (the keyring should have Write permission; no permissions are
 * required on the key).
 */
int key_unlink(struct key *keyring, struct key *key)
{
	struct assoc_array_edit *edit = NULL;
	int ret;

	key_check(keyring);
	key_check(key);

	ret = __key_unlink_lock(keyring);
	if (ret < 0)
		return ret;

	ret = __key_unlink_begin(keyring, key, &edit);
	if (ret == 0)
		__key_unlink(keyring, key, &edit);
	__key_unlink_end(keyring, key, edit);
	return ret;
}
EXPORT_SYMBOL(key_unlink);
/**
 * key_move - Move a key from one keyring to another
 * @key: The key to move
 * @from_keyring: The keyring to remove the link from.
 * @to_keyring: The keyring to make the link in.
 * @flags: Qualifying flags, such as KEYCTL_MOVE_EXCL.
 *
 * Make a link in @to_keyring to a key, such that the keyring holds a reference
 * on that key and the key can potentially be found by searching that keyring
 * whilst simultaneously removing a link to the key from @from_keyring.
 *
 * This function will write-lock both keyrings' semaphores and will consume
 * some of the user's key data quota to hold the link on @to_keyring.
 *
 * Returns 0 if successful, -ENOTDIR if either keyring isn't a keyring,
 * -EKEYREVOKED if either keyring has been revoked, -ENFILE if the second
 * keyring is full, -EDQUOT if there is insufficient key data quota remaining
 * to add another link or -ENOMEM if there's insufficient memory.  If
 * KEYCTL_MOVE_EXCL is set, then -EEXIST will be returned if there's already a
 * matching key in @to_keyring.
 *
 * It is assumed that the caller has checked that it is permitted for a link to
 * be made (the keyring should have Write permission and the key Link
 * permission).
 */
int key_move(struct key *key,
	     struct key *from_keyring,
	     struct key *to_keyring,
	     unsigned int flags)
{
	struct assoc_array_edit *from_edit = NULL, *to_edit = NULL;
	int ret;

	kenter("%d,%d,%d", key->serial, from_keyring->serial, to_keyring->serial);

	if (from_keyring == to_keyring)
		return 0;

	key_check(key);
	key_check(from_keyring);
	key_check(to_keyring);

	ret = __key_move_lock(from_keyring, to_keyring, &key->index_key);
	if (ret < 0)
		goto out;
	ret = __key_unlink_begin(from_keyring, key, &from_edit);
	if (ret < 0)
		goto error;
	ret = __key_link_begin(to_keyring, &key->index_key, &to_edit);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (to_edit->dead_leaf && (flags & KEYCTL_MOVE_EXCL))
		goto error;

	ret = __key_link_check_restriction(to_keyring, key);
	if (ret < 0)
		goto error;
	ret = __key_link_check_live_key(to_keyring, key);
	if (ret < 0)
		goto error;

	__key_unlink(from_keyring, key, &from_edit);
	__key_link(key, &to_edit);
error:
	__key_link_end(to_keyring, &key->index_key, to_edit);
	__key_unlink_end(from_keyring, key, from_edit);
out:
	kleave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(key_move);
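/*
 * Illustrative sketch: key_move() as used by KEYCTL_MOVE.  With
 * KEYCTL_MOVE_EXCL the move fails with -EEXIST rather than displacing a
 * matching key already present in the destination.  The helper name is
 * hypothetical and shown only for illustration.
 */
static int __maybe_unused example_move_key_sketch(struct key *key,
						  struct key *from,
						  struct key *to)
{
	return key_move(key, from, to, KEYCTL_MOVE_EXCL);
}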
/**
 * keyring_clear - Clear a keyring
 * @keyring: The keyring to clear.
 *
 * Clear the contents of the specified keyring.
 *
 * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
 */
int keyring_clear(struct key *keyring)
{
	struct assoc_array_edit *edit;
	int ret;

	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	down_write(&keyring->sem);

	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
	if (IS_ERR(edit)) {
		ret = PTR_ERR(edit);
	} else {
		if (edit)
			assoc_array_apply_edit(edit);
		key_payload_reserve(keyring, 0);
		ret = 0;
	}

	up_write(&keyring->sem);
	return ret;
}
EXPORT_SYMBOL(keyring_clear);
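/*
 * Illustrative sketch: keyring_clear() drops every link in a single edit,
 * which is how KEYCTL_CLEAR and keyring revocation discard contents.  The
 * helper name is hypothetical and shown only for illustration.
 */
static int __maybe_unused example_empty_keyring_sketch(struct key *keyring)
{
	/* Removes all links and returns the keyring's quota for them. */
	return keyring_clear(keyring);
}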
/*
 * Dispose of the links from a revoked keyring.
 *
 * This is called with the key sem write-locked.
 */
static void keyring_revoke(struct key *keyring)
{
	struct assoc_array_edit *edit;

	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
	if (!IS_ERR(edit)) {
		if (edit)
			assoc_array_apply_edit(edit);
		key_payload_reserve(keyring, 0);
	}
}
/* Decide whether a key should be kept during garbage collection. */
static bool keyring_gc_select_iterator(void *object, void *iterator_data)
{
	struct key *key = keyring_ptr_to_key(object);
	time64_t *limit = iterator_data;

	if (key_is_dead(key, *limit))
		return false;
	key_get(key);
	return true;
}

/* See whether a keyring contains any dead keys that need collecting. */
static int keyring_gc_check_iterator(const void *object, void *iterator_data)
{
	const struct key *key = keyring_ptr_to_key(object);
	time64_t *limit = iterator_data;

	key_check(key);
	return key_is_dead(key, *limit);
}
/*
 * Garbage collect pointers from a keyring.
 *
 * Not called with any locks held.  The keyring's key struct will not be
 * deallocated under us as only our caller may deallocate it.
 */
void keyring_gc(struct key *keyring, time64_t limit)
{
	int result;

	kenter("%x{%s}", keyring->serial, keyring->description ?: "");

	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
			      (1 << KEY_FLAG_REVOKED)))
		goto dont_gc;

	/* scan the keyring looking for dead keys */
	rcu_read_lock();
	result = assoc_array_iterate(&keyring->keys,
				     keyring_gc_check_iterator, &limit);
	rcu_read_unlock();
	if (result == true)
		goto do_gc;

dont_gc:
	kleave(" [no gc]");
	return;

do_gc:
	down_write(&keyring->sem);
	assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
		       keyring_gc_select_iterator, &limit);
	up_write(&keyring->sem);
	kleave(" [gc]");
}
/*
 * Garbage collect restriction pointers from a keyring.
 *
 * Keyring restrictions are associated with a key type, and must be cleaned
 * up if the key type is unregistered.  The restriction is altered to always
 * reject additional keys so a keyring cannot be opened up by unregistering
 * a key type.
 *
 * Not called with any keyring locks held.  The keyring's key struct will not
 * be deallocated under us as only our caller may deallocate it.
 *
 * The caller is required to hold key_types_sem and dead_type->sem.  This is
 * fulfilled by key_gc_keytype() holding the locks on behalf of
 * key_garbage_collector(), which it invokes on a workqueue.
 */
void keyring_restriction_gc(struct key *keyring, struct key_type *dead_type)
{
	struct key_restriction *keyres;

	kenter("%x{%s}", keyring->serial, keyring->description ?: "");

	/*
	 * keyring->restrict_link is only assigned at key allocation time
	 * or with the key type locked, so the only values that could be
	 * concurrently assigned to keyring->restrict_link are for key
	 * types other than dead_type.  Given this, it's ok to check
	 * the key type before acquiring keyring->sem.
	 */
	if (!dead_type || !keyring->restrict_link ||
	    keyring->restrict_link->keytype != dead_type) {
		kleave(" [no restriction gc]");
		return;
	}

	/* Lock the keyring to ensure that a link is not in progress */
	down_write(&keyring->sem);

	keyres = keyring->restrict_link;

	keyres->check = restrict_link_reject;

	key_put(keyres->key);
	keyres->key = NULL;
	keyres->keytype = NULL;

	up_write(&keyring->sem);

	kleave(" [restriction gc]");
}