// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "limits.h"
#include "object.h"
#include "ruleset.h"

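/*
 * Single allocation point for rulesets and domains: struct_size() sizes the
 * trailing flexible access_masks[] array for @num_layers layers.
 */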
static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
	struct landlock_ruleset *new_ruleset;

	new_ruleset =
		kzalloc(struct_size(new_ruleset, access_masks, num_layers),
			GFP_KERNEL_ACCOUNT);
	if (!new_ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&new_ruleset->usage, 1);
	mutex_init(&new_ruleset->lock);
	new_ruleset->root_inode = RB_ROOT;

#if IS_ENABLED(CONFIG_INET)
	new_ruleset->root_net_port = RB_ROOT;
#endif /* IS_ENABLED(CONFIG_INET) */

	new_ruleset->num_layers = num_layers;
	/*
	 * hierarchy = NULL
	 * num_rules = 0
	 * access_masks[] = 0
	 */
	return new_ruleset;
}

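/*
 * Creates the single-layer ruleset backing the landlock_create_ruleset(2)
 * syscall.  An all-zero request is rejected early because it could never
 * restrict anything.
 */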
struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask,
			const access_mask_t net_access_mask,
			const access_mask_t scope_mask)
{
	struct landlock_ruleset *new_ruleset;

	/* Informs about useless ruleset. */
	if (!fs_access_mask && !net_access_mask && !scope_mask)
		return ERR_PTR(-ENOMSG);
	new_ruleset = create_ruleset(1);
	if (IS_ERR(new_ruleset))
		return new_ruleset;
	if (fs_access_mask)
		landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
	if (net_access_mask)
		landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
	if (scope_mask)
		landlock_add_scope_mask(new_ruleset, scope_mask, 0);
	return new_ruleset;
}

static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

static bool is_object_pointer(const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return true;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return false;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return false;
	}
}

static struct landlock_rule *
create_rule(const struct landlock_id id,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			   GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	if (is_object_pointer(id.type)) {
		/* This should be caught by insert_rule(). */
		WARN_ON_ONCE(!id.key.object);
		landlock_get_object(id.key.object);
	}

	new_rule->key = id.key;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}

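/*
 * Maps a key type to the matching rb-tree root of @ruleset; the network
 * port tree only exists when CONFIG_INET is enabled.
 */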
static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
				const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return &ruleset->root_inode;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return &ruleset->root_net_port;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return ERR_PTR(-EINVAL);
	}
}

static void free_rule(struct landlock_rule *const rule,
		      const enum landlock_key_type key_type)
{
	might_sleep();
	if (!rule)
		return;
	if (is_object_pointer(key_type))
		landlock_put_object(rule->key.object);
	kfree(rule);
}

static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @id: The ID to build the new rule with.  The underlying kernel object, if
 *      any, must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level.  In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       const struct landlock_id id,
		       const struct landlock_layer (*const layers)[],
		       const size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;
	struct rb_root *root;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!layers))
		return -ENOENT;

	if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
		return -ENOENT;

	root = get_root(ruleset, id.type);
	if (IS_ERR(root))
		return PTR_ERR(root);

	walker_node = &root->rb_node;
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->key.data != id.key.data) {
			parent_node = *walker_node;
			if (this->key.data < id.key.data)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.
		 */
		new_rule = create_rule(id, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, root);
		free_rule(this, id.type);
		return 0;
	}

	/* There is no match for @id. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(id, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, root);
	ruleset->num_rules++;
	return 0;
}

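/*
 * Example of the two insert_rule() modes, with illustrative access values
 * that are not part of the original file:
 *
 * - Extension (level == 0): a ruleset rule holding { READ_FILE } for an
 *   inode that gets the same inode added with { READ_DIR } ends up with a
 *   single rule carrying { READ_FILE | READ_DIR } (boolean OR).
 *
 * - Merge (level != 0): a domain rule stack { level 1: READ_FILE } merged
 *   with a ruleset granting { READ_FILE | READ_DIR } becomes
 *   { level 1: READ_FILE, level 2: READ_FILE | READ_DIR }; an access is
 *   later granted only if every layer allows it (boolean AND).
 */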
static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
			 const struct landlock_id id,
			 const access_mask_t access)
{
	struct landlock_layer layers[] = { {
		.access = access,
		/* When @level is zero, insert_rule() extends @ruleset. */
		.level = 0,
	} };

	build_check_layer();
	return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}

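/*
 * Hierarchy nodes are reference-counted: a domain keeps its whole parent
 * chain alive through the ->parent pointers set in inherit_ruleset().
 */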
static void get_hierarchy(struct landlock_hierarchy *const hierarchy)
{
	if (hierarchy)
		refcount_inc(&hierarchy->usage);
}

static void put_hierarchy(struct landlock_hierarchy *hierarchy)
{
	while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
		const struct landlock_hierarchy *const freeme = hierarchy;

		hierarchy = hierarchy->parent;
		kfree(freeme);
	}
}

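/*
 * Walks every rule of the @src tree selected by @key_type and re-inserts
 * it into @dst with a single layer at @dst's top level.
 */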
static int merge_tree(struct landlock_ruleset *const dst,
		      struct landlock_ruleset *const src,
		      const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *src_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&dst->lock);
	lockdep_assert_held(&src->lock);

	src_root = get_root(src, key_type);
	if (IS_ERR(src_root))
		return PTR_ERR(src_root);

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		if (WARN_ON_ONCE(walker_rule->num_layers != 1))
			return -EINVAL;

		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0))
			return -EINVAL;

		layers[0].access = walker_rule->layers[0].access;

		err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
		if (err)
			return err;
	}
	return err;
}

static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset() */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->access_masks[dst->num_layers - 1] =
		landlock_upgrade_handled_access_masks(src->access_masks[0]);

	/* Merges the @src inode tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Merges the @src network port tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}

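/*
 * Copies all rules of one @parent tree into @child, keeping each rule's
 * full layer stack intact (unlike merge_tree(), which stacks a new layer).
 */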
static int inherit_tree(struct landlock_ruleset *const parent,
			struct landlock_ruleset *const child,
			const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *parent_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&parent->lock);
	lockdep_assert_held(&child->lock);

	parent_root = get_root(parent, key_type);
	if (IS_ERR(parent_root))
		return PTR_ERR(parent_root);

	/* Copies the @parent inode or network tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     parent_root, node) {
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		err = insert_rule(child, id, &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			return err;
	}
	return err;
}

static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent inode tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Copies the @parent network port tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->access_masks, parent->access_masks,
	       flex_array_size(parent, access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}

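/*
 * Sleepable teardown: frees every rule of both trees, drops the hierarchy
 * reference, then frees the ruleset itself.
 */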
static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
					     node)
		free_rule(freeme, LANDLOCK_KEY_INODE);

#if IS_ENABLED(CONFIG_INET)
	rbtree_postorder_for_each_entry_safe(freeme, next,
					     &ruleset->root_net_port, node)
		free_rule(freeme, LANDLOCK_KEY_NET_PORT);
#endif /* IS_ENABLED(CONFIG_INET) */

	put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}

void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
	might_sleep();
	if (ruleset && refcount_dec_and_test(&ruleset->usage))
		free_ruleset(ruleset);
}

static void free_ruleset_work(struct work_struct *const work)
{
	struct landlock_ruleset *ruleset;

	ruleset = container_of(work, struct landlock_ruleset, work_free);
	free_ruleset(ruleset);
}

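/*
 * Non-sleepable variant of landlock_put_ruleset(): the actual freeing is
 * punted to the system workqueue, so the final put can happen in contexts
 * where sleeping is not allowed.
 */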
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
	if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
		INIT_WORK(&ruleset->work_free, free_ruleset_work);
		schedule_work(&ruleset->work_free);
	}
}

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom __free(landlock_put_ruleset) = NULL;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return no_free_ptr(new_dom);

	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy)
		return ERR_PTR(-ENOMEM);

	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		return ERR_PTR(err);

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		return ERR_PTR(err);

	return no_free_ptr(new_dom);
}

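/*
 * Illustration (not part of the original file): enforcing ruleset R1 and
 * then R2 on a task yields domains D1 = merge(NULL, R1) with one layer,
 * then D2 = merge(D1, R2) with two layers; D2->hierarchy->parent points to
 * D1->hierarchy, and an access must be granted by both layers of D2.
 */
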
/*
 * The returned access has the same lifetime as @ruleset.
 */
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
		   const struct landlock_id id)
{
	const struct rb_root *root;
	const struct rb_node *node;

	root = get_root((struct landlock_ruleset *)ruleset, id.type);
	if (IS_ERR(root))
		return NULL;
	node = root->rb_node;

	while (node) {
		struct landlock_rule *this =
			rb_entry(node, struct landlock_rule, node);

		if (this->key.data == id.key.data)
			return this;
		if (this->key.data < id.key.data)
			node = node->rb_right;
		else
			node = node->rb_left;
	}
	return NULL;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 * @masks_array_size must be equal to ARRAY_SIZE(*layer_masks).
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
bool landlock_unmask_layers(const struct landlock_rule *const rule,
			    const access_mask_t access_request,
			    layer_mask_t (*const layer_masks)[],
			    const size_t masks_array_size)
{
	size_t layer_level;

	if (!access_request || !layer_masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested access,
	 * regardless of its position in the layer stack.  We must then check
	 * the remaining layers for each inode, from the first added layer to
	 * the last one.  When there are multiple requested accesses, for each
	 * policy layer, the full set of requested accesses may not be granted
	 * by only one rule, but by the union (binary OR) of multiple rules.
	 * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
	 */
	for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
		const struct landlock_layer *const layer =
			&rule->layers[layer_level];
		const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
		const unsigned long access_req = access_request;
		unsigned long access_bit;
		bool is_empty;

		/*
		 * Records in @layer_masks which layer grants access to each
		 * requested access.
		 */
		is_empty = true;
		for_each_set_bit(access_bit, &access_req, masks_array_size) {
			if (layer->access & BIT_ULL(access_bit))
				(*layer_masks)[access_bit] &= ~layer_bit;
			is_empty = is_empty && !(*layer_masks)[access_bit];
		}
		if (is_empty)
			return true;
	}
	return false;
}

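/*
 * Function-pointer type matching landlock_get_fs_access_mask() and
 * landlock_get_net_access_mask(), so landlock_init_layer_masks() can pick
 * the accessor once and reuse it for every layer.
 */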
typedef access_mask_t
get_access_mask_t(const struct landlock_ruleset *const ruleset,
		  const u16 layer_level);

/**
 * landlock_init_layer_masks - Initialize layer masks from an access request
 *
 * Populates @layer_masks such that for each access right in @access_request,
 * the bits for all the layers are set where this access right is handled.
 *
 * @domain: The domain that defines the current restrictions.
 * @access_request: The requested access rights to check.
 * @layer_masks: It must contain %LANDLOCK_NUM_ACCESS_FS or
 * %LANDLOCK_NUM_ACCESS_NET elements according to @key_type.
 * @key_type: The key type to switch between access masks of different types.
 *
 * Returns: An access mask where each access right bit is set which is handled
 * in any of the active layers in @domain.
 */
access_mask_t
landlock_init_layer_masks(const struct landlock_ruleset *const domain,
			  const access_mask_t access_request,
			  layer_mask_t (*const layer_masks)[],
			  const enum landlock_key_type key_type)
{
	access_mask_t handled_accesses = 0;
	size_t layer_level, num_access;
	get_access_mask_t *get_access_mask;

	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		get_access_mask = landlock_get_fs_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_FS;
		break;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		get_access_mask = landlock_get_net_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_NET;
		break;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	memset(layer_masks, 0,
	       array_size(sizeof((*layer_masks)[0]), num_access));

	/* An empty access request can happen because of O_WRONLY | O_RDWR. */
	if (!access_request)
		return 0;

	/* Saves all handled accesses per layer. */
	for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
		const unsigned long access_req = access_request;
		const access_mask_t access_mask =
			get_access_mask(domain, layer_level);
		unsigned long access_bit;

		for_each_set_bit(access_bit, &access_req, num_access) {
			if (BIT_ULL(access_bit) & access_mask) {
				(*layer_masks)[access_bit] |=
					BIT_ULL(layer_level);
				handled_accesses |= BIT_ULL(access_bit);
			}
		}
	}
	return handled_accesses;
}

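/*
 * Worked example (illustrative, not part of the original file): in a
 * two-layer domain where only layer 0 handles LANDLOCK_ACCESS_FS_READ_FILE
 * and both layers handle LANDLOCK_ACCESS_FS_EXECUTE, requesting both
 * rights yields (*layer_masks)[READ_FILE] = 0b01 and
 * (*layer_masks)[EXECUTE] = 0b11; landlock_unmask_layers() then clears
 * these bits as matching rules are found along the path walk.
 */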