/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * cls_cgroup.h		Control Group Classifier
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#ifndef _NET_CLS_CGROUP_H
#define _NET_CLS_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
#include <net/sock.h>
#include <net/inet_sock.h>

#ifdef CONFIG_CGROUP_NET_CLASSID
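/* Per-cgroup state of the net_cls controller: the classid configured for
 * the cgroup, embedded alongside the generic cgroup_subsys_state.
 */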
struct cgroup_cls_state {
	struct cgroup_subsys_state css;
	u32 classid;
};

struct cgroup_cls_state *task_cls_state(struct task_struct *p);

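/* Returns the net_cls classid of the cgroup that @p belongs to, or 0 when
 * called from interrupt context, where 'current' is not meaningful.
 */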
static inline u32 task_cls_classid(struct task_struct *p)
{
	u32 classid;

	if (in_interrupt())
		return 0;

	rcu_read_lock();
	classid = container_of(task_css(p, net_cls_cgrp_id),
			       struct cgroup_cls_state, css)->classid;
	rcu_read_unlock();

	return classid;
}

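/* Tags @skcd (typically a socket's sk_cgrp_data) with the classid of the
 * current task's net_cls cgroup so it can be read back at transmit time.
 */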
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
	u32 classid;

	classid = task_cls_classid(current);
	sock_cgroup_set_classid(skcd, classid);
}

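/* Resolves the classid to use for @skb: normally the current task's net_cls
 * classid, falling back to the classid stored in the socket when running in
 * softirq context (see the comment in the body below).
 *
 * As a rough usage sketch (illustrative only, not part of this header), a
 * classifier's classify hook would do something like:
 *
 *	u32 classid = task_get_classid(skb);
 *
 *	if (!classid)
 *		return -1;	// no class configured for this task
 */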
static inline u32 task_get_classid(const struct sk_buff *skb)
{
	u32 classid = task_cls_state(current)->classid;

	/* Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq-based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		struct sock *sk = skb_to_full_sk(skb);

		/* If there is a sock_cgroup_classid we'll use that. */
		if (!sk || !sk_fullsock(sk))
			return 0;

		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
	}

	return classid;
}
#else /* !CONFIG_CGROUP_NET_CLASSID */
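/* With CONFIG_CGROUP_NET_CLASSID disabled, sockets are never tagged and
 * task_get_classid() always reports "no class" (0).
 */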
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
	return 0;
}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */