// SPDX-License-Identifier: GPL-2.0+
/*
 * IPv6 IOAM implementation
 *
 * Justin Iurman <justin.iurman@uliege.be>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/ioam6.h>
#include <linux/ioam6_genl.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>

#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/ioam6.h>
#include <net/sch_generic.h>

static void ioam6_ns_release(struct ioam6_namespace *ns)
{
	kfree_rcu(ns, rcu);
}

static void ioam6_sc_release(struct ioam6_schema *sc)
{
	kfree_rcu(sc, rcu);
}

static void ioam6_free_ns(void *ptr, void *arg)
{
	struct ioam6_namespace *ns = (struct ioam6_namespace *)ptr;

	if (ns)
		ioam6_ns_release(ns);
}

static void ioam6_free_sc(void *ptr, void *arg)
{
	struct ioam6_schema *sc = (struct ioam6_schema *)ptr;

	if (sc)
		ioam6_sc_release(sc);
}

static int ioam6_ns_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct ioam6_namespace *ns = obj;

	return (ns->id != *(__be16 *)arg->key);
}

static int ioam6_sc_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct ioam6_schema *sc = obj;

	return (sc->id != *(u32 *)arg->key);
}

static const struct rhashtable_params rht_ns_params = {
	.key_len		= sizeof(__be16),
	.key_offset		= offsetof(struct ioam6_namespace, id),
	.head_offset		= offsetof(struct ioam6_namespace, head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= ioam6_ns_cmpfn,
};

static const struct rhashtable_params rht_sc_params = {
	.key_len		= sizeof(u32),
	.key_offset		= offsetof(struct ioam6_schema, id),
	.head_offset		= offsetof(struct ioam6_schema, head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= ioam6_sc_cmpfn,
};
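
/*
 * Both hash tables key directly on the id embedded in the object
 * (key_offset): namespaces by their __be16 id, schemas by their host-order
 * u32 id. The compare callbacks above return non-zero on mismatch, which is
 * what rhashtable expects from obj_cmpfn.
 */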

static struct genl_family ioam6_genl_family;

static const struct nla_policy ioam6_genl_policy_addns[] = {
	[IOAM6_ATTR_NS_ID]	= { .type = NLA_U16 },
	[IOAM6_ATTR_NS_DATA]	= { .type = NLA_U32 },
	[IOAM6_ATTR_NS_DATA_WIDE] = { .type = NLA_U64 },
};

static const struct nla_policy ioam6_genl_policy_delns[] = {
	[IOAM6_ATTR_NS_ID]	= { .type = NLA_U16 },
};

static const struct nla_policy ioam6_genl_policy_addsc[] = {
	[IOAM6_ATTR_SC_ID]	= { .type = NLA_U32 },
	[IOAM6_ATTR_SC_DATA]	= { .type = NLA_BINARY,
				    .len = IOAM6_MAX_SCHEMA_DATA_LEN },
};

static const struct nla_policy ioam6_genl_policy_delsc[] = {
	[IOAM6_ATTR_SC_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy ioam6_genl_policy_ns_sc[] = {
	[IOAM6_ATTR_NS_ID]	= { .type = NLA_U16 },
	[IOAM6_ATTR_SC_ID]	= { .type = NLA_U32 },
	[IOAM6_ATTR_SC_NONE]	= { .type = NLA_FLAG },
};
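
/*
 * For IOAM6_CMD_NS_SET_SCHEMA, userspace passes either IOAM6_ATTR_SC_ID to
 * attach a schema to the namespace, or the IOAM6_ATTR_SC_NONE flag to detach
 * the one currently attached (see ioam6_genl_ns_set_schema()).
 */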

static int ioam6_genl_addns(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	u64 data64;
	u32 data32;
	__be16 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID])
		return -EINVAL;

	id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
	if (ns) {
		err = -EEXIST;
		goto out_unlock;
	}

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns) {
		err = -ENOMEM;
		goto out_unlock;
	}

	ns->id = id;

	data32 = nla_get_u32_default(info->attrs[IOAM6_ATTR_NS_DATA],
				     IOAM6_U32_UNAVAILABLE);

	data64 = nla_get_u64_default(info->attrs[IOAM6_ATTR_NS_DATA_WIDE],
				     IOAM6_U64_UNAVAILABLE);

	ns->data = cpu_to_be32(data32);
	ns->data_wide = cpu_to_be64(data64);

	err = rhashtable_lookup_insert_fast(&nsdata->namespaces, &ns->head,
					    rht_ns_params);
	if (err)
		kfree(ns);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

static int ioam6_genl_delns(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	struct ioam6_schema *sc;
	__be16 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID])
		return -EINVAL;

	id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
	if (!ns) {
		err = -ENOENT;
		goto out_unlock;
	}

	sc = rcu_dereference_protected(ns->schema,
				       lockdep_is_held(&nsdata->lock));

	err = rhashtable_remove_fast(&nsdata->namespaces, &ns->head,
				     rht_ns_params);
	if (err)
		goto out_unlock;

	if (sc)
		rcu_assign_pointer(sc->ns, NULL);

	ioam6_ns_release(ns);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

static int __ioam6_genl_dumpns_element(struct ioam6_namespace *ns,
				       u32 portid, u32 seq, u32 flags,
				       struct sk_buff *skb, u8 cmd)
{
	struct ioam6_schema *sc;
	u64 data64;
	u32 data32;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	data32 = be32_to_cpu(ns->data);
	data64 = be64_to_cpu(ns->data_wide);

	if (nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id)) ||
	    (data32 != IOAM6_U32_UNAVAILABLE &&
	     nla_put_u32(skb, IOAM6_ATTR_NS_DATA, data32)) ||
	    (data64 != IOAM6_U64_UNAVAILABLE &&
	     nla_put_u64_64bit(skb, IOAM6_ATTR_NS_DATA_WIDE,
			       data64, IOAM6_ATTR_PAD)))
		goto nla_put_failure;

	rcu_read_lock();

	sc = rcu_dereference(ns->schema);
	if (sc && nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id)) {
		rcu_read_unlock();
		goto nla_put_failure;
	}

	rcu_read_unlock();

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ioam6_genl_dumpns_start(struct netlink_callback *cb)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[0] = (long)iter;
	}

	rhashtable_walk_enter(&nsdata->namespaces, iter);

	return 0;
}

static int ioam6_genl_dumpns_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	rhashtable_walk_exit(iter);
	kfree(iter);

	return 0;
}

static int ioam6_genl_dumpns(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rhashtable_iter *iter;
	struct ioam6_namespace *ns;
	int err;

	iter = (struct rhashtable_iter *)cb->args[0];
	rhashtable_walk_start(iter);

	for (;;) {
		ns = rhashtable_walk_next(iter);

		if (IS_ERR(ns)) {
			if (PTR_ERR(ns) == -EAGAIN)
				continue;
			err = PTR_ERR(ns);
			goto done;
		} else if (!ns) {
			break;
		}

		err = __ioam6_genl_dumpns_element(ns,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NLM_F_MULTI, skb,
						  IOAM6_CMD_DUMP_NAMESPACES);
		if (err)
			goto done;
	}

	err = skb->len;

done:
	rhashtable_walk_stop(iter);
	return err;
}

static int ioam6_genl_addsc(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	int len, len_aligned, err;
	struct ioam6_schema *sc;
	u32 id;

	if (!info->attrs[IOAM6_ATTR_SC_ID] || !info->attrs[IOAM6_ATTR_SC_DATA])
		return -EINVAL;

	id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
	if (sc) {
		err = -EEXIST;
		goto out_unlock;
	}

	len = nla_len(info->attrs[IOAM6_ATTR_SC_DATA]);
	len_aligned = ALIGN(len, 4);

	sc = kzalloc(sizeof(*sc) + len_aligned, GFP_KERNEL);
	if (!sc) {
		err = -ENOMEM;
		goto out_unlock;
	}

	sc->id = id;
	sc->len = len_aligned;
	sc->hdr = cpu_to_be32(sc->id | ((u8)(sc->len / 4) << 24));
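
	/*
	 * The schema header matches the Opaque State Snapshot header of
	 * RFC 9197: the top byte carries the schema length in multiples of
	 * 4 octets, the low 24 bits carry the schema id. For example
	 * (illustrative values), id 0x00abcdef with 8 bytes of aligned data
	 * yields hdr 0x02abcdef on the wire.
	 */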

	nla_memcpy(sc->data, info->attrs[IOAM6_ATTR_SC_DATA], len);

	err = rhashtable_lookup_insert_fast(&nsdata->schemas, &sc->head,
					    rht_sc_params);
	if (err)
		kfree(sc);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

static int ioam6_genl_delsc(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	struct ioam6_schema *sc;
	u32 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_SC_ID])
		return -EINVAL;

	id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
	if (!sc) {
		err = -ENOENT;
		goto out_unlock;
	}

	ns = rcu_dereference_protected(sc->ns, lockdep_is_held(&nsdata->lock));

	err = rhashtable_remove_fast(&nsdata->schemas, &sc->head,
				     rht_sc_params);
	if (err)
		goto out_unlock;

	if (ns)
		rcu_assign_pointer(ns->schema, NULL);

	ioam6_sc_release(sc);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

static int __ioam6_genl_dumpsc_element(struct ioam6_schema *sc,
				       u32 portid, u32 seq, u32 flags,
				       struct sk_buff *skb, u8 cmd)
{
	struct ioam6_namespace *ns;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id) ||
	    nla_put(skb, IOAM6_ATTR_SC_DATA, sc->len, sc->data))
		goto nla_put_failure;

	rcu_read_lock();

	ns = rcu_dereference(sc->ns);
	if (ns && nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id))) {
		rcu_read_unlock();
		goto nla_put_failure;
	}

	rcu_read_unlock();

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ioam6_genl_dumpsc_start(struct netlink_callback *cb)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[0] = (long)iter;
	}

	rhashtable_walk_enter(&nsdata->schemas, iter);

	return 0;
}

static int ioam6_genl_dumpsc_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	rhashtable_walk_exit(iter);
	kfree(iter);

	return 0;
}

static int ioam6_genl_dumpsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rhashtable_iter *iter;
	struct ioam6_schema *sc;
	int err;

	iter = (struct rhashtable_iter *)cb->args[0];
	rhashtable_walk_start(iter);

	for (;;) {
		sc = rhashtable_walk_next(iter);

		if (IS_ERR(sc)) {
			if (PTR_ERR(sc) == -EAGAIN)
				continue;
			err = PTR_ERR(sc);
			goto done;
		} else if (!sc) {
			break;
		}

		err = __ioam6_genl_dumpsc_element(sc,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NLM_F_MULTI, skb,
						  IOAM6_CMD_DUMP_SCHEMAS);
		if (err)
			goto done;
	}

	err = skb->len;

done:
	rhashtable_walk_stop(iter);
	return err;
}

static int ioam6_genl_ns_set_schema(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_namespace *ns, *ns_ref;
	struct ioam6_schema *sc, *sc_ref;
	struct ioam6_pernet_data *nsdata;
	__be16 ns_id;
	u32 sc_id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID] ||
	    (!info->attrs[IOAM6_ATTR_SC_ID] &&
	     !info->attrs[IOAM6_ATTR_SC_NONE]))
		return -EINVAL;

	ns_id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &ns_id, rht_ns_params);
	if (!ns) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (info->attrs[IOAM6_ATTR_SC_NONE]) {
		sc = NULL;
	} else {
		sc_id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
		sc = rhashtable_lookup_fast(&nsdata->schemas, &sc_id,
					    rht_sc_params);
		if (!sc) {
			err = -ENOENT;
			goto out_unlock;
		}
	}

	sc_ref = rcu_dereference_protected(ns->schema,
					   lockdep_is_held(&nsdata->lock));
	if (sc_ref)
		rcu_assign_pointer(sc_ref->ns, NULL);
	rcu_assign_pointer(ns->schema, sc);

	if (sc) {
		ns_ref = rcu_dereference_protected(sc->ns,
						   lockdep_is_held(&nsdata->lock));
		if (ns_ref)
			rcu_assign_pointer(ns_ref->schema, NULL);
		rcu_assign_pointer(sc->ns, ns);
	}

	err = 0;

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

static const struct genl_ops ioam6_genl_ops[] = {
	{
		.cmd	= IOAM6_CMD_ADD_NAMESPACE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_addns,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_addns,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_addns) - 1,
	},
	{
		.cmd	= IOAM6_CMD_DEL_NAMESPACE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_delns,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_delns,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_delns) - 1,
	},
	{
		.cmd	= IOAM6_CMD_DUMP_NAMESPACES,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.start	= ioam6_genl_dumpns_start,
		.dumpit	= ioam6_genl_dumpns,
		.done	= ioam6_genl_dumpns_done,
		.flags	= GENL_ADMIN_PERM,
	},
	{
		.cmd	= IOAM6_CMD_ADD_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_addsc,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_addsc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_addsc) - 1,
	},
	{
		.cmd	= IOAM6_CMD_DEL_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_delsc,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_delsc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_delsc) - 1,
	},
	{
		.cmd	= IOAM6_CMD_DUMP_SCHEMAS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.start	= ioam6_genl_dumpsc_start,
		.dumpit	= ioam6_genl_dumpsc,
		.done	= ioam6_genl_dumpsc_done,
		.flags	= GENL_ADMIN_PERM,
	},
	{
		.cmd	= IOAM6_CMD_NS_SET_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_ns_set_schema,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_ns_sc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_ns_sc) - 1,
	},
};
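
/*
 * All commands above are restricted to CAP_NET_ADMIN (GENL_ADMIN_PERM) and
 * operate on per-network-namespace state. Illustrative userspace usage,
 * assuming the "ip ioam" frontend shipped with iproute2 (the exact syntax is
 * defined by that tool, not by this file):
 *
 *   ip ioam namespace add 123 data 0xdeadbeef
 *   ip ioam schema add 7 0x112233
 *   ip ioam namespace set 123 schema 7
 *   ip ioam namespace show
 */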

#define IOAM6_GENL_EV_GRP_OFFSET 0

static const struct genl_multicast_group ioam6_mcgrps[] = {
	[IOAM6_GENL_EV_GRP_OFFSET] = { .name = IOAM6_GENL_EV_GRP_NAME,
				       .flags = GENL_MCAST_CAP_NET_ADMIN },
};
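
/*
 * Userspace processes subscribed to this multicast group (subscription
 * requires CAP_NET_ADMIN, per GENL_MCAST_CAP_NET_ADMIN) receive the
 * IOAM6_EVENT_TRACE notifications built by ioam6_event() below.
 */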

static int ioam6_event_put_trace(struct sk_buff *skb,
				 struct ioam6_trace_hdr *trace,
				 unsigned int len)
{
	if (nla_put_u16(skb, IOAM6_EVENT_ATTR_TRACE_NAMESPACE,
			be16_to_cpu(trace->namespace_id)) ||
	    nla_put_u8(skb, IOAM6_EVENT_ATTR_TRACE_NODELEN, trace->nodelen) ||
	    nla_put_u32(skb, IOAM6_EVENT_ATTR_TRACE_TYPE,
			be32_to_cpu(trace->type_be32)) ||
	    nla_put(skb, IOAM6_EVENT_ATTR_TRACE_DATA,
		    len - sizeof(struct ioam6_trace_hdr) - trace->remlen * 4,
		    trace->data + trace->remlen * 4))
		return 1;

	return 0;
}

void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
		 void *opt, unsigned int opt_len)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&ioam6_genl_family, net,
				IOAM6_GENL_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &ioam6_genl_family, 0, type);
	if (!nlh)
		goto nla_put_failure;

	switch (type) {
	case IOAM6_EVENT_UNSPEC:
		WARN_ON_ONCE(1);
		break;
	case IOAM6_EVENT_TRACE:
		if (ioam6_event_put_trace(skb, (struct ioam6_trace_hdr *)opt,
					  opt_len))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(skb, nlh);
	genlmsg_multicast_netns(&ioam6_genl_family, net, skb, 0,
				IOAM6_GENL_EV_GRP_OFFSET, gfp);
	return;

nla_put_failure:
	nlmsg_free(skb);
}

static struct genl_family ioam6_genl_family __ro_after_init = {
	.name		= IOAM6_GENL_NAME,
	.version	= IOAM6_GENL_VERSION,
	.netnsok	= true,
	.parallel_ops	= true,
	.ops		= ioam6_genl_ops,
	.n_ops		= ARRAY_SIZE(ioam6_genl_ops),
	.resv_start_op	= IOAM6_CMD_NS_SET_SCHEMA + 1,
	.mcgrps		= ioam6_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(ioam6_mcgrps),
	.module		= THIS_MODULE,
};

struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

	return rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
}
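
/*
 * Datapath lookup: callers (e.g. the IOAM6 hop-by-hop option handling) are
 * expected to resolve the namespace id carried in the packet with this
 * helper and then call ioam6_fill_trace_data() under rcu_read_lock().
 */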

static void __ioam6_fill_trace_data(struct sk_buff *skb,
				    struct ioam6_namespace *ns,
				    struct ioam6_trace_hdr *trace,
				    struct ioam6_schema *sc,
				    u8 sclen, bool is_input)
{
	struct timespec64 ts;
	ktime_t tstamp;
	u64 raw64;
	u32 raw32;
	u16 raw16;
	u8 *data;
	u8 byte;

	data = trace->data + trace->remlen * 4 - trace->nodelen * 4 - sclen * 4;
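
	/*
	 * Nodes fill the pre-allocated trace space from the end of the free
	 * region toward its start: remlen counts the free 4-octet words still
	 * available, so this node's block begins nodelen (+ sclen for the
	 * opaque schema) words before the end of that free space. remlen is
	 * decremented by the caller once the block has been written (see
	 * ioam6_fill_trace_data()).
	 */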

	/* hop_lim and node_id */
	if (trace->type.bit0) {
		byte = ipv6_hdr(skb)->hop_limit;
		if (is_input)
			byte--;

		raw32 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id;

		*(__be32 *)data = cpu_to_be32((byte << 24) | raw32);
		data += sizeof(__be32);
	}

	/* ingress_if_id and egress_if_id */
	if (trace->type.bit1) {
		if (!skb->dev)
			raw16 = IOAM6_U16_UNAVAILABLE;
		else
			raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id);

		*(__be16 *)data = cpu_to_be16(raw16);
		data += sizeof(__be16);

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
			raw16 = IOAM6_U16_UNAVAILABLE;
		else
			raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);

		*(__be16 *)data = cpu_to_be16(raw16);
		data += sizeof(__be16);
	}

	/* timestamp seconds */
	if (trace->type.bit2) {
		if (!skb->dev) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			tstamp = skb_tstamp_cond(skb, true);
			ts = ktime_to_timespec64(tstamp);

			*(__be32 *)data = cpu_to_be32((u32)ts.tv_sec);
		}
		data += sizeof(__be32);
	}

	/* timestamp subseconds */
	if (trace->type.bit3) {
		if (!skb->dev) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			if (!trace->type.bit2) {
				tstamp = skb_tstamp_cond(skb, true);
				ts = ktime_to_timespec64(tstamp);
			}

			*(__be32 *)data = cpu_to_be32((u32)(ts.tv_nsec / NSEC_PER_USEC));
		}
		data += sizeof(__be32);
	}

	/* transit delay */
	if (trace->type.bit4) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* namespace data */
	if (trace->type.bit5) {
		*(__be32 *)data = ns->data;
		data += sizeof(__be32);
	}

	/* queue depth */
	if (trace->type.bit6) {
		struct netdev_queue *queue;
		struct Qdisc *qdisc;
		__u32 qlen, backlog;

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			queue = skb_get_tx_queue(skb_dst(skb)->dev, skb);
			qdisc = rcu_dereference(queue->qdisc);
			qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog);

			*(__be32 *)data = cpu_to_be32(backlog);
		}
		data += sizeof(__be32);
	}
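
	/*
	 * The queue depth recorded above is the backlog, in bytes, of the
	 * qdisc attached to the egress tx queue (qdisc_qstats_qlen_backlog()
	 * also returns qlen in packets, which is not used here).
	 */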

	/* checksum complement */
	if (trace->type.bit7) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* hop_lim and node_id (wide) */
	if (trace->type.bit8) {
		byte = ipv6_hdr(skb)->hop_limit;
		if (is_input)
			byte--;

		raw64 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id_wide;

		*(__be64 *)data = cpu_to_be64(((u64)byte << 56) | raw64);
		data += sizeof(__be64);
	}

	/* ingress_if_id and egress_if_id (wide) */
	if (trace->type.bit9) {
		if (!skb->dev)
			raw32 = IOAM6_U32_UNAVAILABLE;
		else
			raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide);

		*(__be32 *)data = cpu_to_be32(raw32);
		data += sizeof(__be32);

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
			raw32 = IOAM6_U32_UNAVAILABLE;
		else
			raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);

		*(__be32 *)data = cpu_to_be32(raw32);
		data += sizeof(__be32);
	}

	/* namespace data (wide) */
	if (trace->type.bit10) {
		*(__be64 *)data = ns->data_wide;
		data += sizeof(__be64);
	}

	/* buffer occupancy */
	if (trace->type.bit11) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit12 undefined: filled with empty value */
	if (trace->type.bit12) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit13 undefined: filled with empty value */
	if (trace->type.bit13) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit14 undefined: filled with empty value */
	if (trace->type.bit14) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit15 undefined: filled with empty value */
	if (trace->type.bit15) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit16 undefined: filled with empty value */
	if (trace->type.bit16) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit17 undefined: filled with empty value */
	if (trace->type.bit17) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit18 undefined: filled with empty value */
	if (trace->type.bit18) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit19 undefined: filled with empty value */
	if (trace->type.bit19) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit20 undefined: filled with empty value */
	if (trace->type.bit20) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit21 undefined: filled with empty value */
	if (trace->type.bit21) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* opaque state snapshot */
	if (trace->type.bit22) {
		if (!sc) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE >> 8);
		} else {
			*(__be32 *)data = sc->hdr;
			data += sizeof(__be32);

			memcpy(data, sc->data, sc->len);
		}
	}
}

/* called with rcu_read_lock() */
void ioam6_fill_trace_data(struct sk_buff *skb,
			   struct ioam6_namespace *ns,
			   struct ioam6_trace_hdr *trace,
			   bool is_input)
{
	struct ioam6_schema *sc;
	u8 sclen = 0;

	/* Skip if Overflow flag is set */
	if (trace->overflow)
		return;

	/* NodeLen does not include Opaque State Snapshot length. We need to
	 * take it into account if the corresponding bit is set (bit 22) and
	 * if the current IOAM namespace has an active schema attached to it
	 */
	sc = rcu_dereference(ns->schema);
	if (trace->type.bit22) {
		sclen = sizeof_field(struct ioam6_schema, hdr) / 4;
		if (sc)
			sclen += sc->len / 4;
	}

	/* If there is no space remaining, we set the Overflow flag and we
	 * skip without filling the trace
	 */
	if (!trace->remlen || trace->remlen < trace->nodelen + sclen) {
		trace->overflow = 1;
		return;
	}

	__ioam6_fill_trace_data(skb, ns, trace, sc, sclen, is_input);
	trace->remlen -= trace->nodelen + sclen;
}

static int __net_init ioam6_net_init(struct net *net)
{
	struct ioam6_pernet_data *nsdata;
	int err = -ENOMEM;

	nsdata = kzalloc(sizeof(*nsdata), GFP_KERNEL);
	if (!nsdata)
		goto out;

	mutex_init(&nsdata->lock);
	net->ipv6.ioam6_data = nsdata;

	err = rhashtable_init(&nsdata->namespaces, &rht_ns_params);
	if (err)
		goto free_nsdata;

	err = rhashtable_init(&nsdata->schemas, &rht_sc_params);
	if (err)
		goto free_rht_ns;

out:
	return err;
free_rht_ns:
	rhashtable_destroy(&nsdata->namespaces);
free_nsdata:
	kfree(nsdata);
	net->ipv6.ioam6_data = NULL;
	goto out;
}

static void __net_exit ioam6_net_exit(struct net *net)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

	rhashtable_free_and_destroy(&nsdata->namespaces, ioam6_free_ns, NULL);
	rhashtable_free_and_destroy(&nsdata->schemas, ioam6_free_sc, NULL);

	kfree(nsdata);
}

static struct pernet_operations ioam6_net_ops = {
	.init = ioam6_net_init,
	.exit = ioam6_net_exit,
};

int __init ioam6_init(void)
{
	int err = register_pernet_subsys(&ioam6_net_ops);

	if (err)
		goto out;

	err = genl_register_family(&ioam6_genl_family);
	if (err)
		goto out_unregister_pernet_subsys;

#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
	err = ioam6_iptunnel_init();
	if (err)
		goto out_unregister_genl;
#endif

	pr_info("In-situ OAM (IOAM) with IPv6\n");

out:
	return err;
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
out_unregister_genl:
	genl_unregister_family(&ioam6_genl_family);
#endif
out_unregister_pernet_subsys:
	unregister_pernet_subsys(&ioam6_net_ops);
	goto out;
}

void ioam6_exit(void)
{
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
	ioam6_iptunnel_exit();
#endif
	genl_unregister_family(&ioam6_genl_family);
	unregister_pernet_subsys(&ioam6_net_ops);
}