// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ethtool_netlink.h>
#include <net/udp_tunnel.h>

#include "bitset.h"
#include "common.h"
#include "netlink.h"
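
/* Handlers for the ETHTOOL_MSG_TUNNEL_INFO_GET netlink request, which
 * reports the UDP tunnel port offload tables of a network device.
 */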
const struct nla_policy ethnl_tunnel_info_get_policy[] = {
	[ETHTOOL_A_TUNNEL_INFO_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
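
/* The ETHTOOL_UDP_TUNNEL_TYPE_* values are defined as the bit positions
 * of the corresponding UDP_TUNNEL_TYPE_* flags, hence the ilog2() below.
 */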
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
	      ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
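
/* Attribute size of one port table: the nest itself, the table size and
 * the bitset of tunnel types the table can hold.
 */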
static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
{
	ssize_t size;

	size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				   udp_tunnel_type_names, compact);
	if (size < 0)
		return size;

	return size +
		nla_total_size(0) + /* _UDP_TABLE */
		nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
}
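
/* Calculate the reply size: an _INFO_UDP_PORTS nest holding one table
 * nest per populated device table, plus a synthetic single-entry table
 * when the device always offloads the IANA VXLAN port.
 */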
static ssize_t
ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
			     struct netlink_ext_ack *extack)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	unsigned int i;
	ssize_t ret;
	size_t size;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info) {
		NL_SET_ERR_MSG(extack,
			       "device does not report tunnel offload info");
		return -EOPNOTSUPP;
	}

	size = nla_total_size(0); /* _INFO_UDP_PORTS */

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
						 compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += udp_tunnel_nic_dump_size(req_base->dev, i);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		ret = ethnl_udp_table_reply_size(0, compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}
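
/* Fill the reply, mirroring the layout sized above: for each table dump
 * its capacity, the tunnel types it accepts and its current entries.
 */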
static int
ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
			     struct sk_buff *skb)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	struct nlattr *ports, *table, *entry;
	unsigned int i;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info)
		return -EOPNOTSUPP;

	ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
	if (!ports)
		return -EMSGSIZE;

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
				info->tables[i].n_entries))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &info->tables[i].tunnel_types, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
			goto err_cancel_table;

		nla_nest_end(skb, table);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		u32 zero = 0;	/* static entry, no reprogrammable types */

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &zero, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
		if (!entry)
			goto err_cancel_table;

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 htons(IANA_VXLAN_UDP_PORT)) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(UDP_TUNNEL_TYPE_VXLAN)))
			goto err_cancel_entry;

		nla_nest_end(skb, entry);
		nla_nest_end(skb, table);
	}

	nla_nest_end(skb, ports);

	return 0;

err_cancel_entry:
	nla_nest_cancel(skb, entry);
err_cancel_table:
	nla_nest_cancel(skb, table);
err_cancel_ports:
	nla_nest_cancel(skb, ports);
	return -EMSGSIZE;
}
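
/* ETHTOOL_MSG_TUNNEL_INFO_GET request handler: parse the header, size
 * and allocate the reply under the RTNL lock, fill it and send it back.
 */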
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();
	ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.dev,
				ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
				ETHTOOL_A_TUNNEL_INFO_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
	if (ret)
		goto err_free_msg;
	rtnl_unlock();
	dev_put(req_info.dev);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	dev_put(req_info.dev);
	return ret;
}
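
/* Dump iteration state, kept in the netlink callback's ctx scratch
 * space (size-checked by the BUILD_BUG_ON below).
 */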
struct ethnl_tunnel_info_dump_ctx {
	struct ethnl_req_info	req_info;
	int			pos_hash;
	int			pos_idx;
};
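
/* Dump start: only validate the request header; a device reference in
 * it is dropped since the dump walks all devices.
 */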
int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct nlattr **tb = info->attrs;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	memset(ctx, 0, sizeof(*ctx));

	ret = ethnl_parse_header_dev_get(&ctx->req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	if (ctx->req_info.dev) {
		dev_put(ctx->req_info.dev);
		ctx->req_info.dev = NULL;
	}

	return ret;
}
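
/* Dump one message worth of devices, resuming from the hash bucket and
 * index saved in the context; devices without tunnel offload info are
 * skipped via -EOPNOTSUPP.
 */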
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	int s_idx = ctx->pos_idx;
	int h, idx = 0;
	int ret = 0;
	void *ehdr = NULL;

	rtnl_lock();
	cb->seq = net->dev_base_seq;
	for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		struct hlist_head *head;
		struct net_device *dev;

		head = &net->dev_index_head[h];
		idx = 0;
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;

			ehdr = ethnl_dump_put(skb, cb,
					      ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
			if (!ehdr) {
				ret = -EMSGSIZE;
				goto out;
			}

			ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER);
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				goto out;
			}

			ctx->req_info.dev = dev;
			ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
			ctx->req_info.dev = NULL;
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				if (ret == -EOPNOTSUPP)
					goto cont;
				goto out;
			}
			genlmsg_end(skb, ehdr);
cont:
			idx++;
		}
	}
out:
	rtnl_unlock();
	ctx->pos_hash = h;
	ctx->pos_idx = idx;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}