/* Linux 4.19.133 — net/netfilter/nft_payload.c */
1 /*
2 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
3 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
9 * Development of this code funded by Astaro AG (http://www.astaro.com/)
12 #include <linux/kernel.h>
13 #include <linux/if_vlan.h>
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/netlink.h>
17 #include <linux/netfilter.h>
18 #include <linux/netfilter/nf_tables.h>
19 #include <net/netfilter/nf_tables_core.h>
20 #include <net/netfilter/nf_tables.h>
21 /* For layer 4 checksum field offset. */
22 #include <linux/tcp.h>
23 #include <linux/udp.h>
24 #include <linux/icmpv6.h>
/* Rebuild and copy the on-wire VLAN ethernet header into the user buffer
 * when the tag was stripped by hardware offload (it then lives in skb
 * metadata, not in the linear data).
 *
 * @d:      destination register words (written as raw bytes)
 * @offset: byte offset into the logical link-layer header (as if the
 *          VLAN tag were still inline)
 * @len:    number of bytes requested
 *
 * Returns true on success, false if the skb is too short to copy from.
 */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		/* Request starts inside the MAC addresses / ethertype part. */
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		/* The inline ethertype slot must show the VLAN TPID that
		 * offload removed, not the encapsulated protocol.
		 */
		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;	/* continue at the TCI field below */
	} else if (offset >= VLAN_ETH_HLEN) {
		/* Entirely past the (virtual) VLAN header: compensate for the
		 * 4 tag bytes that are absent from the linear skb data.
		 */
		offset -= VLAN_HLEN;
		goto skip;
	}

	/* Synthesize the TCI / encapsulated-protocol part of the header. */
	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	dst_u8 += vlan_len;
	/* Remaining bytes come from the real packet data after the header;
	 * offset is already adjusted to linear-data coordinates here.
	 */
 skip:
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
/* Expression eval: copy priv->len bytes at priv->offset from the selected
 * header base of the packet into destination register priv->dreg.
 * On any failure the verdict is set to NFT_BREAK.
 */
static void nft_payload_eval(const struct nft_expr *expr,
			     struct nft_regs *regs,
			     const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	/* Zero the register word receiving the final chunk so that trailing
	 * bytes are defined when len is not a multiple of NFT_REG32_SIZE.
	 */
	dest[priv->len / NFT_REG32_SIZE] = 0;
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		/* Offloaded VLAN tag: the header must be reconstructed. */
		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}
/* Netlink attribute policy for payload expressions; all attributes are
 * 32-bit values, validated further in the init callbacks.
 */
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};
127 static int nft_payload_init(const struct nft_ctx *ctx,
128 const struct nft_expr *expr,
129 const struct nlattr * const tb[])
131 struct nft_payload *priv = nft_expr_priv(expr);
133 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
134 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
135 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
136 priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
138 return nft_validate_register_store(ctx, priv->dreg, NULL,
139 NFT_DATA_VALUE, priv->len);
142 static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
144 const struct nft_payload *priv = nft_expr_priv(expr);
146 if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
147 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
148 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
149 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
150 goto nla_put_failure;
151 return 0;
153 nla_put_failure:
154 return -1;
/* Generic payload-load ops; used for any offset/length combination. */
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};
/* Fast-path ops, selected in nft_payload_select_ops() for small aligned
 * power-of-two loads from non-link-layer bases. Non-static: referenced
 * from nf_tables_core for inlined evaluation.
 */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};
173 static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
175 *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
176 if (*sum == 0)
177 *sum = CSUM_MANGLED_0;
180 static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
182 struct udphdr *uh, _uh;
184 uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
185 if (!uh)
186 return false;
188 return (__force bool)uh->check;
/* Compute the absolute skb offset of the layer-4 checksum field for the
 * packet's transport protocol. Returns 0 and fills *l4csum_offset on
 * success, -1 for unsupported protocols or a UDP packet whose checksum
 * is disabled (field is zero).
 */
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		/* Fall through. */
	case IPPROTO_UDPLITE:
		/* UDP and UDP-Lite share the header layout; only plain UDP
		 * may legitimately have checksumming disabled.
		 */
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	/* Make the header-relative offset absolute within the skb. */
	*l4csum_offset += pkt->xt.thoff;
	return 0;
}
/* Update the layer-4 checksum after payload bytes were mangled, replacing
 * the old data's partial sum (fsum) with the new one (tsum).
 *
 * Returns 0 on success or when no update is needed, -1 on failure to
 * read or write the checksum field.
 */
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			/* Keep the hardware-computed full checksum in sync
			 * with the mangled payload as well.
			 */
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		/* CHECKSUM_PARTIAL: the field holds the pseudo-header sum;
		 * adjust it without the zero-avoidance folding above.
		 */
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (!skb_make_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
/* Recompute an internet checksum at a caller-specified offset
 * (NFT_PAYLOAD_CSUM_INET) after the payload was changed.
 * Note: the @src parameter is currently unused; the partial sums were
 * already computed by the caller.
 * Returns 0 on success, -1 if the field cannot be read or written.
 */
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
/* Expression eval for payload *set*: write priv->len bytes from source
 * register into the packet at the configured base+offset, fixing up
 * checksums as configured. Sets NFT_BREAK on any failure.
 */
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	/* csum_offset is relative to the header base, NOT to the mangled
	 * bytes — compute it before adding the payload offset.
	 */
	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	/* For transport-header mangling with CHECKSUM_PARTIAL, the device
	 * will compute the checksum later, so skip the fixup here.
	 */
	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);	/* old bytes */
		tsum = csum_partial(src, priv->len, 0);		/* new bytes */

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (!skb_make_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
325 static int nft_payload_set_init(const struct nft_ctx *ctx,
326 const struct nft_expr *expr,
327 const struct nlattr * const tb[])
329 struct nft_payload_set *priv = nft_expr_priv(expr);
331 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
332 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
333 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
334 priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
336 if (tb[NFTA_PAYLOAD_CSUM_TYPE])
337 priv->csum_type =
338 ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
339 if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
340 priv->csum_offset =
341 ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
342 if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
343 u32 flags;
345 flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
346 if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
347 return -EINVAL;
349 priv->csum_flags = flags;
352 switch (priv->csum_type) {
353 case NFT_PAYLOAD_CSUM_NONE:
354 case NFT_PAYLOAD_CSUM_INET:
355 break;
356 default:
357 return -EOPNOTSUPP;
360 return nft_validate_register_load(priv->sreg, priv->len);
363 static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
365 const struct nft_payload_set *priv = nft_expr_priv(expr);
367 if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
368 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
369 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
370 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
371 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
372 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
373 htonl(priv->csum_offset)) ||
374 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
375 goto nla_put_failure;
376 return 0;
378 nla_put_failure:
379 return -1;
/* Ops for the payload *set* (mangle) variant of the expression. */
static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};
390 static const struct nft_expr_ops *
391 nft_payload_select_ops(const struct nft_ctx *ctx,
392 const struct nlattr * const tb[])
394 enum nft_payload_bases base;
395 unsigned int offset, len;
397 if (tb[NFTA_PAYLOAD_BASE] == NULL ||
398 tb[NFTA_PAYLOAD_OFFSET] == NULL ||
399 tb[NFTA_PAYLOAD_LEN] == NULL)
400 return ERR_PTR(-EINVAL);
402 base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
403 switch (base) {
404 case NFT_PAYLOAD_LL_HEADER:
405 case NFT_PAYLOAD_NETWORK_HEADER:
406 case NFT_PAYLOAD_TRANSPORT_HEADER:
407 break;
408 default:
409 return ERR_PTR(-EOPNOTSUPP);
412 if (tb[NFTA_PAYLOAD_SREG] != NULL) {
413 if (tb[NFTA_PAYLOAD_DREG] != NULL)
414 return ERR_PTR(-EINVAL);
415 return &nft_payload_set_ops;
418 if (tb[NFTA_PAYLOAD_DREG] == NULL)
419 return ERR_PTR(-EINVAL);
421 offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
422 len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
424 if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
425 base != NFT_PAYLOAD_LL_HEADER)
426 return &nft_payload_fast_ops;
427 else
428 return &nft_payload_ops;
431 struct nft_expr_type nft_payload_type __read_mostly = {
432 .name = "payload",
433 .select_ops = nft_payload_select_ops,
434 .policy = nft_payload_policy,
435 .maxattr = NFTA_PAYLOAD_MAX,
436 .owner = THIS_MODULE,