/* Source: Linux 5.2 — net/netfilter/nft_payload.c
 * (blob 680bd9f38a81e4554e162636b30a8cb1ae56d184)
 */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
23 /* add vlan header into the user buffer for if tag was removed by offloads */
24 static bool
25 nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
27 int mac_off = skb_mac_header(skb) - skb->data;
28 u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
29 struct vlan_ethhdr veth;
31 vlanh = (u8 *) &veth;
32 if (offset < ETH_HLEN) {
33 u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
35 if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
36 return false;
38 veth.h_vlan_proto = skb->vlan_proto;
40 memcpy(dst_u8, vlanh + offset, ethlen);
42 len -= ethlen;
43 if (len == 0)
44 return true;
46 dst_u8 += ethlen;
47 offset = ETH_HLEN;
48 } else if (offset >= VLAN_ETH_HLEN) {
49 offset -= VLAN_HLEN;
50 goto skip;
53 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
54 veth.h_vlan_encapsulated_proto = skb->protocol;
56 vlanh += offset;
58 vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
59 memcpy(dst_u8, vlanh, vlan_len);
61 len -= vlan_len;
62 if (!len)
63 return true;
65 dst_u8 += vlan_len;
66 skip:
67 return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
70 void nft_payload_eval(const struct nft_expr *expr,
71 struct nft_regs *regs,
72 const struct nft_pktinfo *pkt)
74 const struct nft_payload *priv = nft_expr_priv(expr);
75 const struct sk_buff *skb = pkt->skb;
76 u32 *dest = &regs->data[priv->dreg];
77 int offset;
79 dest[priv->len / NFT_REG32_SIZE] = 0;
80 switch (priv->base) {
81 case NFT_PAYLOAD_LL_HEADER:
82 if (!skb_mac_header_was_set(skb))
83 goto err;
85 if (skb_vlan_tag_present(skb)) {
86 if (!nft_payload_copy_vlan(dest, skb,
87 priv->offset, priv->len))
88 goto err;
89 return;
91 offset = skb_mac_header(skb) - skb->data;
92 break;
93 case NFT_PAYLOAD_NETWORK_HEADER:
94 offset = skb_network_offset(skb);
95 break;
96 case NFT_PAYLOAD_TRANSPORT_HEADER:
97 if (!pkt->tprot_set)
98 goto err;
99 offset = pkt->xt.thoff;
100 break;
101 default:
102 BUG();
104 offset += priv->offset;
106 if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
107 goto err;
108 return;
109 err:
110 regs->verdict.code = NFT_BREAK;
113 static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
114 [NFTA_PAYLOAD_SREG] = { .type = NLA_U32 },
115 [NFTA_PAYLOAD_DREG] = { .type = NLA_U32 },
116 [NFTA_PAYLOAD_BASE] = { .type = NLA_U32 },
117 [NFTA_PAYLOAD_OFFSET] = { .type = NLA_U32 },
118 [NFTA_PAYLOAD_LEN] = { .type = NLA_U32 },
119 [NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 },
120 [NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 },
123 static int nft_payload_init(const struct nft_ctx *ctx,
124 const struct nft_expr *expr,
125 const struct nlattr * const tb[])
127 struct nft_payload *priv = nft_expr_priv(expr);
129 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
130 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
131 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
132 priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
134 return nft_validate_register_store(ctx, priv->dreg, NULL,
135 NFT_DATA_VALUE, priv->len);
138 static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
140 const struct nft_payload *priv = nft_expr_priv(expr);
142 if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
143 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
144 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
145 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
146 goto nla_put_failure;
147 return 0;
149 nla_put_failure:
150 return -1;
153 static const struct nft_expr_ops nft_payload_ops = {
154 .type = &nft_payload_type,
155 .size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
156 .eval = nft_payload_eval,
157 .init = nft_payload_init,
158 .dump = nft_payload_dump,
161 const struct nft_expr_ops nft_payload_fast_ops = {
162 .type = &nft_payload_type,
163 .size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
164 .eval = nft_payload_eval,
165 .init = nft_payload_init,
166 .dump = nft_payload_dump,
169 static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
171 *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
172 if (*sum == 0)
173 *sum = CSUM_MANGLED_0;
176 static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
178 struct udphdr *uh, _uh;
180 uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
181 if (!uh)
182 return false;
184 return (__force bool)uh->check;
187 static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
188 struct sk_buff *skb,
189 unsigned int *l4csum_offset)
191 switch (pkt->tprot) {
192 case IPPROTO_TCP:
193 *l4csum_offset = offsetof(struct tcphdr, check);
194 break;
195 case IPPROTO_UDP:
196 if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
197 return -1;
198 /* Fall through. */
199 case IPPROTO_UDPLITE:
200 *l4csum_offset = offsetof(struct udphdr, check);
201 break;
202 case IPPROTO_ICMPV6:
203 *l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
204 break;
205 default:
206 return -1;
209 *l4csum_offset += pkt->xt.thoff;
210 return 0;
213 static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
214 struct sk_buff *skb,
215 __wsum fsum, __wsum tsum)
217 int l4csum_offset;
218 __sum16 sum;
220 /* If we cannot determine layer 4 checksum offset or this packet doesn't
221 * require layer 4 checksum recalculation, skip this packet.
223 if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
224 return 0;
226 if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
227 return -1;
229 /* Checksum mangling for an arbitrary amount of bytes, based on
230 * inet_proto_csum_replace*() functions.
232 if (skb->ip_summed != CHECKSUM_PARTIAL) {
233 nft_csum_replace(&sum, fsum, tsum);
234 if (skb->ip_summed == CHECKSUM_COMPLETE) {
235 skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
236 tsum);
238 } else {
239 sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
240 tsum));
243 if (!skb_make_writable(skb, l4csum_offset + sizeof(sum)) ||
244 skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
245 return -1;
247 return 0;
250 static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
251 __wsum fsum, __wsum tsum, int csum_offset)
253 __sum16 sum;
255 if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
256 return -1;
258 nft_csum_replace(&sum, fsum, tsum);
259 if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
260 skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
261 return -1;
263 return 0;
266 static void nft_payload_set_eval(const struct nft_expr *expr,
267 struct nft_regs *regs,
268 const struct nft_pktinfo *pkt)
270 const struct nft_payload_set *priv = nft_expr_priv(expr);
271 struct sk_buff *skb = pkt->skb;
272 const u32 *src = &regs->data[priv->sreg];
273 int offset, csum_offset;
274 __wsum fsum, tsum;
276 switch (priv->base) {
277 case NFT_PAYLOAD_LL_HEADER:
278 if (!skb_mac_header_was_set(skb))
279 goto err;
280 offset = skb_mac_header(skb) - skb->data;
281 break;
282 case NFT_PAYLOAD_NETWORK_HEADER:
283 offset = skb_network_offset(skb);
284 break;
285 case NFT_PAYLOAD_TRANSPORT_HEADER:
286 if (!pkt->tprot_set)
287 goto err;
288 offset = pkt->xt.thoff;
289 break;
290 default:
291 BUG();
294 csum_offset = offset + priv->csum_offset;
295 offset += priv->offset;
297 if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
298 (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
299 skb->ip_summed != CHECKSUM_PARTIAL)) {
300 fsum = skb_checksum(skb, offset, priv->len, 0);
301 tsum = csum_partial(src, priv->len, 0);
303 if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
304 nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
305 goto err;
307 if (priv->csum_flags &&
308 nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
309 goto err;
312 if (!skb_make_writable(skb, max(offset + priv->len, 0)) ||
313 skb_store_bits(skb, offset, src, priv->len) < 0)
314 goto err;
316 return;
317 err:
318 regs->verdict.code = NFT_BREAK;
321 static int nft_payload_set_init(const struct nft_ctx *ctx,
322 const struct nft_expr *expr,
323 const struct nlattr * const tb[])
325 struct nft_payload_set *priv = nft_expr_priv(expr);
327 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
328 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
329 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
330 priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
332 if (tb[NFTA_PAYLOAD_CSUM_TYPE])
333 priv->csum_type =
334 ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
335 if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
336 priv->csum_offset =
337 ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
338 if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
339 u32 flags;
341 flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
342 if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
343 return -EINVAL;
345 priv->csum_flags = flags;
348 switch (priv->csum_type) {
349 case NFT_PAYLOAD_CSUM_NONE:
350 case NFT_PAYLOAD_CSUM_INET:
351 break;
352 default:
353 return -EOPNOTSUPP;
356 return nft_validate_register_load(priv->sreg, priv->len);
359 static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
361 const struct nft_payload_set *priv = nft_expr_priv(expr);
363 if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
364 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
365 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
366 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
367 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
368 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
369 htonl(priv->csum_offset)) ||
370 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
371 goto nla_put_failure;
372 return 0;
374 nla_put_failure:
375 return -1;
378 static const struct nft_expr_ops nft_payload_set_ops = {
379 .type = &nft_payload_type,
380 .size = NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
381 .eval = nft_payload_set_eval,
382 .init = nft_payload_set_init,
383 .dump = nft_payload_set_dump,
386 static const struct nft_expr_ops *
387 nft_payload_select_ops(const struct nft_ctx *ctx,
388 const struct nlattr * const tb[])
390 enum nft_payload_bases base;
391 unsigned int offset, len;
393 if (tb[NFTA_PAYLOAD_BASE] == NULL ||
394 tb[NFTA_PAYLOAD_OFFSET] == NULL ||
395 tb[NFTA_PAYLOAD_LEN] == NULL)
396 return ERR_PTR(-EINVAL);
398 base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
399 switch (base) {
400 case NFT_PAYLOAD_LL_HEADER:
401 case NFT_PAYLOAD_NETWORK_HEADER:
402 case NFT_PAYLOAD_TRANSPORT_HEADER:
403 break;
404 default:
405 return ERR_PTR(-EOPNOTSUPP);
408 if (tb[NFTA_PAYLOAD_SREG] != NULL) {
409 if (tb[NFTA_PAYLOAD_DREG] != NULL)
410 return ERR_PTR(-EINVAL);
411 return &nft_payload_set_ops;
414 if (tb[NFTA_PAYLOAD_DREG] == NULL)
415 return ERR_PTR(-EINVAL);
417 offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
418 len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
420 if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
421 base != NFT_PAYLOAD_LL_HEADER)
422 return &nft_payload_fast_ops;
423 else
424 return &nft_payload_ops;
427 struct nft_expr_type nft_payload_type __read_mostly = {
428 .name = "payload",
429 .select_ops = nft_payload_select_ops,
430 .policy = nft_payload_policy,
431 .maxattr = NFTA_PAYLOAD_MAX,
432 .owner = THIS_MODULE,