ARM: dma-api: fix max_pfn off-by-one error in __dma_supported()
[linux/fpc-iii.git] / net / netfilter / nft_payload.c
blob1993af3a2979527362bfa419a73c65f71a789363
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
4 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
6 * Development of this code funded by Astaro AG (http://www.astaro.com/)
7 */
9 #include <linux/kernel.h>
10 #include <linux/if_vlan.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/netlink.h>
14 #include <linux/netfilter.h>
15 #include <linux/netfilter/nf_tables.h>
16 #include <net/netfilter/nf_tables_core.h>
17 #include <net/netfilter/nf_tables.h>
18 #include <net/netfilter/nf_tables_offload.h>
19 /* For layer 4 checksum field offset. */
20 #include <linux/tcp.h>
21 #include <linux/udp.h>
22 #include <linux/icmpv6.h>
23 #include <linux/ip.h>
24 #include <linux/ipv6.h>
26 static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
27 struct vlan_ethhdr *veth)
29 if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
30 return false;
32 veth->h_vlan_proto = skb->vlan_proto;
33 veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
34 veth->h_vlan_encapsulated_proto = skb->protocol;
36 return true;
39 /* add vlan header into the user buffer for if tag was removed by offloads */
40 static bool
41 nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
43 int mac_off = skb_mac_header(skb) - skb->data;
44 u8 *vlanh, *dst_u8 = (u8 *) d;
45 struct vlan_ethhdr veth;
46 u8 vlan_hlen = 0;
48 if ((skb->protocol == htons(ETH_P_8021AD) ||
49 skb->protocol == htons(ETH_P_8021Q)) &&
50 offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
51 vlan_hlen += VLAN_HLEN;
53 vlanh = (u8 *) &veth;
54 if (offset < VLAN_ETH_HLEN + vlan_hlen) {
55 u8 ethlen = len;
57 if (vlan_hlen &&
58 skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
59 return false;
60 else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
61 return false;
63 if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
64 ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen;
66 memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
68 len -= ethlen;
69 if (len == 0)
70 return true;
72 dst_u8 += ethlen;
73 offset = ETH_HLEN + vlan_hlen;
74 } else {
75 offset -= VLAN_HLEN + vlan_hlen;
78 return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
81 void nft_payload_eval(const struct nft_expr *expr,
82 struct nft_regs *regs,
83 const struct nft_pktinfo *pkt)
85 const struct nft_payload *priv = nft_expr_priv(expr);
86 const struct sk_buff *skb = pkt->skb;
87 u32 *dest = &regs->data[priv->dreg];
88 int offset;
90 dest[priv->len / NFT_REG32_SIZE] = 0;
91 switch (priv->base) {
92 case NFT_PAYLOAD_LL_HEADER:
93 if (!skb_mac_header_was_set(skb))
94 goto err;
96 if (skb_vlan_tag_present(skb)) {
97 if (!nft_payload_copy_vlan(dest, skb,
98 priv->offset, priv->len))
99 goto err;
100 return;
102 offset = skb_mac_header(skb) - skb->data;
103 break;
104 case NFT_PAYLOAD_NETWORK_HEADER:
105 offset = skb_network_offset(skb);
106 break;
107 case NFT_PAYLOAD_TRANSPORT_HEADER:
108 if (!pkt->tprot_set)
109 goto err;
110 offset = pkt->xt.thoff;
111 break;
112 default:
113 BUG();
115 offset += priv->offset;
117 if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
118 goto err;
119 return;
120 err:
121 regs->verdict.code = NFT_BREAK;
124 static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
125 [NFTA_PAYLOAD_SREG] = { .type = NLA_U32 },
126 [NFTA_PAYLOAD_DREG] = { .type = NLA_U32 },
127 [NFTA_PAYLOAD_BASE] = { .type = NLA_U32 },
128 [NFTA_PAYLOAD_OFFSET] = { .type = NLA_U32 },
129 [NFTA_PAYLOAD_LEN] = { .type = NLA_U32 },
130 [NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 },
131 [NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 },
134 static int nft_payload_init(const struct nft_ctx *ctx,
135 const struct nft_expr *expr,
136 const struct nlattr * const tb[])
138 struct nft_payload *priv = nft_expr_priv(expr);
140 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
141 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
142 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
143 priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
145 return nft_validate_register_store(ctx, priv->dreg, NULL,
146 NFT_DATA_VALUE, priv->len);
149 static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
151 const struct nft_payload *priv = nft_expr_priv(expr);
153 if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
154 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
155 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
156 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
157 goto nla_put_failure;
158 return 0;
160 nla_put_failure:
161 return -1;
/* Translate a link-layer payload match into flow-dissector offload keys.
 * Only exact-length matches on well-known ethernet/VLAN fields can be
 * offloaded; everything else returns -EOPNOTSUPP (no hardware offload).
 */
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		/* the ethertype determines how the network header is parsed */
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		/* NOTE(review): mapping the encapsulated protocol onto the
		 * vlan_tpid dissector field looks suspect (tpid is the tag's
		 * own protocol, not the payload's); later upstream kernels
		 * rework this mapping — verify against flow dissector
		 * semantics before relying on VLAN ethertype offload.
		 */
		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		/* inner (C-)VLAN TCI of a double-tagged frame */
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
	     sizeof(struct vlan_hdr):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		/* NOTE(review): same tpid-vs-protocol concern as above, for
		 * the inner tag. */
		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Translate an IPv4 header payload match into flow-dissector offload keys
 * (source/destination address and protocol only).
 */
static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, daddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, protocol):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		/* the protocol match pins how the transport header is parsed */
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Translate an IPv6 header payload match into flow-dissector offload keys
 * (source/destination address and next header only).
 */
static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		/* the nexthdr match pins how the transport header is parsed */
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
302 static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
303 struct nft_flow_rule *flow,
304 const struct nft_payload *priv)
306 int err;
308 switch (ctx->dep.l3num) {
309 case htons(ETH_P_IP):
310 err = nft_payload_offload_ip(ctx, flow, priv);
311 break;
312 case htons(ETH_P_IPV6):
313 err = nft_payload_offload_ip6(ctx, flow, priv);
314 break;
315 default:
316 return -EOPNOTSUPP;
319 return err;
/* Translate a TCP header payload match into flow-dissector offload keys
 * (source/destination port only).
 */
static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Translate a UDP header payload match into flow-dissector offload keys
 * (source/destination port only).
 */
static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
378 static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
379 struct nft_flow_rule *flow,
380 const struct nft_payload *priv)
382 int err;
384 switch (ctx->dep.protonum) {
385 case IPPROTO_TCP:
386 err = nft_payload_offload_tcp(ctx, flow, priv);
387 break;
388 case IPPROTO_UDP:
389 err = nft_payload_offload_udp(ctx, flow, priv);
390 break;
391 default:
392 return -EOPNOTSUPP;
395 return err;
398 static int nft_payload_offload(struct nft_offload_ctx *ctx,
399 struct nft_flow_rule *flow,
400 const struct nft_expr *expr)
402 const struct nft_payload *priv = nft_expr_priv(expr);
403 int err;
405 switch (priv->base) {
406 case NFT_PAYLOAD_LL_HEADER:
407 err = nft_payload_offload_ll(ctx, flow, priv);
408 break;
409 case NFT_PAYLOAD_NETWORK_HEADER:
410 err = nft_payload_offload_nh(ctx, flow, priv);
411 break;
412 case NFT_PAYLOAD_TRANSPORT_HEADER:
413 err = nft_payload_offload_th(ctx, flow, priv);
414 break;
415 default:
416 err = -EOPNOTSUPP;
417 break;
419 return err;
/* Generic payload-load ops: handles any offset/length via skb_copy_bits. */
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};
/* Fast-path variant selected for small aligned loads; non-static —
 * presumably referenced by the nf_tables core fast path outside this
 * file (TODO confirm against nf_tables_core).
 */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};
440 static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
442 *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
443 if (*sum == 0)
444 *sum = CSUM_MANGLED_0;
447 static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
449 struct udphdr *uh, _uh;
451 uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
452 if (!uh)
453 return false;
455 return (__force bool)uh->check;
/* Compute the absolute skb offset of the layer 4 checksum field for this
 * packet.  Returns 0 and fills *l4csum_offset on success, -1 when the
 * transport protocol has no checksum that should be updated.
 */
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		/* UDP checksum is optional; skip packets without one. */
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		/* Fall through. */
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	/* make the in-header field offset absolute within the skb */
	*l4csum_offset += pkt->xt.thoff;
	return 0;
}
/* Update the layer 4 checksum after payload bytes summing to @fsum were
 * replaced by bytes summing to @tsum.  Returns 0 on success (or when no
 * update is required), -1 on failure to read or write the field.
 */
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			/* keep the skb's complete checksum in sync with the
			 * mangled payload as well */
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		/* hardware will finish the checksum later: adjust the
		 * stored partial value without the final fold/invert */
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
521 static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
522 __wsum fsum, __wsum tsum, int csum_offset)
524 __sum16 sum;
526 if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
527 return -1;
529 nft_csum_replace(&sum, fsum, tsum);
530 if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
531 skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
532 return -1;
534 return 0;
/* Write the source register into the packet at base+offset, updating the
 * configured inet and/or layer 4 checksums first.  On any failure the
 * rule evaluation is aborted with NFT_BREAK.
 */
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	/* csum_offset is relative to the base header, not to the mangled
	 * range, so compute it before adding priv->offset */
	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		/* incremental update: sum of the old bytes vs the new ones */
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
592 static int nft_payload_set_init(const struct nft_ctx *ctx,
593 const struct nft_expr *expr,
594 const struct nlattr * const tb[])
596 struct nft_payload_set *priv = nft_expr_priv(expr);
598 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
599 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
600 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
601 priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
603 if (tb[NFTA_PAYLOAD_CSUM_TYPE])
604 priv->csum_type =
605 ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
606 if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
607 priv->csum_offset =
608 ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
609 if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
610 u32 flags;
612 flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
613 if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
614 return -EINVAL;
616 priv->csum_flags = flags;
619 switch (priv->csum_type) {
620 case NFT_PAYLOAD_CSUM_NONE:
621 case NFT_PAYLOAD_CSUM_INET:
622 break;
623 default:
624 return -EOPNOTSUPP;
627 return nft_validate_register_load(priv->sreg, priv->len);
630 static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
632 const struct nft_payload_set *priv = nft_expr_priv(expr);
634 if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
635 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
636 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
637 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
638 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
639 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
640 htonl(priv->csum_offset)) ||
641 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
642 goto nla_put_failure;
643 return 0;
645 nla_put_failure:
646 return -1;
/* Payload mangle ops: no offload callback, so set expressions are never
 * pushed to hardware. */
static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};
657 static const struct nft_expr_ops *
658 nft_payload_select_ops(const struct nft_ctx *ctx,
659 const struct nlattr * const tb[])
661 enum nft_payload_bases base;
662 unsigned int offset, len;
664 if (tb[NFTA_PAYLOAD_BASE] == NULL ||
665 tb[NFTA_PAYLOAD_OFFSET] == NULL ||
666 tb[NFTA_PAYLOAD_LEN] == NULL)
667 return ERR_PTR(-EINVAL);
669 base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
670 switch (base) {
671 case NFT_PAYLOAD_LL_HEADER:
672 case NFT_PAYLOAD_NETWORK_HEADER:
673 case NFT_PAYLOAD_TRANSPORT_HEADER:
674 break;
675 default:
676 return ERR_PTR(-EOPNOTSUPP);
679 if (tb[NFTA_PAYLOAD_SREG] != NULL) {
680 if (tb[NFTA_PAYLOAD_DREG] != NULL)
681 return ERR_PTR(-EINVAL);
682 return &nft_payload_set_ops;
685 if (tb[NFTA_PAYLOAD_DREG] == NULL)
686 return ERR_PTR(-EINVAL);
688 offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
689 len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
691 if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
692 base != NFT_PAYLOAD_LL_HEADER)
693 return &nft_payload_fast_ops;
694 else
695 return &nft_payload_ops;
/* Expression type registration; select_ops picks the load/mangle/fast
 * ops variant per expression instance. */
struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};