// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
                                         struct vlan_ethhdr *veth)
{
        if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
                return false;

        veth->h_vlan_proto = skb->vlan_proto;
        veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
        veth->h_vlan_encapsulated_proto = skb->protocol;

        return true;
}
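
/* With VLAN rx acceleration the tag is stripped from the packet data and
 * kept in skb->vlan_tci/skb->vlan_proto, so link-layer loads would miss it.
 * nft_payload_rebuild_vlan_hdr() above re-synthesizes the on-wire
 * vlan_ethhdr from that metadata before the copy.
 */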
/* add the vlan header into the user buffer, in case the tag was removed
 * by offloads
 */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
        int mac_off = skb_mac_header(skb) - skb->data;
        u8 *vlanh, *dst_u8 = (u8 *) d;
        struct vlan_ethhdr veth;
        u8 vlan_hlen = 0;

        if ((skb->protocol == htons(ETH_P_8021AD) ||
             skb->protocol == htons(ETH_P_8021Q)) &&
            offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
                vlan_hlen += VLAN_HLEN;

        vlanh = (u8 *) &veth;
        if (offset < VLAN_ETH_HLEN + vlan_hlen) {
                u8 ethlen = len;

                if (vlan_hlen &&
                    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
                        return false;
                else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
                        return false;

                if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
                        ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

                memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

                len -= ethlen;
                if (len == 0)
                        return true;

                dst_u8 += ethlen;
                offset = ETH_HLEN + vlan_hlen;
        } else {
                offset -= VLAN_HLEN + vlan_hlen;
        }

        return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
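
/* Illustrative example (not from the original source): on an skb whose
 * single 802.1Q tag was accelerated into metadata, a load of offset=0,
 * len=18 returns the rebuilt vlan_ethhdr (dst/src MAC, TPID, TCI, inner
 * proto); a longer load continues from the real packet data at ETH_HLEN.
 */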
void nft_payload_eval(const struct nft_expr *expr,
                      struct nft_regs *regs,
                      const struct nft_pktinfo *pkt)
{
        const struct nft_payload *priv = nft_expr_priv(expr);
        const struct sk_buff *skb = pkt->skb;
        u32 *dest = &regs->data[priv->dreg];
        int offset;

        if (priv->len % NFT_REG32_SIZE)
                dest[priv->len / NFT_REG32_SIZE] = 0;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;

                if (skb_vlan_tag_present(skb)) {
                        if (!nft_payload_copy_vlan(dest, skb,
                                                   priv->offset, priv->len))
                                goto err;
                        return;
                }
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                if (!pkt->tprot_set)
                        goto err;
                offset = pkt->xt.thoff;
                break;
        default:
                BUG();
        }
        offset += priv->offset;

        if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
                goto err;
        return;
err:
        regs->verdict.code = NFT_BREAK;
}
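
/* For orientation (illustrative mapping, not part of the original file):
 * "ip saddr 192.0.2.1" compiles to a network-header load at offset 12,
 * len 4 (iphdr->saddr) plus a cmp; "tcp dport 22" to a transport-header
 * load at offset 2, len 2 (tcphdr->dest).
 */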
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
        [NFTA_PAYLOAD_SREG]             = { .type = NLA_U32 },
        [NFTA_PAYLOAD_DREG]             = { .type = NLA_U32 },
        [NFTA_PAYLOAD_BASE]             = { .type = NLA_U32 },
        [NFTA_PAYLOAD_OFFSET]           = { .type = NLA_U32 },
        [NFTA_PAYLOAD_LEN]              = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_TYPE]        = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_OFFSET]      = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_FLAGS]       = { .type = NLA_U32 },
};
static int nft_payload_init(const struct nft_ctx *ctx,
                            const struct nft_expr *expr,
                            const struct nlattr * const tb[])
{
        struct nft_payload *priv = nft_expr_priv(expr);

        priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
        priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

        return nft_validate_register_store(ctx, priv->dreg, NULL,
                                           NFT_DATA_VALUE, priv->len);
}
static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_payload *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
            nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct ethhdr, h_source):
                if (priv->len != ETH_ALEN)
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  src, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_dest):
                if (priv->len != ETH_ALEN)
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  dst, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_proto):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
                                  n_proto, sizeof(__be16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
                                  vlan_tci, sizeof(__be16), reg);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
                                  vlan_tpid, sizeof(__be16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
                                  vlan_tci, sizeof(__be16), reg);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
             sizeof(struct vlan_hdr):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
                                  vlan_tpid, sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}
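
/* NFT_OFFLOAD_MATCH() (see nf_tables_offload.h) roughly records the flow
 * dissector key id and the field's offset/length within struct nft_flow_key
 * in the register's offload state and sets an all-ones mask; drivers
 * translate that into their hardware classifier.
 */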
static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct iphdr, saddr):
                if (priv->len != sizeof(struct in_addr))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, daddr):
                if (priv->len != sizeof(struct in_addr))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, protocol):
                if (priv->len != sizeof(__u8))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}
static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct ipv6hdr, saddr):
                if (priv->len != sizeof(struct in6_addr))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, daddr):
                if (priv->len != sizeof(struct in6_addr))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, nexthdr):
                if (priv->len != sizeof(__u8))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}
static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        int err;

        switch (ctx->dep.l3num) {
        case htons(ETH_P_IP):
                err = nft_payload_offload_ip(ctx, flow, priv);
                break;
        case htons(ETH_P_IPV6):
                err = nft_payload_offload_ip6(ctx, flow, priv);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return err;
}
static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct tcphdr, source):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct tcphdr, dest):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}
static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct udphdr, source):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct udphdr, dest):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}
static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        int err;

        switch (ctx->dep.protonum) {
        case IPPROTO_TCP:
                err = nft_payload_offload_tcp(ctx, flow, priv);
                break;
        case IPPROTO_UDP:
                err = nft_payload_offload_udp(ctx, flow, priv);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return err;
}
static int nft_payload_offload(struct nft_offload_ctx *ctx,
                               struct nft_flow_rule *flow,
                               const struct nft_expr *expr)
{
        const struct nft_payload *priv = nft_expr_priv(expr);
        int err;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                err = nft_payload_offload_ll(ctx, flow, priv);
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                err = nft_payload_offload_nh(ctx, flow, priv);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                err = nft_payload_offload_th(ctx, flow, priv);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}
static const struct nft_expr_ops nft_payload_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
        .eval           = nft_payload_eval,
        .init           = nft_payload_init,
        .dump           = nft_payload_dump,
        .offload        = nft_payload_offload,
};
const struct nft_expr_ops nft_payload_fast_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
        .eval           = nft_payload_eval,
        .init           = nft_payload_init,
        .dump           = nft_payload_dump,
        .offload        = nft_payload_offload,
};
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
        *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
        if (*sum == 0)
                *sum = CSUM_MANGLED_0;
}
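
/* Incremental update per RFC 1624: HC' = ~(~HC + ~m + m'), with fsum/tsum
 * the checksums of the old and new bytes. A folded result of 0 is stored
 * as CSUM_MANGLED_0 (0xffff) so a UDP checksum never reads as 0, which
 * would mean "no checksum" on IPv4.
 */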
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
        struct udphdr *uh, _uh;

        uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
        if (!uh)
                return false;

        return (__force bool)uh->check;
}
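
/* A zero check field in a UDP header over IPv4 means the checksum was not
 * computed; such packets must be skipped rather than incrementally fixed,
 * hence the helper above returns false for them.
 */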
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
                                     struct sk_buff *skb,
                                     unsigned int *l4csum_offset)
{
        switch (pkt->tprot) {
        case IPPROTO_TCP:
                *l4csum_offset = offsetof(struct tcphdr, check);
                break;
        case IPPROTO_UDP:
                if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
                        return -1;
                /* Fall through. */
        case IPPROTO_UDPLITE:
                *l4csum_offset = offsetof(struct udphdr, check);
                break;
        case IPPROTO_ICMPV6:
                *l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
                break;
        default:
                return -1;
        }

        *l4csum_offset += pkt->xt.thoff;
        return 0;
}
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
                                     struct sk_buff *skb,
                                     __wsum fsum, __wsum tsum)
{
        unsigned int l4csum_offset;
        __sum16 sum;

        /* If we cannot determine layer 4 checksum offset or this packet doesn't
         * require layer 4 checksum recalculation, skip this packet.
         */
        if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
                return 0;

        if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        /* Checksum mangling for an arbitrary amount of bytes, based on
         * inet_proto_csum_replace*() functions.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                nft_csum_replace(&sum, fsum, tsum);
                if (skb->ip_summed == CHECKSUM_COMPLETE) {
                        skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
                                              tsum);
                }
        } else {
                sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
                                          tsum));
        }

        if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
            skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        return 0;
}
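
/* skb->ip_summed cases handled above: CHECKSUM_PARTIAL stores only the
 * pseudo-header seed (the device finishes the sum), so that seed is
 * rewritten unfolded; CHECKSUM_COMPLETE carries the full packet sum in
 * skb->csum on receive and is adjusted alongside the stored checksum.
 */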
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
                                 __wsum fsum, __wsum tsum, int csum_offset)
{
        __sum16 sum;

        if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        nft_csum_replace(&sum, fsum, tsum);
        if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
            skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        return 0;
}
static void nft_payload_set_eval(const struct nft_expr *expr,
                                 struct nft_regs *regs,
                                 const struct nft_pktinfo *pkt)
{
        const struct nft_payload_set *priv = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
        const u32 *src = &regs->data[priv->sreg];
        int offset, csum_offset;
        __wsum fsum, tsum;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                if (!pkt->tprot_set)
                        goto err;
                offset = pkt->xt.thoff;
                break;
        default:
                BUG();
        }

        csum_offset = offset + priv->csum_offset;
        offset += priv->offset;

        if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
            (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
             skb->ip_summed != CHECKSUM_PARTIAL)) {
                fsum = skb_checksum(skb, offset, priv->len, 0);
                tsum = csum_partial(src, priv->len, 0);

                if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
                    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
                        goto err;

                if (priv->csum_flags &&
                    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
                        goto err;
        }

        if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
            skb_store_bits(skb, offset, src, priv->len) < 0)
                goto err;

        return;
err:
        regs->verdict.code = NFT_BREAK;
}
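
/* For orientation (illustrative mapping, not part of the original file):
 * "tcp dport set 8080" is emitted as a transport-header write at offset 2,
 * len 2, with csum_type NFT_PAYLOAD_CSUM_INET and csum_offset 16
 * (tcphdr->check), which lands in the checksum block above.
 */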
static int nft_payload_set_init(const struct nft_ctx *ctx,
                                const struct nft_expr *expr,
                                const struct nlattr * const tb[])
{
        struct nft_payload_set *priv = nft_expr_priv(expr);

        priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
        priv->sreg   = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

        if (tb[NFTA_PAYLOAD_CSUM_TYPE])
                priv->csum_type =
                        ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
        if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
                priv->csum_offset =
                        ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
        if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
                u32 flags;

                flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
                if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
                        return -EINVAL;

                priv->csum_flags = flags;
        }

        switch (priv->csum_type) {
        case NFT_PAYLOAD_CSUM_NONE:
        case NFT_PAYLOAD_CSUM_INET:
                break;
        default:
                return -EOPNOTSUPP;
        }

        return nft_validate_register_load(priv->sreg, priv->len);
}
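
/* NFT_PAYLOAD_L4CSUM_PSEUDOHDR asks for a layer 4 checksum fixup even when
 * the mangled bytes lie outside the transport header, e.g. rewriting IP
 * addresses that feed the TCP/UDP pseudo-header.
 */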
static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_payload_set *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
            nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
                         htonl(priv->csum_offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
static const struct nft_expr_ops nft_payload_set_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
        .eval           = nft_payload_set_eval,
        .init           = nft_payload_set_init,
        .dump           = nft_payload_set_dump,
};
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
                       const struct nlattr * const tb[])
{
        enum nft_payload_bases base;
        unsigned int offset, len;

        if (tb[NFTA_PAYLOAD_BASE] == NULL ||
            tb[NFTA_PAYLOAD_OFFSET] == NULL ||
            tb[NFTA_PAYLOAD_LEN] == NULL)
                return ERR_PTR(-EINVAL);

        base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        switch (base) {
        case NFT_PAYLOAD_LL_HEADER:
        case NFT_PAYLOAD_NETWORK_HEADER:
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                break;
        default:
                return ERR_PTR(-EOPNOTSUPP);
        }

        if (tb[NFTA_PAYLOAD_SREG] != NULL) {
                if (tb[NFTA_PAYLOAD_DREG] != NULL)
                        return ERR_PTR(-EINVAL);
                return &nft_payload_set_ops;
        }

        if (tb[NFTA_PAYLOAD_DREG] == NULL)
                return ERR_PTR(-EINVAL);

        offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

        if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
            base != NFT_PAYLOAD_LL_HEADER)
                return &nft_payload_fast_ops;
        else
                return &nft_payload_ops;
}
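
/* The fast ops are recognized by address in the core evaluation loop
 * (nft_payload_fast_eval() in nf_tables_core.c), which serves small
 * aligned loads straight from linear skb data without going through
 * skb_copy_bits().
 */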
struct nft_expr_type nft_payload_type __read_mostly = {
        .name           = "payload",
        .select_ops     = nft_payload_select_ops,
        .policy         = nft_payload_policy,
        .maxattr        = NFTA_PAYLOAD_MAX,
        .owner          = THIS_MODULE,
};