// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	/* Re-insert the tag that hardware acceleration moved into skb metadata. */
	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* Add the VLAN header into the user buffer if the tag was removed by offloads. */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		/* Clamp the copy to the end of the rebuilt header; subtract
		 * the full header span (VLAN_ETH_HLEN + vlan_hlen).
		 */
		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
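
/*
 * Worked example (illustrative, not from the original source): with a
 * single 802.1Q tag held in skb metadata, a read of len 2 at offset 12
 * (the EtherType position) falls below VLAN_ETH_HLEN, so it is served
 * from the rebuilt vlan_ethhdr and yields h_vlan_proto, exactly as if
 * the tag were still inline in the frame.
 */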

void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	/* Zero-pad the last destination register when len is not a
	 * multiple of the register size.
	 */
	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}
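
/*
 * For instance (illustrative): the nft match "ip saddr 1.2.3.4" compiles
 * to a payload load with base NFT_PAYLOAD_NETWORK_HEADER, offset 12
 * (offsetof(struct iphdr, saddr)) and len 4, followed by a cmp expression
 * against the destination register filled in here.
 */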

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}
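
/*
 * Worked example (illustrative): for priv_len 3 against a 4-byte field
 * such as an IPv4 address, remainder = 3, k = 0 and delta = 1, so
 * remainder_mask = htonl(0xffffff00) and the resulting mask covers only
 * the first three bytes of the field: ff ff ff 00.
 */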

static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tci, sizeof(__be16), reg);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
							sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}
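
/*
 * This is the standard incremental Internet checksum update (RFC 1624,
 * HC' = ~(~HC + ~m + m')), with fsum/tsum the sums of the old and new
 * bytes. A zero result is replaced with CSUM_MANGLED_0 (0xffff) because
 * a transmitted checksum of zero means "no checksum" for UDP.
 */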

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	/* A zero UDP checksum means checksumming is disabled. */
	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += pkt->xt.thoff;
	return 0;
}

static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}
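
/*
 * Unlike the one's-complement Internet checksum, SCTP uses CRC32c, so the
 * incremental nft_csum_replace() math does not apply; the checksum is
 * recomputed over the whole packet via sctp_compute_cksum() instead.
 */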

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	unsigned int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine the layer 4 checksum offset or this packet
	 * doesn't require layer 4 checksum recalculation, skip it.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * the inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (nft_payload_csum_sctp(skb, pkt->xt.thoff))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
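
/*
 * Example flow (illustrative): "tcp dport set 8080" stores 2 bytes at
 * transport-header offset 2. fsum/tsum capture the checksums of the old
 * and new bytes, so nft_payload_csum_inet() can patch the TCP checksum
 * incrementally instead of recomputing it over the whole segment.
 */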

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg   = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}
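
/*
 * For example (illustrative): "tcp dport 80" loads 2 bytes at offset 2
 * from the transport header; len is a power of two, the offset is aligned
 * and the base is not the link layer, so nft_payload_fast_ops is chosen.
 * A 6-byte MAC address match falls back to the generic nft_payload_ops.
 */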

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};