// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/gre.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* add vlan header into the user buffer for if tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN) {
		u8 ethlen = len;

		if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		if (offset + len > VLAN_ETH_HLEN)
			ethlen -= offset + len - VLAN_ETH_HLEN;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;
	} else if (offset >= VLAN_ETH_HLEN) {
		offset -= VLAN_HLEN;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

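/* Find where the inner (encapsulated) packet starts for the inner payload
 * base: skip the UDP, TCP, GRE or IP-in-IP header found at the transport
 * offset and cache the result in pkt->inneroff.
 */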
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
	unsigned int thoff = nft_thoff(pkt);

	if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_UDP:
		pkt->inneroff = thoff + sizeof(struct udphdr);
		break;
	case IPPROTO_TCP: {
		struct tcphdr *th, _tcph;

		th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
		if (!th)
			return -1;

		pkt->inneroff = thoff + __tcp_hdrlen(th);
		}
		break;
	case IPPROTO_GRE: {
		u32 offset = sizeof(struct gre_base_hdr);
		struct gre_base_hdr *gre, _gre;
		__be16 version;

		gre = skb_header_pointer(pkt->skb, thoff, sizeof(_gre), &_gre);
		if (!gre)
			return -1;

		version = gre->flags & GRE_VERSION;
		switch (version) {
		case GRE_VERSION_0:
			if (gre->flags & GRE_ROUTING)
				return -1;

			if (gre->flags & GRE_CSUM) {
				offset += sizeof_field(struct gre_full_hdr, csum) +
					  sizeof_field(struct gre_full_hdr, reserved1);
			}
			if (gre->flags & GRE_KEY)
				offset += sizeof_field(struct gre_full_hdr, key);

			if (gre->flags & GRE_SEQ)
				offset += sizeof_field(struct gre_full_hdr, seq);
			break;
		default:
			return -1;
		}

		pkt->inneroff = thoff + offset;
		}
		break;
	case IPPROTO_IPIP:
		pkt->inneroff = thoff;
		break;
	default:
		return -1;
	}

	pkt->flags |= NFT_PKTINFO_INNER;

	return 0;
}

int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
	if (!(pkt->flags & NFT_PKTINFO_INNER) &&
	    __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
		return -1;

	return pkt->inneroff;
}

static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
{
	unsigned int boundary = offset + len;

	/* data past ether src/dst requested, copy needed */
	if (boundary > offsetof(struct ethhdr, h_proto))
		return true;

	return false;
}

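/* Load priv->len bytes at priv->offset from the selected header base into
 * the destination register; on any failure the verdict becomes NFT_BREAK.
 * As a rough illustration, a rule like "tcp dport 22" compiles to a 2-byte
 * load at offset 2 from the transport header followed by a compare.
 */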
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
			goto err;

		if (skb_vlan_tag_present(skb) &&
		    nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_LEN]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static int nft_payload_dump(struct sk_buff *skb,
			    const struct nft_expr *expr, bool reset)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_reduce(struct nft_regs_track *track,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct nft_payload *payload;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	payload = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->base != payload->base ||
	    priv->offset != payload->offset ||
	    priv->len != payload->len) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return nft_expr_reduce_bitwise(track, expr);
}

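/* Build the mask used for a hardware offload match: all-ones when the load
 * covers the whole flow dissector field, a truncated mask when it covers
 * only a prefix of the field, and failure when the load is larger than the
 * field.
 */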
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}

static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
	     sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

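/* Variant of the payload load used from the inner expression: header
 * offsets come from the parsed tunnel context (tun_ctx) instead of the
 * packet metadata, and each base is only valid if the tunnel parser
 * flagged it as present.
 */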
void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
			    const struct nft_pktinfo *pkt,
			    struct nft_inner_tun_ctx *tun_ctx)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_TUN_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TUN))
			goto err;

		offset = tun_ctx->inner_tunoff;
		break;
	case NFT_PAYLOAD_LL_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_LL))
			goto err;

		offset = tun_ctx->inner_lloff;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_NH))
			goto err;

		offset = tun_ctx->inner_nhoff;
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH))
			goto err;

		offset = tun_ctx->inner_thoff;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_inner_init(const struct nft_ctx *ctx,
				  const struct nft_expr *expr,
				  const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);
	u32 base;

	if (!tb[NFTA_PAYLOAD_BASE] || !tb[NFTA_PAYLOAD_OFFSET] ||
	    !tb[NFTA_PAYLOAD_LEN] || !tb[NFTA_PAYLOAD_DREG])
		return -EINVAL;

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_TUN_HEADER:
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return -EOPNOTSUPP;
	}

	priv->base   = base;
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static const struct nft_expr_ops nft_payload_inner_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.init		= nft_payload_inner_init,
	.dump		= nft_payload_dump,
	/* direct call to nft_payload_inner_eval(). */
};

static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

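/* A zero UDP checksum means the sender did not compute one, so only report
 * true when the header actually carries a checksum that must be fixed up
 * after mangling.
 */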
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	if (pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += nft_thoff(pkt);
	return 0;
}

static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	unsigned int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

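/* State for the payload "set" (mangle) variant: where to write the source
 * register and how to fix up the inet/SCTP and layer 4 checksums afterwards.
 */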
struct nft_payload_set {
	enum nft_payload_bases	base:8;
	u8			offset;
	u8			len;
	u8			sreg;
	u8			csum_type;
	u8			csum_offset;
	u8			csum_flags;
};

/* This is not struct vlan_hdr. */
struct nft_payload_vlan_hdr {
	__be16	h_vlan_proto;
	__be16	h_vlan_TCI;
};

static bool
nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
		     int *vlan_hlen)
{
	struct nft_payload_vlan_hdr *vlanh;
	__be16 vlan_proto;
	u16 vlan_tci;

	if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
		*vlan_hlen = VLAN_HLEN;
		return true;
	}

	switch (offset) {
	case offsetof(struct vlan_ethhdr, h_vlan_proto):
		if (len == 2) {
			vlan_proto = nft_reg_load_be16(src);
			skb->vlan_proto = vlan_proto;
		} else if (len == 4) {
			vlanh = (struct nft_payload_vlan_hdr *)src;
			__vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
					       ntohs(vlanh->h_vlan_TCI));
		} else {
			return false;
		}
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (len != 2)
			return false;

		vlan_tci = ntohs(nft_reg_load_be16(src));
		skb->vlan_tci = vlan_tci;
		break;
	default:
		return false;
	}

	return true;
}

*expr
,
855 struct nft_regs
*regs
,
856 const struct nft_pktinfo
*pkt
)
858 const struct nft_payload_set
*priv
= nft_expr_priv(expr
);
859 const u32
*src
= ®s
->data
[priv
->sreg
];
860 int offset
, csum_offset
, vlan_hlen
= 0;
861 struct sk_buff
*skb
= pkt
->skb
;
864 switch (priv
->base
) {
865 case NFT_PAYLOAD_LL_HEADER
:
866 if (!skb_mac_header_was_set(skb
))
869 if (skb_vlan_tag_present(skb
) &&
870 nft_payload_need_vlan_adjust(priv
->offset
, priv
->len
)) {
871 if (!nft_payload_set_vlan(src
, skb
,
872 priv
->offset
, priv
->len
,
880 offset
= skb_mac_header(skb
) - skb
->data
- vlan_hlen
;
882 case NFT_PAYLOAD_NETWORK_HEADER
:
883 offset
= skb_network_offset(skb
);
885 case NFT_PAYLOAD_TRANSPORT_HEADER
:
886 if (!(pkt
->flags
& NFT_PKTINFO_L4PROTO
) || pkt
->fragoff
)
888 offset
= nft_thoff(pkt
);
890 case NFT_PAYLOAD_INNER_HEADER
:
891 offset
= nft_payload_inner_offset(pkt
);
900 csum_offset
= offset
+ priv
->csum_offset
;
901 offset
+= priv
->offset
;
903 if ((priv
->csum_type
== NFT_PAYLOAD_CSUM_INET
|| priv
->csum_flags
) &&
904 ((priv
->base
!= NFT_PAYLOAD_TRANSPORT_HEADER
&&
905 priv
->base
!= NFT_PAYLOAD_INNER_HEADER
) ||
906 skb
->ip_summed
!= CHECKSUM_PARTIAL
)) {
907 if (offset
+ priv
->len
> skb
->len
)
910 fsum
= skb_checksum(skb
, offset
, priv
->len
, 0);
911 tsum
= csum_partial(src
, priv
->len
, 0);
913 if (priv
->csum_type
== NFT_PAYLOAD_CSUM_INET
&&
914 nft_payload_csum_inet(skb
, src
, fsum
, tsum
, csum_offset
))
917 if (priv
->csum_flags
&&
918 nft_payload_l4csum_update(pkt
, skb
, fsum
, tsum
) < 0)
922 if (skb_ensure_writable(skb
, max(offset
+ priv
->len
, 0)) ||
923 skb_store_bits(skb
, offset
, src
, priv
->len
) < 0)
926 if (priv
->csum_type
== NFT_PAYLOAD_CSUM_SCTP
&&
927 pkt
->tprot
== IPPROTO_SCTP
&&
928 skb
->ip_summed
!= CHECKSUM_PARTIAL
) {
929 if (pkt
->fragoff
== 0 &&
930 nft_payload_csum_sctp(skb
, nft_thoff(pkt
)))
936 regs
->verdict
.code
= NFT_BREAK
;
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);
	u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
	int err;

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
		err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
					  &csum_offset);
		if (err < 0)
			return err;

		priv->csum_offset = csum_offset;
	}
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}
	priv->csum_type = csum_type;

	return nft_parse_register_load(ctx, tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}

*skb
,
992 const struct nft_expr
*expr
, bool reset
)
994 const struct nft_payload_set
*priv
= nft_expr_priv(expr
);
996 if (nft_dump_register(skb
, NFTA_PAYLOAD_SREG
, priv
->sreg
) ||
997 nla_put_be32(skb
, NFTA_PAYLOAD_BASE
, htonl(priv
->base
)) ||
998 nla_put_be32(skb
, NFTA_PAYLOAD_OFFSET
, htonl(priv
->offset
)) ||
999 nla_put_be32(skb
, NFTA_PAYLOAD_LEN
, htonl(priv
->len
)) ||
1000 nla_put_be32(skb
, NFTA_PAYLOAD_CSUM_TYPE
, htonl(priv
->csum_type
)) ||
1001 nla_put_be32(skb
, NFTA_PAYLOAD_CSUM_OFFSET
,
1002 htonl(priv
->csum_offset
)) ||
1003 nla_put_be32(skb
, NFTA_PAYLOAD_CSUM_FLAGS
, htonl(priv
->csum_flags
)))
1004 goto nla_put_failure
;
static bool nft_payload_set_reduce(struct nft_regs_track *track,
				   const struct nft_expr *expr)
{
	int i;

	for (i = 0; i < NFT_REG32_NUM; i++) {
		if (!track->regs[i].selector)
			continue;

		if (track->regs[i].selector->ops != &nft_payload_ops &&
		    track->regs[i].selector->ops != &nft_payload_fast_ops)
			continue;

		__nft_reg_track_cancel(track, i);
	}

	return false;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
	.reduce		= nft_payload_set_reduce,
};

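/* Pick the expression ops: the set ops when a source register is given,
 * the fast ops for small, aligned loads that do not touch the link layer
 * or inner headers, and the generic ops for everything else.
 */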
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;
	int err;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
	case NFT_PAYLOAD_INNER_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
	if (err < 0)
		return ERR_PTR(err);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
	if (err < 0)
		return ERR_PTR(err);

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.inner_ops	= &nft_payload_inner_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};