// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
9 #include <linux/kernel.h>
10 #include <linux/if_vlan.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/netlink.h>
14 #include <linux/netfilter.h>
15 #include <linux/netfilter/nf_tables.h>
16 #include <net/netfilter/nf_tables_core.h>
17 #include <net/netfilter/nf_tables.h>
18 /* For layer 4 checksum field offset. */
19 #include <linux/tcp.h>
20 #include <linux/udp.h>
21 #include <linux/icmpv6.h>
/* add vlan header into the user buffer for if tag was removed by offloads */
/*
 * NOTE(review): several original lines are elided from this chunk (the
 * embedded numbering jumps 25->27, 35->38, 40->48, 48->53, 59->67): the
 * return type / storage-class line, the opening and closing braces, the
 * assignment of "vlanh", and the error / early-return branches are not
 * visible here.  Code tokens below are kept exactly as found; only comments
 * were added.
 */
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
	/* NOTE(review): opening brace (original line 26) elided. */
	/* Offset of the MAC header relative to skb->data. */
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	/* Scratch header used to rebuild the VLAN tag from skb metadata. */
	struct vlan_ethhdr veth;

	/* Requested range starts inside the Ethernet header proper. */
	if (offset < ETH_HLEN) {
		/* Clamp to the bytes remaining in the Ethernet header. */
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		/* Pull the on-wire Ethernet header into the scratch buffer. */
		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			/* NOTE(review): failure branch (lines 36-37) elided;
			 * presumably a "return false" -- confirm upstream. */

		/* Restore the VLAN protocol removed by offloading (per the
		 * function comment above). */
		veth.h_vlan_proto = skb->vlan_proto;

		/* NOTE(review): line 39 elided -- "vlanh" is used below but
		 * its assignment (presumably vlanh = (u8 *)&veth) is not
		 * visible in this chunk. */
		memcpy(dst_u8, vlanh + offset, ethlen);
		/* NOTE(review): lines 41-47 elided (presumably handling of a
		 * copy spanning past the Ethernet header / early return). */
	} else if (offset >= VLAN_ETH_HLEN) {
		/* NOTE(review): lines 49-52 elided. */
		/* Rebuild the 802.1Q tag from out-of-band skb fields. */
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
		veth.h_vlan_encapsulated_proto = skb->protocol;
		/* NOTE(review): lines 55-57 elided. */

		vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
		memcpy(dst_u8, vlanh, vlan_len);
		/* NOTE(review): lines 60-66 elided. */

	/* True when the copy out of the packet succeeded. */
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
/*
 * nft_payload_eval - copy priv->len bytes at priv->offset from the selected
 * header base of pkt->skb into destination register priv->dreg.
 *
 * NOTE(review): heavily elided -- the original numbering skips 73, 77-78,
 * 80, 83-84, 88-92, 95, 97-98, 100-103, 105, 107-109 and 111-112, so the
 * "switch (priv->base)" header, the "offset" declaration, the break/goto-err
 * lines and all closing braces are missing from this chunk.  Code tokens are
 * kept exactly as found; only comments were added.
 */
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	/* NOTE(review): "®s" is mojibake for "&regs" -- fix when restoring. */
	u32 *dest = ®s->data[priv->dreg];

	/* Zero the last destination register word so a copy shorter than a
	 * full register leaves no stale bytes behind. */
	dest[priv->len / NFT_REG32_SIZE] = 0;

	/* NOTE(review): "switch (priv->base) {" elided here. */
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			/* NOTE(review): error branch elided (presumably
			 * "goto err"). */
		if (skb_vlan_tag_present(skb)) {
			/* Tag was stripped by offloading: go through the
			 * VLAN-rebuilding helper instead of a plain copy. */
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				/* NOTE(review): error branch elided. */
		offset = skb_mac_header(skb) - skb->data;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		/* NOTE(review): lines 97-98 elided (presumably a check that
		 * the transport protocol was resolved). */
		offset = pkt->xt.thoff;
	/* NOTE(review): default case (lines 100-103) elided. */
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		/* NOTE(review): error branch elided. */
	/* NOTE(review): presumably under an "err:" label -- confirm. */
	regs->verdict.code = NFT_BREAK;
/* Netlink attribute policy for the NFTA_PAYLOAD_* attributes.
 * NOTE(review): original lines 121-122 are elided -- presumably the
 * NFTA_PAYLOAD_CSUM_FLAGS entry (that attribute is parsed by
 * nft_payload_set_init below) and the closing "};".  Entries kept exactly
 * as found.
 */
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
123 static int nft_payload_init(const struct nft_ctx
*ctx
,
124 const struct nft_expr
*expr
,
125 const struct nlattr
* const tb
[])
127 struct nft_payload
*priv
= nft_expr_priv(expr
);
129 priv
->base
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_BASE
]));
130 priv
->offset
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_OFFSET
]));
131 priv
->len
= ntohl(nla_get_be32(tb
[NFTA_PAYLOAD_LEN
]));
132 priv
->dreg
= nft_parse_register(tb
[NFTA_PAYLOAD_DREG
]);
134 return nft_validate_register_store(ctx
, priv
->dreg
, NULL
,
135 NFT_DATA_VALUE
, priv
->len
);
/*
 * nft_payload_dump - emit a payload-load expression's attributes to a
 * netlink message.
 * NOTE(review): the function tail (original lines 147-152 -- presumably
 * "return 0;", the "nla_put_failure:" label, "return -1;" and the closing
 * brace) is elided from this chunk.
 */
static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
/* Default (generic) payload-load expression ops.
 * NOTE(review): closing "};" (original line 159) elided. */
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
/* Fast-path ops selected by nft_payload_select_ops for small aligned loads.
 * Non-static: referenced from outside this file.
 * NOTE(review): closing "};" (original line 167) elided.  .eval here points
 * at the generic nft_payload_eval; verify a dedicated fast-path evaluator
 * was not intended. */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
/*
 * Fold a checksum delta into *sum: remove fsum (old bytes) and add tsum
 * (new bytes).
 * NOTE(review): original line 172 between the two stores is elided --
 * presumably an "if (*sum == 0)" guard; without it the CSUM_MANGLED_0
 * store would unconditionally clobber the folded result.  Confirm against
 * the original before use.
 */
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	/* NOTE(review): conditional guard elided (see above). */
	*sum = CSUM_MANGLED_0;
/*
 * Report whether the UDP header at offset thoff carries a non-zero
 * checksum field.
 * NOTE(review): original lines 181-183 are elided -- presumably the NULL
 * check on the skb_header_pointer() result; as shown, uh may be NULL at
 * the dereference below.
 */
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	/* NOTE(review): "if (!uh) ..." branch elided here. */
	return (__force bool)uh->check;
/*
 * Compute the skb-absolute offset of the layer-4 checksum field for the
 * packet's transport protocol, storing it in *l4csum_offset.
 * NOTE(review): heavily elided -- missing the second parameter line
 * (presumably "struct sk_buff *skb,"), the TCP/UDP/ICMPV6 case labels,
 * break lines, the default case, the final return and braces.
 */
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     unsigned int *l4csum_offset)
	switch (pkt->tprot) {
	/* NOTE(review): "case IPPROTO_TCP:" elided. */
		*l4csum_offset = offsetof(struct tcphdr, check);
	/* NOTE(review): "case IPPROTO_UDP:" elided. */
		/* The UDP checksum is optional; bail out if not in use. */
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			/* NOTE(review): return branch elided. */
		/* fallthrough (presumed) */
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
	/* NOTE(review): "case IPPROTO_ICMPV6:" (presumed) elided. */
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
	/* NOTE(review): default case (presumably returning an error) elided. */

	/* The offsetof() values above are header-relative; add the transport
	 * header offset to make them skb-absolute. */
	*l4csum_offset += pkt->xt.thoff;
/*
 * Update the packet's layer-4 checksum field after payload bytes changed
 * (fsum = checksum over the old bytes, tsum = over the new bytes).
 * NOTE(review): elided lines include the "struct sk_buff *skb" parameter
 * line, the local declarations (l4csum_offset, sum), the closing
 * continuation of the two checksum expressions (the "tsum));" lines),
 * all return statements and several braces.
 */
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     __wsum fsum, __wsum tsum)
	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		/* NOTE(review): return branch elided. */

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		/* NOTE(review): return branch elided. */

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			/* NOTE(review): continuation of this expression
			 * (presumably "tsum);") elided. */
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
	/* NOTE(review): surrounding else-branch context elided. */
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),

	if (!skb_make_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		/* NOTE(review): return branch elided. */
/*
 * Recompute the Internet checksum field stored at csum_offset after payload
 * bytes changed (fsum over the old bytes, tsum over the new bytes).
 * NOTE(review): elided: opening brace, the "__sum16 sum;" declaration,
 * both "return -1;" branches, the final "return 0;" and closing brace.
 * The "src" parameter is unused in the visible lines.
 */
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		/* NOTE(review): return branch elided. */

	nft_csum_replace(&sum, fsum, tsum);
	if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		/* NOTE(review): return branch elided. */
/*
 * nft_payload_set_eval - write priv->len bytes from source register sreg
 * into the packet at the selected base + offset, fixing checksums as
 * configured.
 * NOTE(review): elided: opening brace, local declarations (presumably
 * "__wsum fsum, tsum;"), goto-err branches, break lines, the default case
 * and several closing braces; "®s" is mojibake for "&regs".
 */
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	/* NOTE(review): mojibake -- should read "&regs->data[...]". */
	const u32 *src = ®s->data[priv->sreg];
	int offset, csum_offset;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			/* NOTE(review): error branch elided. */
		offset = skb_mac_header(skb) - skb->data;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		/* NOTE(review): lines 286-287 elided (presumably a check that
		 * the transport protocol was resolved). */
		offset = pkt->xt.thoff;
	/* NOTE(review): default case (lines 289-293) elided. */

	/* The checksum field offset is base-relative, computed before
	 * priv->offset is added for the payload write itself. */
	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	/* Recompute checksums in software unless the transport-header base
	 * packet is CHECKSUM_PARTIAL (hardware will finish it). */
	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);
		/* NOTE(review): line 302 elided. */
		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			/* NOTE(review): error branch elided. */

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			/* NOTE(review): error branch elided. */

	if (!skb_make_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		/* NOTE(review): error branch elided. */
	/* NOTE(review): presumably "return;" plus an "err:" label elided. */
	regs->verdict.code = NFT_BREAK;
/*
 * nft_payload_set_init - parse netlink attributes for a payload-store
 * expression.
 * NOTE(review): elided: braces, a local declaration (presumably
 * "u32 flags;"), the assignment targets "priv->csum_type =" (line 333) and
 * "priv->csum_offset =" (line 336), the error returns (-EINVAL /
 * -EOPNOTSUPP presumed) and the break lines of the csum_type switch.
 */
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		/* NOTE(review): assignment target elided on the next line. */
		ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		/* NOTE(review): assignment target elided on the next line. */
		ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		/* Reject any flag other than the pseudo-header one. */
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			/* NOTE(review): error return elided. */
		priv->csum_flags = flags;

	/* Only NONE and INET checksum types are accepted. */
	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
	/* NOTE(review): break and default case (lines 351-354) elided. */

	return nft_validate_register_load(priv->sreg, priv->len);
/*
 * nft_payload_set_dump - emit a payload-store expression's attributes to a
 * netlink message.
 * NOTE(review): the function tail (original lines 372-377 -- presumably
 * "return 0;", the "nla_put_failure:" label, "return -1;" and the closing
 * brace) is elided from this chunk.
 */
static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
/* Payload-store (set) expression ops.
 * NOTE(review): closing "};" (original line 384) elided. */
static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
/*
 * nft_payload_select_ops - choose the expression ops (load, fast load, or
 * store) from the supplied netlink attributes.
 * NOTE(review): elided: opening brace, the "switch (base)" line, break
 * lines, the "default:" label, and the closing braces of the sreg block
 * and of the function.
 */
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
	enum nft_payload_bases base;
	unsigned int offset, len;

	/* BASE, OFFSET and LEN are mandatory for both load and store. */
	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	/* NOTE(review): "switch (base) {" elided here. */
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		/* NOTE(review): break (line 403) and "default:" elided;
		 * the -EOPNOTSUPP below presumably belongs to the default. */
		return ERR_PTR(-EOPNOTSUPP);

	/* An SREG selects the store variant; it excludes DREG. */
	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	/* Small aligned power-of-two loads outside the link-layer base can
	 * use the fast ops. */
	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;

	return &nft_payload_ops;
/* Expression-type registration record for the payload expression.
 * NOTE(review): original line 428 (presumably '.name = "payload",') and
 * the closing "};" (line 433) are elided. */
struct nft_expr_type nft_payload_type __read_mostly = {
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,