/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
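
/*
 * With VLAN rx acceleration the tag is stripped from the packet data and
 * carried in skb metadata instead (skb->vlan_proto plus the value returned
 * by skb_vlan_tag_get()). Rules reading the link-layer header expect the
 * on-wire layout, so the tag has to be spliced back in at copy time.
 */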
/* add the vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;
	} else if (offset >= VLAN_ETH_HLEN) {
		offset -= VLAN_HLEN;
		goto skip;
	}

	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	dst_u8 += vlan_len;
 skip:
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
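
/*
 * Core payload load: copy priv->len bytes, starting at the requested base
 * plus priv->offset, into the destination register. The register area is
 * zero padded beyond the copied bytes so short loads compare cleanly, and
 * any failure ends rule evaluation with NFT_BREAK.
 */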
static void nft_payload_eval(const struct nft_expr *expr,
			     struct nft_regs *regs,
			     const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	dest[priv->len / NFT_REG32_SIZE] = 0;
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
};
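
/*
 * Parse the load variant's attributes; base, offset, len and dreg are all
 * guaranteed present here because select_ops already rejected rules
 * without them. The store to dreg is validated against priv->len.
 */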
static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}
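
/* Mirror the parsed attributes back to userspace in network byte order. */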
static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
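
/*
 * Two identical ops tables: nft_payload_fast_ops is non-static so the
 * nf_tables core can recognize it by address and evaluate qualifying
 * expressions through its inlined fast path; the .eval hook here is the
 * fallback for when that fast path cannot be used.
 */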
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};
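
/*
 * RFC 1624 incremental checksum update: given the old folded checksum and
 * the partial sums over the original bytes (fsum) and the replacement
 * bytes (tsum), the new checksum is fold(~old - fsum + tsum), without
 * touching the rest of the packet. A result of 0 is rewritten as
 * CSUM_MANGLED_0, since 0 is reserved to mean "no checksum" for UDP.
 */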
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}
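
/*
 * A UDP checksum of 0 on the wire means the sender did not compute one,
 * so there is nothing to update; report that as "no l4 checksum".
 */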
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}
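
/*
 * Map the transport protocol to the offset of its checksum field relative
 * to the start of the packet. UDP-Lite always carries a checksum, so only
 * plain UDP gets the zero-checksum exemption above.
 */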
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		/* Fall through. */
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += pkt->xt.thoff;
	return 0;
}
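
/*
 * Adjust the layer 4 checksum for a rewrite of pseudo-header bytes. With
 * CHECKSUM_PARTIAL the field holds the non-inverted pseudo-header sum that
 * the device will complete, so it is patched without the final inversion;
 * otherwise the folded checksum is updated incrementally, and a
 * CHECKSUM_COMPLETE skb->csum is kept in sync with the change as well.
 */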
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (!skb_make_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
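
/*
 * Update an internet checksum (e.g. the IPv4 header checksum) stored at a
 * fixed offset from the configured base after the payload bytes change.
 */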
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}
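
/*
 * Payload store: compute the partial sums over the old (fsum) and new
 * (tsum) bytes before overwriting, fix up the configured checksum and/or
 * the layer 4 pseudo-header checksum, then write the new bytes. The
 * checksum fixups are skipped for transport-header rewrites on
 * CHECKSUM_PARTIAL skbs, since the checksum will be computed after the
 * rewrite anyway.
 */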
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (!skb_make_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
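
/*
 * The set variant additionally accepts the checksum type, the offset of
 * the checksum to maintain and the pseudo-header flag; only the internet
 * ones' complement checksum and NFT_PAYLOAD_L4CSUM_PSEUDOHDR are
 * understood.
 */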
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg   = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};
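
/*
 * Choose the ops variant: a source register means a payload store, a
 * destination register a payload load. Loads of at most four bytes at a
 * naturally aligned offset take the fast ops, except for the link-layer
 * base, where the VLAN reinsertion above rules out the simple copy.
 */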
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}
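
/*
 * Illustrative example (offsets per the TCP header layout): a rule such as
 * "tcp dport 80" is compiled by userspace nft into a two-byte payload load
 * at transport header offset 2 followed by a cmp expression; with len 2
 * and an aligned offset it qualifies for nft_payload_fast_ops above.
 */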
struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};