// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
8 #include <linux/unaligned.h>
9 #include <linux/kernel.h>
10 #include <linux/netlink.h>
11 #include <linux/netfilter.h>
12 #include <linux/netfilter/nf_tables.h>
13 #include <linux/dccp.h>
14 #include <linux/sctp.h>
15 #include <net/netfilter/nf_tables_core.h>
16 #include <net/netfilter/nf_tables.h>
29 static unsigned int optlen(const u8
*opt
, unsigned int offset
)
31 /* Beware zero-length options: make finite progress */
32 if (opt
[offset
] <= TCPOPT_NOP
|| opt
[offset
+ 1] == 0)
35 return opt
[offset
+ 1];
38 static int nft_skb_copy_to_reg(const struct sk_buff
*skb
, int offset
, u32
*dest
, unsigned int len
)
40 if (len
% NFT_REG32_SIZE
)
41 dest
[len
/ NFT_REG32_SIZE
] = 0;
43 return skb_copy_bits(skb
, offset
, dest
, len
);
46 static void nft_exthdr_ipv6_eval(const struct nft_expr
*expr
,
47 struct nft_regs
*regs
,
48 const struct nft_pktinfo
*pkt
)
50 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
51 u32
*dest
= ®s
->data
[priv
->dreg
];
52 unsigned int offset
= 0;
55 if (pkt
->skb
->protocol
!= htons(ETH_P_IPV6
))
58 err
= ipv6_find_hdr(pkt
->skb
, &offset
, priv
->type
, NULL
, NULL
);
59 if (priv
->flags
& NFT_EXTHDR_F_PRESENT
) {
60 nft_reg_store8(dest
, err
>= 0);
65 offset
+= priv
->offset
;
67 if (nft_skb_copy_to_reg(pkt
->skb
, offset
, dest
, priv
->len
) < 0)
71 regs
->verdict
.code
= NFT_BREAK
;
74 /* find the offset to specified option.
76 * If target header is found, its offset is set in *offset and return option
77 * number. Otherwise, return negative error.
79 * If the first fragment doesn't contain the End of Options it is considered
82 static int ipv4_find_option(struct net
*net
, struct sk_buff
*skb
,
83 unsigned int *offset
, int target
)
85 unsigned char optbuf
[sizeof(struct ip_options
) + 40];
86 struct ip_options
*opt
= (struct ip_options
*)optbuf
;
87 struct iphdr
*iph
, _iph
;
93 iph
= skb_header_pointer(skb
, 0, sizeof(_iph
), &_iph
);
96 start
= sizeof(struct iphdr
);
98 optlen
= iph
->ihl
* 4 - (int)sizeof(struct iphdr
);
102 memset(opt
, 0, sizeof(struct ip_options
));
103 /* Copy the options since __ip_options_compile() modifies
106 if (skb_copy_bits(skb
, start
, opt
->__data
, optlen
))
108 opt
->optlen
= optlen
;
110 if (__ip_options_compile(net
, opt
, NULL
, &info
))
118 found
= target
== IPOPT_SSRR
? opt
->is_strictroute
:
119 !opt
->is_strictroute
;
121 *offset
= opt
->srr
+ start
;
126 *offset
= opt
->rr
+ start
;
130 if (!opt
->router_alert
)
132 *offset
= opt
->router_alert
+ start
;
138 return found
? target
: -ENOENT
;
141 static void nft_exthdr_ipv4_eval(const struct nft_expr
*expr
,
142 struct nft_regs
*regs
,
143 const struct nft_pktinfo
*pkt
)
145 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
146 u32
*dest
= ®s
->data
[priv
->dreg
];
147 struct sk_buff
*skb
= pkt
->skb
;
151 if (skb
->protocol
!= htons(ETH_P_IP
))
154 err
= ipv4_find_option(nft_net(pkt
), skb
, &offset
, priv
->type
);
155 if (priv
->flags
& NFT_EXTHDR_F_PRESENT
) {
156 nft_reg_store8(dest
, err
>= 0);
158 } else if (err
< 0) {
161 offset
+= priv
->offset
;
163 if (nft_skb_copy_to_reg(pkt
->skb
, offset
, dest
, priv
->len
) < 0)
167 regs
->verdict
.code
= NFT_BREAK
;
171 nft_tcp_header_pointer(const struct nft_pktinfo
*pkt
,
172 unsigned int len
, void *buffer
, unsigned int *tcphdr_len
)
176 if (pkt
->tprot
!= IPPROTO_TCP
|| pkt
->fragoff
)
179 tcph
= skb_header_pointer(pkt
->skb
, nft_thoff(pkt
), sizeof(*tcph
), buffer
);
183 *tcphdr_len
= __tcp_hdrlen(tcph
);
184 if (*tcphdr_len
< sizeof(*tcph
) || *tcphdr_len
> len
)
187 return skb_header_pointer(pkt
->skb
, nft_thoff(pkt
), *tcphdr_len
, buffer
);
190 static void nft_exthdr_tcp_eval(const struct nft_expr
*expr
,
191 struct nft_regs
*regs
,
192 const struct nft_pktinfo
*pkt
)
194 u8 buff
[sizeof(struct tcphdr
) + MAX_TCP_OPTION_SPACE
];
195 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
196 unsigned int i
, optl
, tcphdr_len
, offset
;
197 u32
*dest
= ®s
->data
[priv
->dreg
];
201 tcph
= nft_tcp_header_pointer(pkt
, sizeof(buff
), buff
, &tcphdr_len
);
206 for (i
= sizeof(*tcph
); i
< tcphdr_len
- 1; i
+= optl
) {
207 optl
= optlen(opt
, i
);
209 if (priv
->type
!= opt
[i
])
212 if (i
+ optl
> tcphdr_len
|| priv
->len
+ priv
->offset
> optl
)
215 offset
= i
+ priv
->offset
;
216 if (priv
->flags
& NFT_EXTHDR_F_PRESENT
) {
217 nft_reg_store8(dest
, 1);
219 if (priv
->len
% NFT_REG32_SIZE
)
220 dest
[priv
->len
/ NFT_REG32_SIZE
] = 0;
221 memcpy(dest
, opt
+ offset
, priv
->len
);
228 if (priv
->flags
& NFT_EXTHDR_F_PRESENT
)
231 regs
->verdict
.code
= NFT_BREAK
;
234 static void nft_exthdr_tcp_set_eval(const struct nft_expr
*expr
,
235 struct nft_regs
*regs
,
236 const struct nft_pktinfo
*pkt
)
238 u8 buff
[sizeof(struct tcphdr
) + MAX_TCP_OPTION_SPACE
];
239 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
240 unsigned int i
, optl
, tcphdr_len
, offset
;
244 tcph
= nft_tcp_header_pointer(pkt
, sizeof(buff
), buff
, &tcphdr_len
);
248 if (skb_ensure_writable(pkt
->skb
, nft_thoff(pkt
) + tcphdr_len
))
251 tcph
= (struct tcphdr
*)(pkt
->skb
->data
+ nft_thoff(pkt
));
254 for (i
= sizeof(*tcph
); i
< tcphdr_len
- 1; i
+= optl
) {
260 optl
= optlen(opt
, i
);
262 if (priv
->type
!= opt
[i
])
265 if (i
+ optl
> tcphdr_len
|| priv
->len
+ priv
->offset
> optl
)
268 offset
= i
+ priv
->offset
;
272 old
.v16
= (__force __be16
)get_unaligned((u16
*)(opt
+ offset
));
273 new.v16
= (__force __be16
)nft_reg_load16(
274 ®s
->data
[priv
->sreg
]);
276 switch (priv
->type
) {
278 /* increase can cause connection to stall */
279 if (ntohs(old
.v16
) <= ntohs(new.v16
))
284 if (old
.v16
== new.v16
)
287 put_unaligned(new.v16
, (__be16
*)(opt
+ offset
));
288 inet_proto_csum_replace2(&tcph
->check
, pkt
->skb
,
289 old
.v16
, new.v16
, false);
292 new.v32
= nft_reg_load_be32(®s
->data
[priv
->sreg
]);
293 old
.v32
= (__force __be32
)get_unaligned((u32
*)(opt
+ offset
));
295 if (old
.v32
== new.v32
)
298 put_unaligned(new.v32
, (__be32
*)(opt
+ offset
));
299 inet_proto_csum_replace4(&tcph
->check
, pkt
->skb
,
300 old
.v32
, new.v32
, false);
311 regs
->verdict
.code
= NFT_BREAK
;
314 static void nft_exthdr_tcp_strip_eval(const struct nft_expr
*expr
,
315 struct nft_regs
*regs
,
316 const struct nft_pktinfo
*pkt
)
318 u8 buff
[sizeof(struct tcphdr
) + MAX_TCP_OPTION_SPACE
];
319 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
320 unsigned int i
, tcphdr_len
, optl
;
324 tcph
= nft_tcp_header_pointer(pkt
, sizeof(buff
), buff
, &tcphdr_len
);
328 if (skb_ensure_writable(pkt
->skb
, nft_thoff(pkt
) + tcphdr_len
))
331 tcph
= (struct tcphdr
*)(pkt
->skb
->data
+ nft_thoff(pkt
));
334 for (i
= sizeof(*tcph
); i
< tcphdr_len
- 1; i
+= optl
) {
337 optl
= optlen(opt
, i
);
338 if (priv
->type
!= opt
[i
])
341 if (i
+ optl
> tcphdr_len
)
344 for (j
= 0; j
< optl
; ++j
) {
348 if ((i
+ j
) % 2 == 0) {
352 inet_proto_csum_replace2(&tcph
->check
, pkt
->skb
, htons(o
),
355 memset(opt
+ i
, TCPOPT_NOP
, optl
);
359 /* option not found, continue. This allows to do multiple
360 * option removals per rule.
364 regs
->verdict
.code
= NFT_BREAK
;
367 /* can't remove, no choice but to drop */
368 regs
->verdict
.code
= NF_DROP
;
371 static void nft_exthdr_sctp_eval(const struct nft_expr
*expr
,
372 struct nft_regs
*regs
,
373 const struct nft_pktinfo
*pkt
)
375 unsigned int offset
= nft_thoff(pkt
) + sizeof(struct sctphdr
);
376 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
377 u32
*dest
= ®s
->data
[priv
->dreg
];
378 const struct sctp_chunkhdr
*sch
;
379 struct sctp_chunkhdr _sch
;
381 if (pkt
->tprot
!= IPPROTO_SCTP
)
385 sch
= skb_header_pointer(pkt
->skb
, offset
, sizeof(_sch
), &_sch
);
386 if (!sch
|| !sch
->length
)
389 if (sch
->type
== priv
->type
) {
390 if (priv
->flags
& NFT_EXTHDR_F_PRESENT
) {
391 nft_reg_store8(dest
, true);
394 if (priv
->offset
+ priv
->len
> ntohs(sch
->length
) ||
395 offset
+ ntohs(sch
->length
) > pkt
->skb
->len
)
398 if (nft_skb_copy_to_reg(pkt
->skb
, offset
+ priv
->offset
,
399 dest
, priv
->len
) < 0)
403 offset
+= SCTP_PAD4(ntohs(sch
->length
));
404 } while (offset
< pkt
->skb
->len
);
406 if (priv
->flags
& NFT_EXTHDR_F_PRESENT
)
407 nft_reg_store8(dest
, false);
409 regs
->verdict
.code
= NFT_BREAK
;
412 static void nft_exthdr_dccp_eval(const struct nft_expr
*expr
,
413 struct nft_regs
*regs
,
414 const struct nft_pktinfo
*pkt
)
416 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
417 unsigned int thoff
, dataoff
, optoff
, optlen
, i
;
418 u32
*dest
= ®s
->data
[priv
->dreg
];
419 const struct dccp_hdr
*dh
;
422 if (pkt
->tprot
!= IPPROTO_DCCP
|| pkt
->fragoff
)
425 thoff
= nft_thoff(pkt
);
427 dh
= skb_header_pointer(pkt
->skb
, thoff
, sizeof(_dh
), &_dh
);
431 dataoff
= dh
->dccph_doff
* sizeof(u32
);
432 optoff
= __dccp_hdr_len(dh
);
433 if (dataoff
<= optoff
)
436 optlen
= dataoff
- optoff
;
438 for (i
= 0; i
< optlen
; ) {
439 /* Options 0 (DCCPO_PADDING) - 31 (DCCPO_MAX_RESERVED) are 1B in
440 * the length; the remaining options are at least 2B long. In
441 * all cases, the first byte contains the option type. In
442 * multi-byte options, the second byte contains the option
443 * length, which must be at least two: 1 for the type plus 1 for
444 * the length plus 0-253 for any following option data. We
445 * aren't interested in the option data, only the type and the
446 * length, so we don't need to read more than two bytes at a
449 unsigned int buflen
= optlen
- i
;
453 if (buflen
> sizeof(buf
))
454 buflen
= sizeof(buf
);
456 bufp
= skb_header_pointer(pkt
->skb
, thoff
+ optoff
+ i
, buflen
,
463 if (type
== priv
->type
) {
464 nft_reg_store8(dest
, 1);
468 if (type
<= DCCPO_MAX_RESERVED
) {
488 static const struct nla_policy nft_exthdr_policy
[NFTA_EXTHDR_MAX
+ 1] = {
489 [NFTA_EXTHDR_DREG
] = { .type
= NLA_U32
},
490 [NFTA_EXTHDR_TYPE
] = { .type
= NLA_U8
},
491 [NFTA_EXTHDR_OFFSET
] = { .type
= NLA_U32
},
492 [NFTA_EXTHDR_LEN
] = NLA_POLICY_MAX(NLA_BE32
, 255),
493 [NFTA_EXTHDR_FLAGS
] = { .type
= NLA_U32
},
494 [NFTA_EXTHDR_OP
] = NLA_POLICY_MAX(NLA_BE32
, 255),
495 [NFTA_EXTHDR_SREG
] = { .type
= NLA_U32
},
498 static int nft_exthdr_init(const struct nft_ctx
*ctx
,
499 const struct nft_expr
*expr
,
500 const struct nlattr
* const tb
[])
502 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
503 u32 offset
, len
, flags
= 0, op
= NFT_EXTHDR_OP_IPV6
;
506 if (!tb
[NFTA_EXTHDR_DREG
] ||
507 !tb
[NFTA_EXTHDR_TYPE
] ||
508 !tb
[NFTA_EXTHDR_OFFSET
] ||
509 !tb
[NFTA_EXTHDR_LEN
])
512 err
= nft_parse_u32_check(tb
[NFTA_EXTHDR_OFFSET
], U8_MAX
, &offset
);
516 err
= nft_parse_u32_check(tb
[NFTA_EXTHDR_LEN
], U8_MAX
, &len
);
520 if (tb
[NFTA_EXTHDR_FLAGS
]) {
521 err
= nft_parse_u32_check(tb
[NFTA_EXTHDR_FLAGS
], U8_MAX
, &flags
);
525 if (flags
& ~NFT_EXTHDR_F_PRESENT
)
529 if (tb
[NFTA_EXTHDR_OP
]) {
530 err
= nft_parse_u32_check(tb
[NFTA_EXTHDR_OP
], U8_MAX
, &op
);
535 priv
->type
= nla_get_u8(tb
[NFTA_EXTHDR_TYPE
]);
536 priv
->offset
= offset
;
541 return nft_parse_register_store(ctx
, tb
[NFTA_EXTHDR_DREG
],
542 &priv
->dreg
, NULL
, NFT_DATA_VALUE
,
546 static int nft_exthdr_tcp_set_init(const struct nft_ctx
*ctx
,
547 const struct nft_expr
*expr
,
548 const struct nlattr
* const tb
[])
550 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
551 u32 offset
, len
, flags
= 0, op
= NFT_EXTHDR_OP_IPV6
;
554 if (!tb
[NFTA_EXTHDR_SREG
] ||
555 !tb
[NFTA_EXTHDR_TYPE
] ||
556 !tb
[NFTA_EXTHDR_OFFSET
] ||
557 !tb
[NFTA_EXTHDR_LEN
])
560 if (tb
[NFTA_EXTHDR_DREG
] || tb
[NFTA_EXTHDR_FLAGS
])
563 err
= nft_parse_u32_check(tb
[NFTA_EXTHDR_OFFSET
], U8_MAX
, &offset
);
567 err
= nft_parse_u32_check(tb
[NFTA_EXTHDR_LEN
], U8_MAX
, &len
);
581 err
= nft_parse_u32_check(tb
[NFTA_EXTHDR_OP
], U8_MAX
, &op
);
585 priv
->type
= nla_get_u8(tb
[NFTA_EXTHDR_TYPE
]);
586 priv
->offset
= offset
;
591 return nft_parse_register_load(ctx
, tb
[NFTA_EXTHDR_SREG
], &priv
->sreg
,
595 static int nft_exthdr_tcp_strip_init(const struct nft_ctx
*ctx
,
596 const struct nft_expr
*expr
,
597 const struct nlattr
* const tb
[])
599 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
601 if (tb
[NFTA_EXTHDR_SREG
] ||
602 tb
[NFTA_EXTHDR_DREG
] ||
603 tb
[NFTA_EXTHDR_FLAGS
] ||
604 tb
[NFTA_EXTHDR_OFFSET
] ||
608 if (!tb
[NFTA_EXTHDR_TYPE
])
611 priv
->type
= nla_get_u8(tb
[NFTA_EXTHDR_TYPE
]);
612 priv
->op
= NFT_EXTHDR_OP_TCPOPT
;
617 static int nft_exthdr_ipv4_init(const struct nft_ctx
*ctx
,
618 const struct nft_expr
*expr
,
619 const struct nlattr
* const tb
[])
621 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
622 int err
= nft_exthdr_init(ctx
, expr
, tb
);
627 switch (priv
->type
) {
639 static int nft_exthdr_dccp_init(const struct nft_ctx
*ctx
,
640 const struct nft_expr
*expr
,
641 const struct nlattr
* const tb
[])
643 struct nft_exthdr
*priv
= nft_expr_priv(expr
);
644 int err
= nft_exthdr_init(ctx
, expr
, tb
);
649 if (!(priv
->flags
& NFT_EXTHDR_F_PRESENT
))
655 static int nft_exthdr_dump_common(struct sk_buff
*skb
, const struct nft_exthdr
*priv
)
657 if (nla_put_u8(skb
, NFTA_EXTHDR_TYPE
, priv
->type
))
658 goto nla_put_failure
;
659 if (nla_put_be32(skb
, NFTA_EXTHDR_OFFSET
, htonl(priv
->offset
)))
660 goto nla_put_failure
;
661 if (nla_put_be32(skb
, NFTA_EXTHDR_LEN
, htonl(priv
->len
)))
662 goto nla_put_failure
;
663 if (nla_put_be32(skb
, NFTA_EXTHDR_FLAGS
, htonl(priv
->flags
)))
664 goto nla_put_failure
;
665 if (nla_put_be32(skb
, NFTA_EXTHDR_OP
, htonl(priv
->op
)))
666 goto nla_put_failure
;
673 static int nft_exthdr_dump(struct sk_buff
*skb
,
674 const struct nft_expr
*expr
, bool reset
)
676 const struct nft_exthdr
*priv
= nft_expr_priv(expr
);
678 if (nft_dump_register(skb
, NFTA_EXTHDR_DREG
, priv
->dreg
))
681 return nft_exthdr_dump_common(skb
, priv
);
684 static int nft_exthdr_dump_set(struct sk_buff
*skb
,
685 const struct nft_expr
*expr
, bool reset
)
687 const struct nft_exthdr
*priv
= nft_expr_priv(expr
);
689 if (nft_dump_register(skb
, NFTA_EXTHDR_SREG
, priv
->sreg
))
692 return nft_exthdr_dump_common(skb
, priv
);
695 static int nft_exthdr_dump_strip(struct sk_buff
*skb
,
696 const struct nft_expr
*expr
, bool reset
)
698 const struct nft_exthdr
*priv
= nft_expr_priv(expr
);
700 return nft_exthdr_dump_common(skb
, priv
);
703 static bool nft_exthdr_reduce(struct nft_regs_track
*track
,
704 const struct nft_expr
*expr
)
706 const struct nft_exthdr
*priv
= nft_expr_priv(expr
);
707 const struct nft_exthdr
*exthdr
;
709 if (!nft_reg_track_cmp(track
, expr
, priv
->dreg
)) {
710 nft_reg_track_update(track
, expr
, priv
->dreg
, priv
->len
);
714 exthdr
= nft_expr_priv(track
->regs
[priv
->dreg
].selector
);
715 if (priv
->type
!= exthdr
->type
||
716 priv
->op
!= exthdr
->op
||
717 priv
->flags
!= exthdr
->flags
||
718 priv
->offset
!= exthdr
->offset
||
719 priv
->len
!= exthdr
->len
) {
720 nft_reg_track_update(track
, expr
, priv
->dreg
, priv
->len
);
724 if (!track
->regs
[priv
->dreg
].bitwise
)
727 return nft_expr_reduce_bitwise(track
, expr
);
730 static const struct nft_expr_ops nft_exthdr_ipv6_ops
= {
731 .type
= &nft_exthdr_type
,
732 .size
= NFT_EXPR_SIZE(sizeof(struct nft_exthdr
)),
733 .eval
= nft_exthdr_ipv6_eval
,
734 .init
= nft_exthdr_init
,
735 .dump
= nft_exthdr_dump
,
736 .reduce
= nft_exthdr_reduce
,
739 static const struct nft_expr_ops nft_exthdr_ipv4_ops
= {
740 .type
= &nft_exthdr_type
,
741 .size
= NFT_EXPR_SIZE(sizeof(struct nft_exthdr
)),
742 .eval
= nft_exthdr_ipv4_eval
,
743 .init
= nft_exthdr_ipv4_init
,
744 .dump
= nft_exthdr_dump
,
745 .reduce
= nft_exthdr_reduce
,
748 static const struct nft_expr_ops nft_exthdr_tcp_ops
= {
749 .type
= &nft_exthdr_type
,
750 .size
= NFT_EXPR_SIZE(sizeof(struct nft_exthdr
)),
751 .eval
= nft_exthdr_tcp_eval
,
752 .init
= nft_exthdr_init
,
753 .dump
= nft_exthdr_dump
,
754 .reduce
= nft_exthdr_reduce
,
757 static const struct nft_expr_ops nft_exthdr_tcp_set_ops
= {
758 .type
= &nft_exthdr_type
,
759 .size
= NFT_EXPR_SIZE(sizeof(struct nft_exthdr
)),
760 .eval
= nft_exthdr_tcp_set_eval
,
761 .init
= nft_exthdr_tcp_set_init
,
762 .dump
= nft_exthdr_dump_set
,
763 .reduce
= NFT_REDUCE_READONLY
,
766 static const struct nft_expr_ops nft_exthdr_tcp_strip_ops
= {
767 .type
= &nft_exthdr_type
,
768 .size
= NFT_EXPR_SIZE(sizeof(struct nft_exthdr
)),
769 .eval
= nft_exthdr_tcp_strip_eval
,
770 .init
= nft_exthdr_tcp_strip_init
,
771 .dump
= nft_exthdr_dump_strip
,
772 .reduce
= NFT_REDUCE_READONLY
,
775 static const struct nft_expr_ops nft_exthdr_sctp_ops
= {
776 .type
= &nft_exthdr_type
,
777 .size
= NFT_EXPR_SIZE(sizeof(struct nft_exthdr
)),
778 .eval
= nft_exthdr_sctp_eval
,
779 .init
= nft_exthdr_init
,
780 .dump
= nft_exthdr_dump
,
781 .reduce
= nft_exthdr_reduce
,
784 static const struct nft_expr_ops nft_exthdr_dccp_ops
= {
785 .type
= &nft_exthdr_type
,
786 .size
= NFT_EXPR_SIZE(sizeof(struct nft_exthdr
)),
787 .eval
= nft_exthdr_dccp_eval
,
788 .init
= nft_exthdr_dccp_init
,
789 .dump
= nft_exthdr_dump
,
790 .reduce
= nft_exthdr_reduce
,
793 static const struct nft_expr_ops
*
794 nft_exthdr_select_ops(const struct nft_ctx
*ctx
,
795 const struct nlattr
* const tb
[])
799 if (!tb
[NFTA_EXTHDR_OP
])
800 return &nft_exthdr_ipv6_ops
;
802 if (tb
[NFTA_EXTHDR_SREG
] && tb
[NFTA_EXTHDR_DREG
])
803 return ERR_PTR(-EOPNOTSUPP
);
805 op
= ntohl(nla_get_be32(tb
[NFTA_EXTHDR_OP
]));
807 case NFT_EXTHDR_OP_TCPOPT
:
808 if (tb
[NFTA_EXTHDR_SREG
])
809 return &nft_exthdr_tcp_set_ops
;
810 if (tb
[NFTA_EXTHDR_DREG
])
811 return &nft_exthdr_tcp_ops
;
812 return &nft_exthdr_tcp_strip_ops
;
813 case NFT_EXTHDR_OP_IPV6
:
814 if (tb
[NFTA_EXTHDR_DREG
])
815 return &nft_exthdr_ipv6_ops
;
817 case NFT_EXTHDR_OP_IPV4
:
818 if (ctx
->family
!= NFPROTO_IPV6
) {
819 if (tb
[NFTA_EXTHDR_DREG
])
820 return &nft_exthdr_ipv4_ops
;
823 case NFT_EXTHDR_OP_SCTP
:
824 if (tb
[NFTA_EXTHDR_DREG
])
825 return &nft_exthdr_sctp_ops
;
827 case NFT_EXTHDR_OP_DCCP
:
828 if (tb
[NFTA_EXTHDR_DREG
])
829 return &nft_exthdr_dccp_ops
;
833 return ERR_PTR(-EOPNOTSUPP
);
836 struct nft_expr_type nft_exthdr_type __read_mostly
= {
838 .select_ops
= nft_exthdr_select_ops
,
839 .policy
= nft_exthdr_policy
,
840 .maxattr
= NFTA_EXTHDR_MAX
,
841 .owner
= THIS_MODULE
,