// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */
#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>
#include <net/hotdata.h>

#include "xfrm_inout.h"

struct xfrm_trans_tasklet {
	struct work_struct work;
	spinlock_t queue_lock;
	struct sk_buff_head queue;
};

struct xfrm_trans_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
	struct net *net;
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device *xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family > AF_INET6))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family]))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

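/*
 * Illustrative sketch (not part of this file's logic): how a protocol or
 * tunnel handler might register per-family input callbacks with the API
 * above.  The names "example_rcv_cb" and "example_input_afinfo" are
 * hypothetical; the real users of this interface live in
 * xfrm4_protocol.c, xfrm6_protocol.c and the IP tunnel code.
 */
#if 0
static int example_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
{
	/* per-protocol post-input processing would go here */
	return 0;
}

static const struct xfrm_input_afinfo example_input_afinfo = {
	.family		= AF_INET,
	.is_ipip	= false,
	.callback	= example_rcv_cb,
};

static int __init example_init(void)
{
	/* the [is_ipip][family] slot must be free, otherwise -EEXIST */
	return xfrm_input_register_afinfo(&example_input_afinfo);
}

static void __exit example_exit(void)
{
	xfrm_input_unregister_afinfo(&example_input_afinfo);
}
#endif
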
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) {
		if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family > AF_INET6))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6);
	const struct xfrm_input_afinfo *afinfo;
	int ret;

	afinfo = xfrm_input_get_afinfo(family, is_ipip);
	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}

struct sec_path *secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
	if (!sp)
		return NULL;

	if (tmp) /* reused existing one (was COW'd if needed) */
		return sp;

	/* allocated new secpath */
	memset(sp->ovec, 0, sizeof(sp->ovec));
	sp->olen = 0;
	sp->len = 0;
	sp->verified_cnt = 0;

	return sp;
}
EXPORT_SYMBOL(secpath_set);

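/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * receive path typically sets up the secpath and then records each state
 * that handled the packet, much as xfrm_input() does further down.
 */
#if 0
static int example_record_state(struct sk_buff *skb, struct xfrm_state *x)
{
	struct sec_path *sp = secpath_set(skb);

	if (!sp)
		return -ENOMEM;
	if (sp->len == XFRM_MAX_DEPTH)
		return -ENOBUFS;
	sp->xvec[sp->len++] = x;	/* secpath now holds the state reference */
	return 0;
}
#endif
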
/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

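/*
 * Illustrative sketch (hypothetical caller): pulling the SPI and sequence
 * number out of an ESP packet whose transport header already points at
 * the ESP header, as the state-lookup path in xfrm_input() does.
 */
#if 0
static int example_get_esp_spi(struct sk_buff *skb, __be32 *spi, __be32 *seq)
{
	/* 0 on success, negative if the header cannot be pulled */
	return xfrm_parse_spi(skb, IPPROTO_ESP, spi, seq);
}
#endif
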
static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	skb->protocol = htons(ETH_P_IP);

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}

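/*
 * Worked example (illustrative, following the optlen arithmetic in
 * xfrm4_remove_beet_encap() above, taking IPV4_BEET_PHMAXLEN as 8 and the
 * pseudo header as 4 bytes): with ph->hdrlen == 1 and ph->padlen == 0,
 * phlen = 4 and optlen = 1 * 8 + (8 - 4) = 12, which passes the
 * alignment and range checks, so ihl later grows by optlen / 4 = 3 words.
 */
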
static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
	struct iphdr *inner_iph = ipip_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP_ECN_set_ce(inner_iph);
}

static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	skb->protocol = htons(ETH_P_IP);

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP6_ECN_set_ce(skb, inner_iph);
}

static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	skb->protocol = htons(ETH_P_IPV6);

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip6_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	int size = sizeof(struct ipv6hdr);
	int err;

	skb->protocol = htons(ETH_P_IPV6);

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ip6h->daddr = x->sel.daddr.in6;
	ip6h->saddr = x->sel.saddr.in6;
	err = 0;
out:
	return err;
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation
 * header.
 *
 * On entry, the transport header shall point to where the IP header
 * should be and the network header shall be set to where the IP
 * header currently is.  skb->data shall point to the start of the
 * payload.
 */
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
			     struct sk_buff *skb)
{
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		switch (x->sel.family) {
		case AF_INET:
			return xfrm4_remove_beet_encap(x, skb);
		case AF_INET6:
			return xfrm6_remove_beet_encap(x, skb);
		}
		break;
	case XFRM_MODE_TUNNEL:
		switch (XFRM_MODE_SKB_CB(skb)->protocol) {
		case IPPROTO_IPIP:
			return xfrm4_remove_tunnel_encap(x, skb);
		case IPPROTO_IPV6:
			return xfrm6_remove_tunnel_encap(x, skb);
		}
		break;
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->props.family) {
	case AF_INET:
		xfrm4_extract_header(skb);
		break;
	case AF_INET6:
		xfrm6_extract_header(skb);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EAFNOSUPPORT;
	}

	return xfrm_inner_mode_encap_remove(x, skb);
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb_transport_header() shall point to where the IP header
 * should be and skb_network_header() shall be set to where the IP header
 * currently is.  skb->data shall point to the start of the payload.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		if (xo)
			xo->orig_mac_len =
				skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
		skb->network_header = skb->transport_header;
	}
	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
	skb_reset_transport_header(skb);
	return 0;
}

static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct xfrm_offload *xo = xfrm_offload(skb);
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		if (xo)
			xo->orig_mac_len =
				skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
		skb->network_header = skb->transport_header;
	}
	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
					   sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_mode_input(struct xfrm_state *x,
				 struct sk_buff *skb)
{
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		return xfrm_prepare_input(x, skb);
	case XFRM_MODE_TRANSPORT:
		if (x->props.family == AF_INET)
			return xfrm4_transport_input(x, skb);
		if (x->props.family == AF_INET6)
			return xfrm6_transport_input(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		WARN_ON_ONCE(1);
		break;
	default:
		if (x->mode_cbs && x->mode_cbs->input)
			return x->mode_cbs->input(x, skb);

		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

/* NOTE: encap_type - In addition to the normal (non-negative) values for
 * encap_type, a negative value of -1 or -2 can be used to resume/restart this
 * function after a previous invocation early terminated for async operation.
 */
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	const struct xfrm_state_afinfo *afinfo;
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (encap_type < 0 || (xo && (xo->flags & XFRM_GRO || encap_type == 0 ||
				      encap_type == UDP_ENCAP_ESPINUDP))) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->props.family;

		/* An encap_type of -2 indicates reconstructed inner packet */
		if (encap_type == -2)
			goto resume_decapped;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}
		/* GRO call */
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if (xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	sp = secpath_set(skb);
	if (!sp) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		sp = skb_sec_path(skb);

		if (sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_input_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			xfrm_state_put(x);
			x = NULL;
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (xfrm_replay_check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr < 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (xfrm_replay_recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;
		x->lastused = ktime_get_real_seconds();

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		err = xfrm_inner_mode_input(x, skb);
		if (err == -EINPROGRESS)
			return 0;
		else if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}
resume_decapped:
		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->props.family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset_ct(skb);

	if (decaps) {
		sp = skb_sec_path(skb);
		if (sp)
			sp->olen = 0;
		if (skb_valid_dst(skb))
			skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = -EAFNOSUPPORT;
		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(x->props.family);
		if (likely(afinfo))
			err = afinfo->transport_finish(skb, xfrm_gro || async);
		rcu_read_unlock();
		if (xfrm_gro) {
			sp = skb_sec_path(skb);
			if (sp)
				sp->olen = 0;
			if (skb_valid_dst(skb))
				skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

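/*
 * Illustrative sketch (hypothetical completion handler, not part of this
 * file): an ESP implementation whose crypto request completes
 * asynchronously resumes processing by calling xfrm_input_resume() with
 * the inner protocol, which re-enters xfrm_input() with encap_type == -1
 * and jumps to the "resume" label above.  The nexthdr value here is a
 * placeholder; a real handler recovers it from the ESP trailer.
 */
#if 0
static void example_esp_done(void *data, int err)
{
	struct sk_buff *skb = data;
	int nexthdr = IPPROTO_TCP;	/* would come from the ESP trailer */

	if (err) {
		kfree_skb(skb);
		return;
	}
	xfrm_input_resume(skb, nexthdr);
}
#endif
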
static void xfrm_trans_reinject(struct work_struct *work)
{
	struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	spin_lock_bh(&trans->queue_lock);
	skb_queue_splice_init(&trans->queue, &queue);
	spin_unlock_bh(&trans->queue_lock);

	local_bh_disable();
	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
					       NULL, skb);
	local_bh_enable();
}

int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
			 int (*finish)(struct net *, struct sock *,
				       struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))
		return -ENOBUFS;

	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	XFRM_TRANS_SKB_CB(skb)->net = net;
	spin_lock_bh(&trans->queue_lock);
	__skb_queue_tail(&trans->queue, skb);
	spin_unlock_bh(&trans->queue_lock);
	schedule_work(&trans->work);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);

int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);

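/*
 * Illustrative sketch (hypothetical caller): a handler that must not
 * re-enter the receive path from its current, possibly deeply nested,
 * context can hand the skb to the per-CPU work queue above and supply
 * the finish callback that will run later from xfrm_trans_reinject().
 * "example_finish" and "example_defer" are assumed names for
 * illustration only.
 */
#if 0
static int example_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return netif_rx(skb);
}

static int example_defer(struct sk_buff *skb)
{
	/* -ENOBUFS if this CPU's queue is already at max_backlog */
	return xfrm_trans_queue(skb, example_finish);
}
#endif
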
void __init xfrm_input_init(void)
{
	int err;
	int i;

	xfrm_napi_dev = alloc_netdev_dummy(0);
	if (!xfrm_napi_dev)
		panic("Failed to allocate XFRM dummy netdev\n");

	err = gro_cells_init(&gro_cells, xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		spin_lock_init(&trans->queue_lock);
		__skb_queue_head_init(&trans->queue);
		INIT_WORK(&trans->work, xfrm_trans_reinject);
	}
}