1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */
6 #include <crypto/aead.h>
/* key (16 bytes) plus salt (4 bytes) as delivered by the stack, in bits */
#define IXGBE_IPSEC_KEY_BITS  160

/* the only AEAD algorithm the 82599 IPsec engine supports */
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";

/* forward declaration: needed by ixgbe_ipsec_restore() and the VF paths */
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
14 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
15 * @hw: hw specific details
16 * @idx: register index to write
17 * @key: key byte array
20 static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw
*hw
, u16 idx
,
26 for (i
= 0; i
< 4; i
++)
27 IXGBE_WRITE_REG(hw
, IXGBE_IPSTXKEY(i
),
28 (__force u32
)cpu_to_be32(key
[3 - i
]));
29 IXGBE_WRITE_REG(hw
, IXGBE_IPSTXSALT
, (__force u32
)cpu_to_be32(salt
));
30 IXGBE_WRITE_FLUSH(hw
);
32 reg
= IXGBE_READ_REG(hw
, IXGBE_IPSTXIDX
);
33 reg
&= IXGBE_RXTXIDX_IPS_EN
;
34 reg
|= idx
<< IXGBE_RXTXIDX_IDX_SHIFT
| IXGBE_RXTXIDX_WRITE
;
35 IXGBE_WRITE_REG(hw
, IXGBE_IPSTXIDX
, reg
);
36 IXGBE_WRITE_FLUSH(hw
);
40 * ixgbe_ipsec_set_rx_item - set an Rx table item
41 * @hw: hw specific details
42 * @idx: register index to write
43 * @tbl: table selector
45 * Trigger the device to store into a particular Rx table the
46 * data that has already been loaded into the input register
48 static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw
*hw
, u16 idx
,
49 enum ixgbe_ipsec_tbl_sel tbl
)
53 reg
= IXGBE_READ_REG(hw
, IXGBE_IPSRXIDX
);
54 reg
&= IXGBE_RXTXIDX_IPS_EN
;
55 reg
|= tbl
<< IXGBE_RXIDX_TBL_SHIFT
|
56 idx
<< IXGBE_RXTXIDX_IDX_SHIFT
|
58 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXIDX
, reg
);
59 IXGBE_WRITE_FLUSH(hw
);
63 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
64 * @hw: hw specific details
65 * @idx: register index to write
66 * @spi: security parameter index
67 * @key: key byte array
69 * @mode: rx decrypt control bits
70 * @ip_idx: index into IP table for related IP address
72 static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw
*hw
, u16 idx
, __be32 spi
,
73 u32 key
[], u32 salt
, u32 mode
, u32 ip_idx
)
77 /* store the SPI (in bigendian) and IPidx */
78 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXSPI
,
79 (__force u32
)cpu_to_le32((__force u32
)spi
));
80 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXIPIDX
, ip_idx
);
81 IXGBE_WRITE_FLUSH(hw
);
83 ixgbe_ipsec_set_rx_item(hw
, idx
, ips_rx_spi_tbl
);
85 /* store the key, salt, and mode */
86 for (i
= 0; i
< 4; i
++)
87 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXKEY(i
),
88 (__force u32
)cpu_to_be32(key
[3 - i
]));
89 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXSALT
, (__force u32
)cpu_to_be32(salt
));
90 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXMOD
, mode
);
91 IXGBE_WRITE_FLUSH(hw
);
93 ixgbe_ipsec_set_rx_item(hw
, idx
, ips_rx_key_tbl
);
97 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
98 * @hw: hw specific details
99 * @idx: register index to write
100 * @addr: IP address byte array
102 static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw
*hw
, u16 idx
, __be32 addr
[])
106 /* store the ip address */
107 for (i
= 0; i
< 4; i
++)
108 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXIPADDR(i
),
109 (__force u32
)cpu_to_le32((__force u32
)addr
[i
]));
110 IXGBE_WRITE_FLUSH(hw
);
112 ixgbe_ipsec_set_rx_item(hw
, idx
, ips_rx_ip_tbl
);
116 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
117 * @adapter: board private structure
119 static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter
*adapter
)
121 struct ixgbe_hw
*hw
= &adapter
->hw
;
122 u32 buf
[4] = {0, 0, 0, 0};
125 /* disable Rx and Tx SA lookup */
126 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXIDX
, 0);
127 IXGBE_WRITE_REG(hw
, IXGBE_IPSTXIDX
, 0);
129 /* scrub the tables - split the loops for the max of the IP table */
130 for (idx
= 0; idx
< IXGBE_IPSEC_MAX_RX_IP_COUNT
; idx
++) {
131 ixgbe_ipsec_set_tx_sa(hw
, idx
, buf
, 0);
132 ixgbe_ipsec_set_rx_sa(hw
, idx
, 0, buf
, 0, 0, 0);
133 ixgbe_ipsec_set_rx_ip(hw
, idx
, (__be32
*)buf
);
135 for (; idx
< IXGBE_IPSEC_MAX_SA_COUNT
; idx
++) {
136 ixgbe_ipsec_set_tx_sa(hw
, idx
, buf
, 0);
137 ixgbe_ipsec_set_rx_sa(hw
, idx
, 0, buf
, 0, 0, 0);
142 * ixgbe_ipsec_stop_data
143 * @adapter: board private structure
145 static void ixgbe_ipsec_stop_data(struct ixgbe_adapter
*adapter
)
147 struct ixgbe_hw
*hw
= &adapter
->hw
;
148 bool link
= adapter
->link_up
;
153 /* halt data paths */
154 reg
= IXGBE_READ_REG(hw
, IXGBE_SECTXCTRL
);
155 reg
|= IXGBE_SECTXCTRL_TX_DIS
;
156 IXGBE_WRITE_REG(hw
, IXGBE_SECTXCTRL
, reg
);
158 reg
= IXGBE_READ_REG(hw
, IXGBE_SECRXCTRL
);
159 reg
|= IXGBE_SECRXCTRL_RX_DIS
;
160 IXGBE_WRITE_REG(hw
, IXGBE_SECRXCTRL
, reg
);
162 /* If both Tx and Rx are ready there are no packets
163 * that we need to flush so the loopback configuration
164 * below is not necessary.
166 t_rdy
= IXGBE_READ_REG(hw
, IXGBE_SECTXSTAT
) &
167 IXGBE_SECTXSTAT_SECTX_RDY
;
168 r_rdy
= IXGBE_READ_REG(hw
, IXGBE_SECRXSTAT
) &
169 IXGBE_SECRXSTAT_SECRX_RDY
;
173 /* If the tx fifo doesn't have link, but still has data,
174 * we can't clear the tx sec block. Set the MAC loopback
178 reg
= IXGBE_READ_REG(hw
, IXGBE_MACC
);
179 reg
|= IXGBE_MACC_FLU
;
180 IXGBE_WRITE_REG(hw
, IXGBE_MACC
, reg
);
182 reg
= IXGBE_READ_REG(hw
, IXGBE_HLREG0
);
183 reg
|= IXGBE_HLREG0_LPBK
;
184 IXGBE_WRITE_REG(hw
, IXGBE_HLREG0
, reg
);
186 IXGBE_WRITE_FLUSH(hw
);
190 /* wait for the paths to empty */
194 t_rdy
= IXGBE_READ_REG(hw
, IXGBE_SECTXSTAT
) &
195 IXGBE_SECTXSTAT_SECTX_RDY
;
196 r_rdy
= IXGBE_READ_REG(hw
, IXGBE_SECRXSTAT
) &
197 IXGBE_SECRXSTAT_SECRX_RDY
;
198 } while (!(t_rdy
&& r_rdy
) && limit
--);
200 /* undo loopback if we played with it earlier */
202 reg
= IXGBE_READ_REG(hw
, IXGBE_MACC
);
203 reg
&= ~IXGBE_MACC_FLU
;
204 IXGBE_WRITE_REG(hw
, IXGBE_MACC
, reg
);
206 reg
= IXGBE_READ_REG(hw
, IXGBE_HLREG0
);
207 reg
&= ~IXGBE_HLREG0_LPBK
;
208 IXGBE_WRITE_REG(hw
, IXGBE_HLREG0
, reg
);
210 IXGBE_WRITE_FLUSH(hw
);
215 * ixgbe_ipsec_stop_engine
216 * @adapter: board private structure
218 static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter
*adapter
)
220 struct ixgbe_hw
*hw
= &adapter
->hw
;
223 ixgbe_ipsec_stop_data(adapter
);
225 /* disable Rx and Tx SA lookup */
226 IXGBE_WRITE_REG(hw
, IXGBE_IPSTXIDX
, 0);
227 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXIDX
, 0);
229 /* disable the Rx and Tx engines and full packet store-n-forward */
230 reg
= IXGBE_READ_REG(hw
, IXGBE_SECTXCTRL
);
231 reg
|= IXGBE_SECTXCTRL_SECTX_DIS
;
232 reg
&= ~IXGBE_SECTXCTRL_STORE_FORWARD
;
233 IXGBE_WRITE_REG(hw
, IXGBE_SECTXCTRL
, reg
);
235 reg
= IXGBE_READ_REG(hw
, IXGBE_SECRXCTRL
);
236 reg
|= IXGBE_SECRXCTRL_SECRX_DIS
;
237 IXGBE_WRITE_REG(hw
, IXGBE_SECRXCTRL
, reg
);
239 /* restore the "tx security buffer almost full threshold" to 0x250 */
240 IXGBE_WRITE_REG(hw
, IXGBE_SECTXBUFFAF
, 0x250);
242 /* Set minimum IFG between packets back to the default 0x1 */
243 reg
= IXGBE_READ_REG(hw
, IXGBE_SECTXMINIFG
);
244 reg
= (reg
& 0xfffffff0) | 0x1;
245 IXGBE_WRITE_REG(hw
, IXGBE_SECTXMINIFG
, reg
);
247 /* final set for normal (no ipsec offload) processing */
248 IXGBE_WRITE_REG(hw
, IXGBE_SECTXCTRL
, IXGBE_SECTXCTRL_SECTX_DIS
);
249 IXGBE_WRITE_REG(hw
, IXGBE_SECRXCTRL
, IXGBE_SECRXCTRL_SECRX_DIS
);
251 IXGBE_WRITE_FLUSH(hw
);
255 * ixgbe_ipsec_start_engine
256 * @adapter: board private structure
258 * NOTE: this increases power consumption whether being used or not
260 static void ixgbe_ipsec_start_engine(struct ixgbe_adapter
*adapter
)
262 struct ixgbe_hw
*hw
= &adapter
->hw
;
265 ixgbe_ipsec_stop_data(adapter
);
267 /* Set minimum IFG between packets to 3 */
268 reg
= IXGBE_READ_REG(hw
, IXGBE_SECTXMINIFG
);
269 reg
= (reg
& 0xfffffff0) | 0x3;
270 IXGBE_WRITE_REG(hw
, IXGBE_SECTXMINIFG
, reg
);
272 /* Set "tx security buffer almost full threshold" to 0x15 so that the
273 * almost full indication is generated only after buffer contains at
274 * least an entire jumbo packet.
276 reg
= IXGBE_READ_REG(hw
, IXGBE_SECTXBUFFAF
);
277 reg
= (reg
& 0xfffffc00) | 0x15;
278 IXGBE_WRITE_REG(hw
, IXGBE_SECTXBUFFAF
, reg
);
280 /* restart the data paths by clearing the DISABLE bits */
281 IXGBE_WRITE_REG(hw
, IXGBE_SECRXCTRL
, 0);
282 IXGBE_WRITE_REG(hw
, IXGBE_SECTXCTRL
, IXGBE_SECTXCTRL_STORE_FORWARD
);
284 /* enable Rx and Tx SA lookup */
285 IXGBE_WRITE_REG(hw
, IXGBE_IPSTXIDX
, IXGBE_RXTXIDX_IPS_EN
);
286 IXGBE_WRITE_REG(hw
, IXGBE_IPSRXIDX
, IXGBE_RXTXIDX_IPS_EN
);
288 IXGBE_WRITE_FLUSH(hw
);
292 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
293 * @adapter: board private structure
295 * Reload the HW tables from the SW tables after they've been bashed
298 * Any VF entries are removed from the SW and HW tables since either
299 * (a) the VF also gets reset on PF reset and will ask again for the
300 * offloads, or (b) the VF has been removed by a change in the num_vfs.
302 void ixgbe_ipsec_restore(struct ixgbe_adapter
*adapter
)
304 struct ixgbe_ipsec
*ipsec
= adapter
->ipsec
;
305 struct ixgbe_hw
*hw
= &adapter
->hw
;
308 if (!(adapter
->flags2
& IXGBE_FLAG2_IPSEC_ENABLED
))
311 /* clean up and restart the engine */
312 ixgbe_ipsec_stop_engine(adapter
);
313 ixgbe_ipsec_clear_hw_tables(adapter
);
314 ixgbe_ipsec_start_engine(adapter
);
316 /* reload the Rx and Tx keys */
317 for (i
= 0; i
< IXGBE_IPSEC_MAX_SA_COUNT
; i
++) {
318 struct rx_sa
*r
= &ipsec
->rx_tbl
[i
];
319 struct tx_sa
*t
= &ipsec
->tx_tbl
[i
];
322 if (r
->mode
& IXGBE_RXTXMOD_VF
)
323 ixgbe_ipsec_del_sa(r
->xs
);
325 ixgbe_ipsec_set_rx_sa(hw
, i
, r
->xs
->id
.spi
,
327 r
->mode
, r
->iptbl_ind
);
331 if (t
->mode
& IXGBE_RXTXMOD_VF
)
332 ixgbe_ipsec_del_sa(t
->xs
);
334 ixgbe_ipsec_set_tx_sa(hw
, i
, t
->key
, t
->salt
);
338 /* reload the IP addrs */
339 for (i
= 0; i
< IXGBE_IPSEC_MAX_RX_IP_COUNT
; i
++) {
340 struct rx_ip_sa
*ipsa
= &ipsec
->ip_tbl
[i
];
343 ixgbe_ipsec_set_rx_ip(hw
, i
, ipsa
->ipaddr
);
348 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
349 * @ipsec: pointer to ipsec struct
350 * @rxtable: true if we need to look in the Rx table
352 * Returns the first unused index in either the Rx or Tx SA table
354 static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec
*ipsec
, bool rxtable
)
359 if (ipsec
->num_rx_sa
== IXGBE_IPSEC_MAX_SA_COUNT
)
362 /* search rx sa table */
363 for (i
= 0; i
< IXGBE_IPSEC_MAX_SA_COUNT
; i
++) {
364 if (!ipsec
->rx_tbl
[i
].used
)
368 if (ipsec
->num_tx_sa
== IXGBE_IPSEC_MAX_SA_COUNT
)
371 /* search tx sa table */
372 for (i
= 0; i
< IXGBE_IPSEC_MAX_SA_COUNT
; i
++) {
373 if (!ipsec
->tx_tbl
[i
].used
)
382 * ixgbe_ipsec_find_rx_state - find the state that matches
383 * @ipsec: pointer to ipsec struct
384 * @daddr: inbound address to match
385 * @proto: protocol to match
387 * @ip4: true if using an ipv4 address
389 * Returns a pointer to the matching SA state information
391 static struct xfrm_state
*ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec
*ipsec
,
392 __be32
*daddr
, u8 proto
,
393 __be32 spi
, bool ip4
)
396 struct xfrm_state
*ret
= NULL
;
399 hash_for_each_possible_rcu(ipsec
->rx_sa_list
, rsa
, hlist
,
401 if (rsa
->mode
& IXGBE_RXTXMOD_VF
)
403 if (spi
== rsa
->xs
->id
.spi
&&
404 ((ip4
&& *daddr
== rsa
->xs
->id
.daddr
.a4
) ||
405 (!ip4
&& !memcmp(daddr
, &rsa
->xs
->id
.daddr
.a6
,
406 sizeof(rsa
->xs
->id
.daddr
.a6
)))) &&
407 proto
== rsa
->xs
->id
.proto
) {
409 xfrm_state_hold(ret
);
418 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
419 * @xs: pointer to xfrm_state struct
420 * @mykey: pointer to key array to populate
421 * @mysalt: pointer to salt value to populate
423 * This copies the protocol keys and salt to our own data tables. The
424 * 82599 family only supports the one algorithm.
426 static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state
*xs
,
427 u32
*mykey
, u32
*mysalt
)
429 struct net_device
*dev
= xs
->xso
.dev
;
430 unsigned char *key_data
;
431 char *alg_name
= NULL
;
435 netdev_err(dev
, "Unsupported IPsec algorithm\n");
439 if (xs
->aead
->alg_icv_len
!= IXGBE_IPSEC_AUTH_BITS
) {
440 netdev_err(dev
, "IPsec offload requires %d bit authentication\n",
441 IXGBE_IPSEC_AUTH_BITS
);
445 key_data
= &xs
->aead
->alg_key
[0];
446 key_len
= xs
->aead
->alg_key_len
;
447 alg_name
= xs
->aead
->alg_name
;
449 if (strcmp(alg_name
, aes_gcm_name
)) {
450 netdev_err(dev
, "Unsupported IPsec algorithm - please use %s\n",
455 /* The key bytes come down in a bigendian array of bytes, so
456 * we don't need to do any byteswapping.
457 * 160 accounts for 16 byte key and 4 byte salt
459 if (key_len
== IXGBE_IPSEC_KEY_BITS
) {
460 *mysalt
= ((u32
*)key_data
)[4];
461 } else if (key_len
!= (IXGBE_IPSEC_KEY_BITS
- (sizeof(*mysalt
) * 8))) {
462 netdev_err(dev
, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
465 netdev_info(dev
, "IPsec hw offload parameters missing 32 bit salt value\n");
468 memcpy(mykey
, key_data
, 16);
474 * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
475 * @xs: pointer to transformer state struct
477 static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state
*xs
)
479 struct net_device
*dev
= xs
->xso
.dev
;
480 struct ixgbe_adapter
*adapter
= netdev_priv(dev
);
481 struct ixgbe_hw
*hw
= &adapter
->hw
;
482 u32 mfval
, manc
, reg
;
488 #define MANC_EN_IPV4_FILTER BIT(24)
489 #define MFVAL_IPV4_FILTER_SHIFT 16
490 #define MFVAL_IPV6_FILTER_SHIFT 24
491 #define MIPAF_ARR(_m, _n) (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))
493 #define IXGBE_BMCIP(_n) (0x5050 + ((_n) * 4))
494 #define IXGBE_BMCIPVAL 0x5060
497 #define BMCIP_MASK 0x3
499 manc
= IXGBE_READ_REG(hw
, IXGBE_MANC
);
500 manc_ipv4
= !!(manc
& MANC_EN_IPV4_FILTER
);
501 mfval
= IXGBE_READ_REG(hw
, IXGBE_MFVAL
);
502 bmcipval
= IXGBE_READ_REG(hw
, IXGBE_BMCIPVAL
);
504 if (xs
->props
.family
== AF_INET
) {
505 /* are there any IPv4 filters to check? */
507 /* the 4 ipv4 filters are all in MIPAF(3, i) */
508 for (i
= 0; i
< num_filters
; i
++) {
509 if (!(mfval
& BIT(MFVAL_IPV4_FILTER_SHIFT
+ i
)))
512 reg
= IXGBE_READ_REG(hw
, MIPAF_ARR(3, i
));
513 if (reg
== xs
->id
.daddr
.a4
)
518 if ((bmcipval
& BMCIP_MASK
) == BMCIP_V4
) {
519 reg
= IXGBE_READ_REG(hw
, IXGBE_BMCIP(3));
520 if (reg
== xs
->id
.daddr
.a4
)
525 /* if there are ipv4 filters, they are in the last ipv6 slot */
529 for (i
= 0; i
< num_filters
; i
++) {
530 if (!(mfval
& BIT(MFVAL_IPV6_FILTER_SHIFT
+ i
)))
533 for (j
= 0; j
< 4; j
++) {
534 reg
= IXGBE_READ_REG(hw
, MIPAF_ARR(i
, j
));
535 if (reg
!= xs
->id
.daddr
.a6
[j
])
538 if (j
== 4) /* did we match all 4 words? */
542 if ((bmcipval
& BMCIP_MASK
) == BMCIP_V6
) {
543 for (j
= 0; j
< 4; j
++) {
544 reg
= IXGBE_READ_REG(hw
, IXGBE_BMCIP(j
));
545 if (reg
!= xs
->id
.daddr
.a6
[j
])
548 if (j
== 4) /* did we match all 4 words? */
557 * ixgbe_ipsec_add_sa - program device with a security association
558 * @xs: pointer to transformer state struct
560 static int ixgbe_ipsec_add_sa(struct xfrm_state
*xs
)
562 struct net_device
*dev
= xs
->xso
.dev
;
563 struct ixgbe_adapter
*adapter
= netdev_priv(dev
);
564 struct ixgbe_ipsec
*ipsec
= adapter
->ipsec
;
565 struct ixgbe_hw
*hw
= &adapter
->hw
;
566 int checked
, match
, first
;
571 if (xs
->id
.proto
!= IPPROTO_ESP
&& xs
->id
.proto
!= IPPROTO_AH
) {
572 netdev_err(dev
, "Unsupported protocol 0x%04x for ipsec offload\n",
577 if (ixgbe_ipsec_check_mgmt_ip(xs
)) {
578 netdev_err(dev
, "IPsec IP addr clash with mgmt filters\n");
582 if (xs
->xso
.flags
& XFRM_OFFLOAD_INBOUND
) {
586 netdev_err(dev
, "Compression offload not supported\n");
590 /* find the first unused index */
591 ret
= ixgbe_ipsec_find_empty_idx(ipsec
, true);
593 netdev_err(dev
, "No space for SA in Rx table!\n");
598 memset(&rsa
, 0, sizeof(rsa
));
602 if (rsa
.xs
->id
.proto
& IPPROTO_ESP
)
603 rsa
.decrypt
= xs
->ealg
|| xs
->aead
;
605 /* get the key and salt */
606 ret
= ixgbe_ipsec_parse_proto_keys(xs
, rsa
.key
, &rsa
.salt
);
608 netdev_err(dev
, "Failed to get key data for Rx SA table\n");
612 /* get ip for rx sa table */
613 if (xs
->props
.family
== AF_INET6
)
614 memcpy(rsa
.ipaddr
, &xs
->id
.daddr
.a6
, 16);
616 memcpy(&rsa
.ipaddr
[3], &xs
->id
.daddr
.a4
, 4);
618 /* The HW does not have a 1:1 mapping from keys to IP addrs, so
619 * check for a matching IP addr entry in the table. If the addr
620 * already exists, use it; else find an unused slot and add the
621 * addr. If one does not exist and there are no unused table
622 * entries, fail the request.
625 /* Find an existing match or first not used, and stop looking
626 * after we've checked all we know we have.
632 i
< IXGBE_IPSEC_MAX_RX_IP_COUNT
&&
633 (checked
< ipsec
->num_rx_sa
|| first
< 0);
635 if (ipsec
->ip_tbl
[i
].used
) {
636 if (!memcmp(ipsec
->ip_tbl
[i
].ipaddr
,
637 rsa
.ipaddr
, sizeof(rsa
.ipaddr
))) {
642 } else if (first
< 0) {
643 first
= i
; /* track the first empty seen */
647 if (ipsec
->num_rx_sa
== 0)
651 /* addrs are the same, we should use this one */
652 rsa
.iptbl_ind
= match
;
653 ipsec
->ip_tbl
[match
].ref_cnt
++;
655 } else if (first
>= 0) {
656 /* no matches, but here's an empty slot */
657 rsa
.iptbl_ind
= first
;
659 memcpy(ipsec
->ip_tbl
[first
].ipaddr
,
660 rsa
.ipaddr
, sizeof(rsa
.ipaddr
));
661 ipsec
->ip_tbl
[first
].ref_cnt
= 1;
662 ipsec
->ip_tbl
[first
].used
= true;
664 ixgbe_ipsec_set_rx_ip(hw
, rsa
.iptbl_ind
, rsa
.ipaddr
);
667 /* no match and no empty slot */
668 netdev_err(dev
, "No space for SA in Rx IP SA table\n");
669 memset(&rsa
, 0, sizeof(rsa
));
673 rsa
.mode
= IXGBE_RXMOD_VALID
;
674 if (rsa
.xs
->id
.proto
& IPPROTO_ESP
)
675 rsa
.mode
|= IXGBE_RXMOD_PROTO_ESP
;
677 rsa
.mode
|= IXGBE_RXMOD_DECRYPT
;
678 if (rsa
.xs
->props
.family
== AF_INET6
)
679 rsa
.mode
|= IXGBE_RXMOD_IPV6
;
681 /* the preparations worked, so save the info */
682 memcpy(&ipsec
->rx_tbl
[sa_idx
], &rsa
, sizeof(rsa
));
684 ixgbe_ipsec_set_rx_sa(hw
, sa_idx
, rsa
.xs
->id
.spi
, rsa
.key
,
685 rsa
.salt
, rsa
.mode
, rsa
.iptbl_ind
);
686 xs
->xso
.offload_handle
= sa_idx
+ IXGBE_IPSEC_BASE_RX_INDEX
;
690 /* hash the new entry for faster search in Rx path */
691 hash_add_rcu(ipsec
->rx_sa_list
, &ipsec
->rx_tbl
[sa_idx
].hlist
,
692 (__force u32
)rsa
.xs
->id
.spi
);
696 if (adapter
->num_vfs
)
699 /* find the first unused index */
700 ret
= ixgbe_ipsec_find_empty_idx(ipsec
, false);
702 netdev_err(dev
, "No space for SA in Tx table\n");
707 memset(&tsa
, 0, sizeof(tsa
));
711 if (xs
->id
.proto
& IPPROTO_ESP
)
712 tsa
.encrypt
= xs
->ealg
|| xs
->aead
;
714 ret
= ixgbe_ipsec_parse_proto_keys(xs
, tsa
.key
, &tsa
.salt
);
716 netdev_err(dev
, "Failed to get key data for Tx SA table\n");
717 memset(&tsa
, 0, sizeof(tsa
));
721 /* the preparations worked, so save the info */
722 memcpy(&ipsec
->tx_tbl
[sa_idx
], &tsa
, sizeof(tsa
));
724 ixgbe_ipsec_set_tx_sa(hw
, sa_idx
, tsa
.key
, tsa
.salt
);
726 xs
->xso
.offload_handle
= sa_idx
+ IXGBE_IPSEC_BASE_TX_INDEX
;
731 /* enable the engine if not already warmed up */
732 if (!(adapter
->flags2
& IXGBE_FLAG2_IPSEC_ENABLED
)) {
733 ixgbe_ipsec_start_engine(adapter
);
734 adapter
->flags2
|= IXGBE_FLAG2_IPSEC_ENABLED
;
741 * ixgbe_ipsec_del_sa - clear out this specific SA
742 * @xs: pointer to transformer state struct
744 static void ixgbe_ipsec_del_sa(struct xfrm_state
*xs
)
746 struct net_device
*dev
= xs
->xso
.dev
;
747 struct ixgbe_adapter
*adapter
= netdev_priv(dev
);
748 struct ixgbe_ipsec
*ipsec
= adapter
->ipsec
;
749 struct ixgbe_hw
*hw
= &adapter
->hw
;
750 u32 zerobuf
[4] = {0, 0, 0, 0};
753 if (xs
->xso
.flags
& XFRM_OFFLOAD_INBOUND
) {
757 sa_idx
= xs
->xso
.offload_handle
- IXGBE_IPSEC_BASE_RX_INDEX
;
758 rsa
= &ipsec
->rx_tbl
[sa_idx
];
761 netdev_err(dev
, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
762 sa_idx
, xs
->xso
.offload_handle
);
766 ixgbe_ipsec_set_rx_sa(hw
, sa_idx
, 0, zerobuf
, 0, 0, 0);
767 hash_del_rcu(&rsa
->hlist
);
769 /* if the IP table entry is referenced by only this SA,
770 * i.e. ref_cnt is only 1, clear the IP table entry as well
772 ipi
= rsa
->iptbl_ind
;
773 if (ipsec
->ip_tbl
[ipi
].ref_cnt
> 0) {
774 ipsec
->ip_tbl
[ipi
].ref_cnt
--;
776 if (!ipsec
->ip_tbl
[ipi
].ref_cnt
) {
777 memset(&ipsec
->ip_tbl
[ipi
], 0,
778 sizeof(struct rx_ip_sa
));
779 ixgbe_ipsec_set_rx_ip(hw
, ipi
,
780 (__force __be32
*)zerobuf
);
784 memset(rsa
, 0, sizeof(struct rx_sa
));
787 sa_idx
= xs
->xso
.offload_handle
- IXGBE_IPSEC_BASE_TX_INDEX
;
789 if (!ipsec
->tx_tbl
[sa_idx
].used
) {
790 netdev_err(dev
, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
791 sa_idx
, xs
->xso
.offload_handle
);
795 ixgbe_ipsec_set_tx_sa(hw
, sa_idx
, zerobuf
, 0);
796 memset(&ipsec
->tx_tbl
[sa_idx
], 0, sizeof(struct tx_sa
));
800 /* if there are no SAs left, stop the engine to save energy */
801 if (ipsec
->num_rx_sa
== 0 && ipsec
->num_tx_sa
== 0) {
802 adapter
->flags2
&= ~IXGBE_FLAG2_IPSEC_ENABLED
;
803 ixgbe_ipsec_stop_engine(adapter
);
808 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
809 * @skb: current data packet
810 * @xs: pointer to transformer state struct
812 static bool ixgbe_ipsec_offload_ok(struct sk_buff
*skb
, struct xfrm_state
*xs
)
814 if (xs
->props
.family
== AF_INET
) {
815 /* Offload with IPv4 options is not supported yet */
816 if (ip_hdr(skb
)->ihl
!= 5)
819 /* Offload with IPv6 extension headers is not support yet */
820 if (ipv6_ext_hdr(ipv6_hdr(skb
)->nexthdr
))
827 static const struct xfrmdev_ops ixgbe_xfrmdev_ops
= {
828 .xdo_dev_state_add
= ixgbe_ipsec_add_sa
,
829 .xdo_dev_state_delete
= ixgbe_ipsec_del_sa
,
830 .xdo_dev_offload_ok
= ixgbe_ipsec_offload_ok
,
834 * ixgbe_ipsec_vf_clear - clear the tables of data for a VF
835 * @adapter: board private structure
836 * @vf: VF id to be removed
838 void ixgbe_ipsec_vf_clear(struct ixgbe_adapter
*adapter
, u32 vf
)
840 struct ixgbe_ipsec
*ipsec
= adapter
->ipsec
;
843 /* search rx sa table */
844 for (i
= 0; i
< IXGBE_IPSEC_MAX_SA_COUNT
&& ipsec
->num_rx_sa
; i
++) {
845 if (!ipsec
->rx_tbl
[i
].used
)
847 if (ipsec
->rx_tbl
[i
].mode
& IXGBE_RXTXMOD_VF
&&
848 ipsec
->rx_tbl
[i
].vf
== vf
)
849 ixgbe_ipsec_del_sa(ipsec
->rx_tbl
[i
].xs
);
852 /* search tx sa table */
853 for (i
= 0; i
< IXGBE_IPSEC_MAX_SA_COUNT
&& ipsec
->num_tx_sa
; i
++) {
854 if (!ipsec
->tx_tbl
[i
].used
)
856 if (ipsec
->tx_tbl
[i
].mode
& IXGBE_RXTXMOD_VF
&&
857 ipsec
->tx_tbl
[i
].vf
== vf
)
858 ixgbe_ipsec_del_sa(ipsec
->tx_tbl
[i
].xs
);
863 * ixgbe_ipsec_vf_add_sa - translate VF request to SA add
864 * @adapter: board private structure
865 * @msgbuf: The message buffer
868 * Make up a new xs and algorithm info from the data sent by the VF.
869 * We only need to sketch in just enough to set up the HW offload.
870 * Put the resulting offload_handle into the return message to the VF.
872 * Returns 0 or error value
874 int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter
*adapter
, u32
*msgbuf
, u32 vf
)
876 struct ixgbe_ipsec
*ipsec
= adapter
->ipsec
;
877 struct xfrm_algo_desc
*algo
;
878 struct sa_mbx_msg
*sam
;
879 struct xfrm_state
*xs
;
885 sam
= (struct sa_mbx_msg
*)(&msgbuf
[1]);
886 if (!adapter
->vfinfo
[vf
].trusted
||
887 !(adapter
->flags2
& IXGBE_FLAG2_VF_IPSEC_ENABLED
)) {
888 e_warn(drv
, "VF %d attempted to add an IPsec SA\n", vf
);
893 /* Tx IPsec offload doesn't seem to work on this
894 * device, so block these requests for now.
896 if (!(sam
->flags
& XFRM_OFFLOAD_INBOUND
)) {
901 xs
= kzalloc(sizeof(*xs
), GFP_KERNEL
);
907 xs
->xso
.flags
= sam
->flags
;
908 xs
->id
.spi
= sam
->spi
;
909 xs
->id
.proto
= sam
->proto
;
910 xs
->props
.family
= sam
->family
;
911 if (xs
->props
.family
== AF_INET6
)
912 memcpy(&xs
->id
.daddr
.a6
, sam
->addr
, sizeof(xs
->id
.daddr
.a6
));
914 memcpy(&xs
->id
.daddr
.a4
, sam
->addr
, sizeof(xs
->id
.daddr
.a4
));
915 xs
->xso
.dev
= adapter
->netdev
;
917 algo
= xfrm_aead_get_byname(aes_gcm_name
, IXGBE_IPSEC_AUTH_BITS
, 1);
918 if (unlikely(!algo
)) {
923 aead_len
= sizeof(*xs
->aead
) + IXGBE_IPSEC_KEY_BITS
/ 8;
924 xs
->aead
= kzalloc(aead_len
, GFP_KERNEL
);
925 if (unlikely(!xs
->aead
)) {
930 xs
->props
.ealgo
= algo
->desc
.sadb_alg_id
;
931 xs
->geniv
= algo
->uinfo
.aead
.geniv
;
932 xs
->aead
->alg_icv_len
= IXGBE_IPSEC_AUTH_BITS
;
933 xs
->aead
->alg_key_len
= IXGBE_IPSEC_KEY_BITS
;
934 memcpy(xs
->aead
->alg_key
, sam
->key
, sizeof(sam
->key
));
935 memcpy(xs
->aead
->alg_name
, aes_gcm_name
, sizeof(aes_gcm_name
));
937 /* set up the HW offload */
938 err
= ixgbe_ipsec_add_sa(xs
);
942 pfsa
= xs
->xso
.offload_handle
;
943 if (pfsa
< IXGBE_IPSEC_BASE_TX_INDEX
) {
944 sa_idx
= pfsa
- IXGBE_IPSEC_BASE_RX_INDEX
;
945 ipsec
->rx_tbl
[sa_idx
].vf
= vf
;
946 ipsec
->rx_tbl
[sa_idx
].mode
|= IXGBE_RXTXMOD_VF
;
948 sa_idx
= pfsa
- IXGBE_IPSEC_BASE_TX_INDEX
;
949 ipsec
->tx_tbl
[sa_idx
].vf
= vf
;
950 ipsec
->tx_tbl
[sa_idx
].mode
|= IXGBE_RXTXMOD_VF
;
953 msgbuf
[1] = xs
->xso
.offload_handle
;
958 memset(xs
->aead
, 0, sizeof(*xs
->aead
));
961 memset(xs
, 0, sizeof(*xs
));
969 * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete
970 * @adapter: board private structure
971 * @msgbuf: The message buffer
974 * Given the offload_handle sent by the VF, look for the related SA table
975 * entry and use its xs field to call for a delete of the SA.
977 * Note: We silently ignore requests to delete entries that are already
978 * set to unused because when a VF is set to "DOWN", the PF first
979 * gets a reset and clears all the VF's entries; then the VF's
980 * XFRM stack sends individual deletes for each entry, which the
981 * reset already removed. In the future it might be good to try to
982 * optimize this so not so many unnecessary delete messages are sent.
984 * Returns 0 or error value
986 int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter
*adapter
, u32
*msgbuf
, u32 vf
)
988 struct ixgbe_ipsec
*ipsec
= adapter
->ipsec
;
989 struct xfrm_state
*xs
;
990 u32 pfsa
= msgbuf
[1];
993 if (!adapter
->vfinfo
[vf
].trusted
) {
994 e_err(drv
, "vf %d attempted to delete an SA\n", vf
);
998 if (pfsa
< IXGBE_IPSEC_BASE_TX_INDEX
) {
1001 sa_idx
= pfsa
- IXGBE_IPSEC_BASE_RX_INDEX
;
1002 if (sa_idx
>= IXGBE_IPSEC_MAX_SA_COUNT
) {
1003 e_err(drv
, "vf %d SA index %d out of range\n",
1008 rsa
= &ipsec
->rx_tbl
[sa_idx
];
1013 if (!(rsa
->mode
& IXGBE_RXTXMOD_VF
) ||
1015 e_err(drv
, "vf %d bad Rx SA index %d\n", vf
, sa_idx
);
1019 xs
= ipsec
->rx_tbl
[sa_idx
].xs
;
1023 sa_idx
= pfsa
- IXGBE_IPSEC_BASE_TX_INDEX
;
1024 if (sa_idx
>= IXGBE_IPSEC_MAX_SA_COUNT
) {
1025 e_err(drv
, "vf %d SA index %d out of range\n",
1030 tsa
= &ipsec
->tx_tbl
[sa_idx
];
1035 if (!(tsa
->mode
& IXGBE_RXTXMOD_VF
) ||
1037 e_err(drv
, "vf %d bad Tx SA index %d\n", vf
, sa_idx
);
1041 xs
= ipsec
->tx_tbl
[sa_idx
].xs
;
1044 ixgbe_ipsec_del_sa(xs
);
1046 /* remove the xs that was made-up in the add request */
1047 memset(xs
, 0, sizeof(*xs
));
1054 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
1055 * @tx_ring: outgoing context
1056 * @first: current data packet
1057 * @itd: ipsec Tx data for later use in building context descriptor
1059 int ixgbe_ipsec_tx(struct ixgbe_ring
*tx_ring
,
1060 struct ixgbe_tx_buffer
*first
,
1061 struct ixgbe_ipsec_tx_data
*itd
)
1063 struct ixgbe_adapter
*adapter
= netdev_priv(tx_ring
->netdev
);
1064 struct ixgbe_ipsec
*ipsec
= adapter
->ipsec
;
1065 struct xfrm_state
*xs
;
1068 if (unlikely(!first
->skb
->sp
->len
)) {
1069 netdev_err(tx_ring
->netdev
, "%s: no xfrm state len = %d\n",
1070 __func__
, first
->skb
->sp
->len
);
1074 xs
= xfrm_input_state(first
->skb
);
1075 if (unlikely(!xs
)) {
1076 netdev_err(tx_ring
->netdev
, "%s: no xfrm_input_state() xs = %p\n",
1081 itd
->sa_idx
= xs
->xso
.offload_handle
- IXGBE_IPSEC_BASE_TX_INDEX
;
1082 if (unlikely(itd
->sa_idx
>= IXGBE_IPSEC_MAX_SA_COUNT
)) {
1083 netdev_err(tx_ring
->netdev
, "%s: bad sa_idx=%d handle=%lu\n",
1084 __func__
, itd
->sa_idx
, xs
->xso
.offload_handle
);
1088 tsa
= &ipsec
->tx_tbl
[itd
->sa_idx
];
1089 if (unlikely(!tsa
->used
)) {
1090 netdev_err(tx_ring
->netdev
, "%s: unused sa_idx=%d\n",
1091 __func__
, itd
->sa_idx
);
1095 first
->tx_flags
|= IXGBE_TX_FLAGS_IPSEC
| IXGBE_TX_FLAGS_CC
;
1097 if (xs
->id
.proto
== IPPROTO_ESP
) {
1099 itd
->flags
|= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP
|
1100 IXGBE_ADVTXD_TUCMD_L4T_TCP
;
1101 if (first
->protocol
== htons(ETH_P_IP
))
1102 itd
->flags
|= IXGBE_ADVTXD_TUCMD_IPV4
;
1104 /* The actual trailer length is authlen (16 bytes) plus
1105 * 2 bytes for the proto and the padlen values, plus
1106 * padlen bytes of padding. This ends up not the same
1107 * as the static value found in xs->props.trailer_len (21).
1109 * ... but if we're doing GSO, don't bother as the stack
1110 * doesn't add a trailer for those.
1112 if (!skb_is_gso(first
->skb
)) {
1113 /* The "correct" way to get the auth length would be
1115 * authlen = crypto_aead_authsize(xs->data);
1116 * but since we know we only have one size to worry
1117 * about * we can let the compiler use the constant
1118 * and save us a few CPU cycles.
1120 const int authlen
= IXGBE_IPSEC_AUTH_BITS
/ 8;
1121 struct sk_buff
*skb
= first
->skb
;
1125 ret
= skb_copy_bits(skb
, skb
->len
- (authlen
+ 2),
1129 itd
->trailer_len
= authlen
+ 2 + padlen
;
1133 itd
->flags
|= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN
;
1139 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
1140 * @rx_ring: receiving ring
1141 * @rx_desc: receive data descriptor
1142 * @skb: current data packet
1144 * Determine if there was an ipsec encapsulation noticed, and if so set up
1145 * the resulting status for later in the receive stack.
1147 void ixgbe_ipsec_rx(struct ixgbe_ring
*rx_ring
,
1148 union ixgbe_adv_rx_desc
*rx_desc
,
1149 struct sk_buff
*skb
)
1151 struct ixgbe_adapter
*adapter
= netdev_priv(rx_ring
->netdev
);
1152 __le16 pkt_info
= rx_desc
->wb
.lower
.lo_dword
.hs_rss
.pkt_info
;
1153 __le16 ipsec_pkt_types
= cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH
|
1154 IXGBE_RXDADV_PKTTYPE_IPSEC_ESP
);
1155 struct ixgbe_ipsec
*ipsec
= adapter
->ipsec
;
1156 struct xfrm_offload
*xo
= NULL
;
1157 struct xfrm_state
*xs
= NULL
;
1158 struct ipv6hdr
*ip6
= NULL
;
1159 struct iphdr
*ip4
= NULL
;
1165 /* Find the ip and crypto headers in the data.
1166 * We can assume no vlan header in the way, b/c the
1167 * hw won't recognize the IPsec packet and anyway the
1168 * currently vlan device doesn't support xfrm offload.
1170 if (pkt_info
& cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4
)) {
1171 ip4
= (struct iphdr
*)(skb
->data
+ ETH_HLEN
);
1172 daddr
= &ip4
->daddr
;
1173 c_hdr
= (u8
*)ip4
+ ip4
->ihl
* 4;
1174 } else if (pkt_info
& cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6
)) {
1175 ip6
= (struct ipv6hdr
*)(skb
->data
+ ETH_HLEN
);
1176 daddr
= &ip6
->daddr
;
1177 c_hdr
= (u8
*)ip6
+ sizeof(struct ipv6hdr
);
1182 switch (pkt_info
& ipsec_pkt_types
) {
1183 case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH
):
1184 spi
= ((struct ip_auth_hdr
*)c_hdr
)->spi
;
1187 case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP
):
1188 spi
= ((struct ip_esp_hdr
*)c_hdr
)->spi
;
1189 proto
= IPPROTO_ESP
;
1195 xs
= ixgbe_ipsec_find_rx_state(ipsec
, daddr
, proto
, spi
, !!ip4
);
1199 skb
->sp
= secpath_dup(skb
->sp
);
1200 if (unlikely(!skb
->sp
))
1203 skb
->sp
->xvec
[skb
->sp
->len
++] = xs
;
1205 xo
= xfrm_offload(skb
);
1206 xo
->flags
= CRYPTO_DONE
;
1207 xo
->status
= CRYPTO_SUCCESS
;
1209 adapter
->rx_ipsec
++;
1213 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
1214 * @adapter: board private structure
1216 void ixgbe_init_ipsec_offload(struct ixgbe_adapter
*adapter
)
1218 struct ixgbe_hw
*hw
= &adapter
->hw
;
1219 struct ixgbe_ipsec
*ipsec
;
1223 if (hw
->mac
.type
== ixgbe_mac_82598EB
)
1226 /* If there is no support for either Tx or Rx offload
1227 * we should not be advertising support for IPsec.
1229 t_dis
= IXGBE_READ_REG(hw
, IXGBE_SECTXSTAT
) &
1230 IXGBE_SECTXSTAT_SECTX_OFF_DIS
;
1231 r_dis
= IXGBE_READ_REG(hw
, IXGBE_SECRXSTAT
) &
1232 IXGBE_SECRXSTAT_SECRX_OFF_DIS
;
1236 ipsec
= kzalloc(sizeof(*ipsec
), GFP_KERNEL
);
1239 hash_init(ipsec
->rx_sa_list
);
1241 size
= sizeof(struct rx_sa
) * IXGBE_IPSEC_MAX_SA_COUNT
;
1242 ipsec
->rx_tbl
= kzalloc(size
, GFP_KERNEL
);
1246 size
= sizeof(struct tx_sa
) * IXGBE_IPSEC_MAX_SA_COUNT
;
1247 ipsec
->tx_tbl
= kzalloc(size
, GFP_KERNEL
);
1251 size
= sizeof(struct rx_ip_sa
) * IXGBE_IPSEC_MAX_RX_IP_COUNT
;
1252 ipsec
->ip_tbl
= kzalloc(size
, GFP_KERNEL
);
1256 ipsec
->num_rx_sa
= 0;
1257 ipsec
->num_tx_sa
= 0;
1259 adapter
->ipsec
= ipsec
;
1260 ixgbe_ipsec_stop_engine(adapter
);
1261 ixgbe_ipsec_clear_hw_tables(adapter
);
1263 adapter
->netdev
->xfrmdev_ops
= &ixgbe_xfrmdev_ops
;
1268 kfree(ipsec
->ip_tbl
);
1269 kfree(ipsec
->rx_tbl
);
1270 kfree(ipsec
->tx_tbl
);
1273 netdev_err(adapter
->netdev
, "Unable to allocate memory for SA tables");
1277 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
1278 * @adapter: board private structure
1280 void ixgbe_stop_ipsec_offload(struct ixgbe_adapter
*adapter
)
1282 struct ixgbe_ipsec
*ipsec
= adapter
->ipsec
;
1284 adapter
->ipsec
= NULL
;
1286 kfree(ipsec
->ip_tbl
);
1287 kfree(ipsec
->rx_tbl
);
1288 kfree(ipsec
->tx_tbl
);