/*******************************************************************************
 *
 * Intel 10 Gigabit PCI Express Linux driver
 * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * Linux NICS <linux.nics@intel.com>
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
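
/* The Tx SA table is not written directly: the key and salt are first
 * staged in the IPSTXKEY/IPSTXSALT registers, and the write to IPSTXIDX
 * with IXGBE_RXTXIDX_WRITE plus the slot index then tells the hardware
 * to latch the staged values into that table entry, preserving the
 * IPS_EN bit so an update doesn't toggle SA lookup.
 */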
/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
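
/* The Rx side uses the same staged-write scheme for three separate
 * tables selected by the enum ixgbe_ipsec_tbl_sel argument: the IP
 * address table (ips_rx_ip_tbl), the SPI table (ips_rx_spi_tbl) and
 * the key/salt/mode table (ips_rx_key_tbl), as used by the helpers
 * that follow.
 */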
/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}
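
/* On both Rx and Tx the 128-bit key is written as four 32-bit words in
 * reverse order (key[3] first), each byteswapped with cpu_to_be32();
 * the hardware presumably expects the key most-significant word first.
 */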
/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}
/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;
}
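
/* The scrub is split into two loops because the Rx IP table is smaller
 * than the SA tables: IP entries are only cleared up to
 * IXGBE_IPSEC_MAX_RX_IP_COUNT, while the SA entries continue on up to
 * IXGBE_IPSEC_MAX_SA_COUNT in the second loop.
 */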
/**
 * ixgbe_ipsec_stop_data
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	IXGBE_WRITE_FLUSH(hw);

	/* If the tx fifo doesn't have link, but still has data,
	 * we can't clear the tx sec block.  Set the MAC loopback
	 * before block clear
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!t_rdy && !r_rdy && limit--);

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}
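
/* The MAC loopback dance above is only done when there is no link:
 * with IXGBE_MACC_FLU and IXGBE_HLREG0_LPBK forced on, frames still
 * sitting in the Tx security FIFO can drain so the SECTX/SECRX blocks
 * can report ready, and the loopback bits are put back afterwards.
 * The ready poll is bounded by the limit counter rather than waiting
 * forever.
 */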
/**
 * ixgbe_ipsec_stop_engine
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbe_ipsec_start_engine
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}
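
/* The values programmed above appear to be the security-block tuning
 * the hardware wants while IPsec offload is active: a larger minimum
 * inter-frame gap (0x3) and a small "almost full" threshold (0x15) so
 * the store-and-forward buffer can hold an entire jumbo frame;
 * ixgbe_ipsec_stop_engine() restores the defaults (0x1 and 0x250).
 */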
/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);
		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}
/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}
/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();

	return ret;
}
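
/* rx_sa_list is a hash table keyed by SPI and walked under
 * rcu_read_lock(); when an entry matches, the xfrm_state is returned
 * with an extra reference taken via xfrm_state_hold().  That reference
 * is handed to the secpath in ixgbe_ipsec_rx() and released when the
 * skb's secpath is destroyed.
 */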
/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (xs->aead) {
		key_data = &xs->aead->alg_key[0];
		key_len = xs->aead->alg_key_len;
		alg_name = xs->aead->alg_name;
	} else {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}
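
/* For reference, a userspace configuration that satisfies the checks
 * above would look roughly like this (illustrative only; device name,
 * addresses, SPI and key bytes are placeholders):
 *
 *   ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x100 \
 *       mode transport \
 *       aead "rfc4106(gcm(aes))" 0x<32 key hex digits><8 salt hex digits> 128 \
 *       offload dev eth0 dir in
 *
 * i.e. a 160-bit AEAD blob: a 128-bit key followed by the 32-bit salt,
 * using the one algorithm the 82599 family supports.
 */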
/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}
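
/* The offload_handle stored above encodes both the direction and the
 * slot: Rx SAs get sa_idx + IXGBE_IPSEC_BASE_RX_INDEX and Tx SAs get
 * sa_idx + IXGBE_IPSEC_BASE_TX_INDEX, so ixgbe_ipsec_del_sa() and
 * ixgbe_ipsec_tx() can recover the table slot by subtracting the base.
 */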
/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}
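
/* IP table entries are reference counted: an Rx SA only clears its
 * rx_ip_sa slot (and the hardware copy) when it was the last SA using
 * that address, matching the sharing set up in ixgbe_ipsec_add_sa()
 * when several SAs point at the same destination.
 */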
/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}
/**
 * ixgbe_ipsec_free - called by xfrm garbage collections
 * @xs: pointer to transformer state struct
 *
 * We don't have any garbage to collect, so we shouldn't bother
 * implementing this function, but the XFRM code doesn't check for
 * existence before calling the API callback.
 **/
static void ixgbe_ipsec_free(struct xfrm_state *xs)
{
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
	.xdo_dev_state_free = ixgbe_ipsec_free,
};
/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	itd->flags = 0;
	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
		itd->trailer_len = xs->props.trailer_len;
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
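
/* The sa_idx and flags gathered here are used later when the Tx path
 * builds the advanced context descriptor; IXGBE_TX_FLAGS_CC appears to
 * be set so the data descriptors actually reference that context and
 * the IPSEC/ESP TUCMD bits reach the hardware.
 */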
/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the ip and crypto headers in the data.
	 * We can assume no vlan header in the way, b/c the
	 * hw won't recognize the IPsec packet and anyway the
	 * vlan device currently doesn't support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;

	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;
}
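
/* Attaching the matching xfrm_state to the secpath and flagging the
 * offload as CRYPTO_DONE / CRYPTO_SUCCESS is what lets the stack's
 * xfrm input code accept the packet without re-running the crypto in
 * software: the hardware has already authenticated and decrypted the
 * payload before DMA.
 */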
/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec;
	size_t size;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
	adapter->netdev->features |= NETIF_F_HW_ESP;
	adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
err1:
	kfree(adapter->ipsec);
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
}
/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}