// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
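/* A note on the key write above (my reading of the loop, not a datasheet
 * statement): key[] was filled from a big-endian byte stream read into
 * host-order u32s (see ixgbe_ipsec_parse_proto_keys()), so writing
 * cpu_to_be32(key[3 - i]) into IPSTXKEY(i) both reverses the word order
 * and byteswaps each word - the first four key bytes land in KEY(3) and
 * the last four in KEY(0), giving the device the key in RFC byte order.
 */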
/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
			(__force u32)cpu_to_le32((__force u32)spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}
/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
				(__force u32)cpu_to_le32((__force u32)addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}
/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;
}
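/* For scale (values from this driver's header as of this era, so treat
 * as approximate): IXGBE_IPSEC_MAX_SA_COUNT is 1024 entries per direction
 * while IXGBE_IPSEC_MAX_RX_IP_COUNT is only 128, which is why the scrub
 * above folds the IP-table writes into the shorter first loop and lets
 * the second loop finish out the remaining SA entries.
 */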
/**
 * ixgbe_ipsec_stop_data - halt the IPsec Tx and Rx data paths
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* If both Tx and Rx are ready there are no packets
	 * that we need to flush so the loopback configuration
	 * below is not necessary.
	 */
	t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_RDY;
	r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_RDY;
	if (t_rdy && r_rdy)
		return;

	/* If the tx fifo doesn't have link, but still has data,
	 * we can't clear the tx sec block. Set the MAC loopback
	 * before block clear
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!(t_rdy && r_rdy) && limit--);
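	/* The polling budget above works out to roughly 20 * 10ms = 200ms
	 * (on top of the 3ms loopback settle) before we give up waiting for
	 * the security blocks to report ready and press on regardless.
	 */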
	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}
/**
 * ixgbe_ipsec_stop_engine - disable the IPsec hardware engine
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbe_ipsec_start_engine - get the IPsec engine running
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);

		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}
/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}
/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
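/* A note on lifetime (my reading of the xfrm API, not ixgbe-specific
 * documentation): the state returned above carries a reference taken with
 * xfrm_state_hold() inside the RCU read section, so it remains valid after
 * rcu_read_unlock(); whoever consumes it must eventually release that
 * reference with xfrm_state_put(), directly or via secpath teardown.
 */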
/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables. The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
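	/* Worked example (illustrative): alg_key_len is in bits, so a
	 * 160-bit alg_key is 20 bytes - a 128-bit AES key in bytes 0-15
	 * followed by the 32-bit salt in bytes 16-19, which is exactly
	 * the word ((u32 *)key_data)[4] pulled out below.
	 */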
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}
/**
 * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
 * @xs: pointer to transformer state struct
 *
 * Returns 1 if the destination address collides with one of the
 * management IP filters, else 0.
 **/
static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mfval, manc, reg;
	int num_filters = 4;
	bool manc_ipv4;
	u32 bmcipval;
	int i, j;

#define MANC_EN_IPV4_FILTER	BIT(24)
#define MFVAL_IPV4_FILTER_SHIFT	16
#define MFVAL_IPV6_FILTER_SHIFT	24
#define MIPAF_ARR(_m, _n)	(IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))

#define IXGBE_BMCIP(_n)		(0x5050 + ((_n) * 4))
#define IXGBE_BMCIPVAL		0x5060
#define BMCIP_V4		0x2
#define BMCIP_V6		0x3
#define BMCIP_MASK		0x3
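	/* To make the MIPAF_ARR() arithmetic concrete (derived from the
	 * macro itself, not from the datasheet): each of the four IPv6
	 * filter slots is a 16-byte stride of four words, so e.g.
	 * MIPAF_ARR(3, 1) reads IXGBE_MIPAF + 0x34, the second word of
	 * the last slot - which doubles as the second dedicated IPv4
	 * filter when IPv4 filtering is enabled.
	 */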
	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER);
	mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL);
	bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL);

	if (xs->props.family == AF_INET) {
		/* are there any IPv4 filters to check? */
		if (manc_ipv4) {
			/* the 4 ipv4 filters are all in MIPAF(3, i) */
			for (i = 0; i < num_filters; i++) {
				if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i)))
					continue;

				reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
				if (reg == xs->id.daddr.a4)
					return 1;
			}
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
			reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
			if (reg == xs->id.daddr.a4)
				return 1;
		}
	} else {
		/* if there are ipv4 filters, they are in the last ipv6 slot */
		if (manc_ipv4)
			num_filters = 3;

		for (i = 0; i < num_filters; i++) {
			if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i)))
				continue;

			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}
	}

	return 0;
}
/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (ixgbe_ipsec_check_mgmt_ip(xs)) {
		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}
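/* The offload_handle scheme above, as inferred from the add/del/tx paths
 * in this file: the handle stored for the stack is simply the SA's table
 * index plus a per-direction base constant (IXGBE_IPSEC_BASE_RX_INDEX or
 * IXGBE_IPSEC_BASE_TX_INDEX), and ixgbe_ipsec_del_sa() and
 * ixgbe_ipsec_tx() recover the index by subtracting the matching base.
 */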
/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__force __be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}
/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}
static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
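/* These callbacks are how the xfrm stack reaches the driver (standard
 * xfrmdev_ops usage rather than anything ixgbe-specific): state_add and
 * state_delete fire when an SA with an offload request is installed on or
 * torn down from this netdev, and offload_ok is consulted per packet to
 * decide between the hardware path and the software crypto fallback.
 */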
/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
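		/* Worked numbers to go with the comment above (using
		 * IXGBE_IPSEC_AUTH_BITS = 128 as elsewhere in this file):
		 * authlen is 128 / 8 = 16 bytes, so a packet padded with
		 * e.g. 2 bytes carries a trailer of 16 + 2 + 2 = 20 bytes
		 * while an unpadded one carries 16 + 2 + 0 = 18 - hence
		 * reading the real padlen byte below rather than trusting
		 * the static trailer_len.
		 */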
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the ip and crypto headers in the data.
	 * We can assume no VLAN header is in the way, b/c the
	 * hw won't recognize the IPsec packet and anyway the
	 * current vlan device doesn't support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}
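/* On the reference taken in ixgbe_ipsec_find_rx_state(): parking the
 * state in sp->xvec[] above hands that reference to the secpath, and (as
 * I understand the xfrm core of this era) secpath destruction calls
 * xfrm_state_put() on each xvec entry, so the driver itself never has to
 * release it explicitly.
 */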
/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ipsec *ipsec;
	u32 t_dis, r_dis;
	size_t size;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* If there is no support for either Tx or Rx offload
	 * we should not be advertising support for IPsec.
	 */
	t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_OFF_DIS;
	r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_OFF_DIS;
	if (t_dis || r_dis)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
}
/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}