/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Atul Gupta (atul.gupta@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"
/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE     8
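
/*
 * Note (illustrative, not from the original sources): is_eth_imm() below
 * treats a packet as "immediate" when the skb payload plus the work
 * request headers and key context fit within MAX_IMM_TX_PKT_LEN (256
 * bytes); such packets are copied straight into the Tx descriptors and
 * need no DMA mapping or scatter/gather list. GCM_ESP_IV_SIZE is the
 * 8-byte explicit IV carried in the ESP payload for AES-GCM.
 */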
static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static void chcr_advance_esn_state(struct xfrm_state *x);
static const struct xfrmdev_ops chcr_xfrmdev_ops = {
        .xdo_dev_state_add         = chcr_xfrm_add_state,
        .xdo_dev_state_delete      = chcr_xfrm_del_state,
        .xdo_dev_state_free        = chcr_xfrm_free_state,
        .xdo_dev_offload_ok        = chcr_ipsec_offload_ok,
        .xdo_dev_state_advance_esn = chcr_advance_esn_state,
};
/* Add offload xfrms to Chelsio Interface */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
        struct net_device *netdev = NULL;
        int i;

        for (i = 0; i < lld->nports; i++) {
                netdev = lld->ports[i];
                if (!netdev)
                        continue;
                netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
                netdev->hw_enc_features |= NETIF_F_HW_ESP;
                netdev->features |= NETIF_F_HW_ESP;
                netdev_change_features(netdev);
        }
}
static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
                                         struct ipsec_sa_entry *sa_entry)
{
        int hmac_ctrl;
        int authsize = x->aead->alg_icv_len / 8;

        sa_entry->authsize = authsize;

        switch (authsize) {
        case ICV_8:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
                break;
        case ICV_12:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
                break;
        case ICV_16:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
                break;
        default:
                return -EINVAL;
        }
        return hmac_ctrl;
}
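
/*
 * Illustrative mapping (derived from the switch above, assuming the ICV_*
 * constants are byte counts): an 8-byte ICV selects
 * CHCR_SCMD_HMAC_CTRL_DIV2, a 12-byte ICV (the usual IPsec 96-bit
 * truncation) selects CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT, and a full
 * 16-byte ICV selects CHCR_SCMD_HMAC_CTRL_NO_TRUNC.
 */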
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
                                    struct ipsec_sa_entry *sa_entry)
{
        int keylen = (x->aead->alg_key_len + 7) / 8;
        unsigned char *key = x->aead->alg_key;
        int ck_size, key_ctx_size = 0;
        unsigned char ghash_h[AEAD_H_SIZE];
        struct crypto_aes_ctx aes;
        int ret = 0;

        if (keylen > 3) {
                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
                memcpy(sa_entry->salt, key + keylen, 4);
        }

        if (keylen == AES_KEYSIZE_128) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        } else if (keylen == AES_KEYSIZE_192) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        } else if (keylen == AES_KEYSIZE_256) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        } else {
                pr_err("GCM: Invalid key length %d\n", keylen);
                ret = -EINVAL;
                goto out;
        }

        memcpy(sa_entry->key, key, keylen);
        sa_entry->enckey_len = keylen;
        key_ctx_size = sizeof(struct _key_ctx) +
                       ((DIV_ROUND_UP(keylen, 16)) << 4) +
                       AEAD_H_SIZE;

        sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
                                                 0, 0,
                                                 key_ctx_size >> 4);

        /* Calculate the H = CIPH(K, 0 repeated 16 times).
         * It will go in key context
         */
        ret = aes_expandkey(&aes, key, keylen);
        if (ret) {
                sa_entry->enckey_len = 0;
                goto out;
        }
        memset(ghash_h, 0, AEAD_H_SIZE);
        aes_encrypt(&aes, ghash_h, ghash_h);
        memzero_explicit(&aes, sizeof(aes));

        memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
               16), ghash_h, AEAD_H_SIZE);
        sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
                              AEAD_H_SIZE;
out:
        return ret;
}
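
/*
 * Worked example (illustrative): for an AES-128-GCM xfrm state the raw
 * algorithm key is 128 + 32 bits, i.e. a 16-byte cipher key followed by
 * the 4-byte nonce/salt stripped off above. The key context then holds
 * the cipher key padded to a 16-byte boundary followed by the GHASH
 * subkey H = AES_K(0^128), so kctx_len works out to 16 + AEAD_H_SIZE
 * bytes for AES-128 and 32 + AEAD_H_SIZE bytes for AES-256.
 */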
/*
 * chcr_xfrm_add_state
 * returns 0 on success, negative error if failed to send message to FPGA
 * positive error if FPGA returned a bad response
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;
        int res = 0;

        if (x->props.aalgo != SADB_AALG_NONE) {
                pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
                return -EINVAL;
        }
        if (x->props.calgo != SADB_X_CALG_NONE) {
                pr_debug("CHCR: Cannot offload compressed xfrm states\n");
                return -EINVAL;
        }
        if (x->props.family != AF_INET &&
            x->props.family != AF_INET6) {
                pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
                pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
                return -EINVAL;
        }
        if (x->id.proto != IPPROTO_ESP) {
                pr_debug("CHCR: Only ESP xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->encap) {
                pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
                return -EINVAL;
        }
        if (!x->aead) {
                pr_debug("CHCR: Cannot offload xfrm states without aead\n");
                return -EINVAL;
        }
        if (x->aead->alg_icv_len != 128 &&
            x->aead->alg_icv_len != 96) {
                pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
                return -EINVAL;
        }
        if ((x->aead->alg_key_len != 128 + 32) &&
            (x->aead->alg_key_len != 256 + 32)) {
                pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
                return -EINVAL;
        }
        if (x->tfcpad) {
                pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
                return -EINVAL;
        }
        if (!x->geniv) {
                pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
                return -EINVAL;
        }
        if (strcmp(x->geniv, "seqiv")) {
                pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }

        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
                res = -ENOMEM;
                goto out;
        }

        sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
        if (x->props.flags & XFRM_STATE_ESN)
                sa_entry->esn = 1;
        chcr_ipsec_setkey(x, sa_entry);
        x->xso.offload_handle = (unsigned long)sa_entry;
        try_module_get(THIS_MODULE);
out:
        return res;
}
static void chcr_xfrm_del_state(struct xfrm_state *x)
{
        /* do nothing */
        if (!x->xso.offload_handle)
                return;
}
static void chcr_xfrm_free_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
        kfree(sa_entry);
        module_put(THIS_MODULE);
}
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        if (x->props.family == AF_INET) {
                /* Offload with IP options is not supported yet */
                if (ip_hdr(skb)->ihl > 5)
                        return false;
        } else {
                /* Offload with IPv6 extension headers is not supported yet */
                if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
                        return false;
        }
        /* Inline single pdu */
        if (skb_shinfo(skb)->gso_size)
                return false;
        return true;
}
static void chcr_advance_esn_state(struct xfrm_state *x)
{
        /* do nothing */
        if (!x->xso.offload_handle)
                return;
}
static inline int is_eth_imm(const struct sk_buff *skb,
                             struct ipsec_sa_entry *sa_entry)
{
        unsigned int kctx_len;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = sizeof(struct fw_ulptx_wr) +
                 sizeof(struct chcr_ipsec_req) + kctx_len;

        hdrlen += sizeof(struct cpl_tx_pkt);
        if (sa_entry->esn)
                hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
                           << 4);
        if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
                return hdrlen;
        return 0;
}
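
/*
 * Illustrative: the "immediate" decision above compares the skb length
 * against what is left of the 256-byte inline work request budget after
 * the ULPTX WR, chcr_ipsec_req, key context and CPL_TX_PKT headers (plus
 * the AAD/IV block when ESN is in use). The exact struct sizes are
 * hardware/firmware defined; only the arithmetic is shown here.
 */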
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
                                             struct ipsec_sa_entry *sa_entry,
                                             bool *immediate)
{
        unsigned int kctx_len;
        unsigned int flits;
        int aadivlen;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = is_eth_imm(skb, sa_entry);
        aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                                16) : 0;
        aadivlen <<= 4;

        /* If the skb is small enough, we can pump it out as a work request
         * with only immediate data. In that case we just have to have the
         * TX Packet header plus the skb data in the Work Request.
         */
        if (hdrlen) {
                *immediate = true;
                return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
        }

        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

        /* Otherwise, we're going to have to construct a Scatter gather list
         * of the skb body and fragments. We also include the flits necessary
         * for the TX Packet Work Request and CPL. We always have a firmware
         * Write Header (incorporated as part of the cpl_tx_pkt_lso and
         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
         * message or, if we're doing a Large Send Offload, an LSO CPL message
         * with an embedded TX Packet Write CPL message.
         */
        flits += (sizeof(struct fw_ulptx_wr) +
                  sizeof(struct chcr_ipsec_req) +
                  kctx_len +
                  sizeof(struct cpl_tx_pkt_core) +
                  aadivlen) / sizeof(__be64);
        return flits;
}
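
/*
 * Illustrative: flits are 8-byte units, the granularity in which the SGE
 * consumes Tx descriptor space. In the non-immediate case the estimate is
 * sgl_len(nr_frags + 1) flits for the scatter/gather list covering the skb
 * head plus each page fragment, plus the fixed WR/key-context/CPL overhead
 * computed above, all expressed in units of sizeof(__be64).
 */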
inline void *copy_esn_pktxt(struct sk_buff *skb,
                            struct net_device *dev,
                            void *pos,
                            struct ipsec_sa_entry *sa_entry)
{
        struct chcr_ipsec_aadiv *aadiv;
        struct ulptx_idata *sc_imm;
        struct ip_esp_hdr *esphdr;
        struct xfrm_offload *xo;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        __be64 seqno;
        u32 qidx;
        u32 seqlo;
        u8 *iv;
        int eoq;
        int len;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        if (!eoq)
                pos = q->q.desc;

        len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
        memset(pos, 0, len);
        aadiv = (struct chcr_ipsec_aadiv *)pos;
        esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
        iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
        xo = xfrm_offload(skb);

        aadiv->spi = (esphdr->spi);
        seqlo = htonl(esphdr->seq_no);
        seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
        memcpy(aadiv->seq_no, &seqno, 8);
        iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
        memcpy(aadiv->iv, iv, 8);

        if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
                sc_imm = (struct ulptx_idata *)(pos +
                         (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                       sizeof(__be64)) << 3));
                sc_imm->cmd_more = FILL_CMD_MORE(0);
                sc_imm->len = cpu_to_be32(skb->len);
        }
        pos += len;
        return pos;
}
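
/*
 * Note (illustrative): the 64-bit ESN written above is assembled from the
 * 32-bit sequence number carried in the ESP header (low half) and
 * xo->seq.hi from the xfrm offload state (high half), then placed in the
 * chcr_ipsec_aadiv block together with the SPI and the 8-byte explicit IV
 * so the hardware can authenticate the full extended sequence number.
 */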
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
                              struct net_device *dev,
                              void *pos,
                              struct ipsec_sa_entry *sa_entry)
{
        struct cpl_tx_pkt_core *cpl;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        u32 ctrl0, qidx;
        u64 cntrl = 0;
        int left;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        left = (void *)q->q.stat - pos;
        if (!left)
                pos = q->q.desc;

        cpl = (struct cpl_tx_pkt_core *)pos;

        cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
        ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
                TXPKT_PF_V(adap->pf);
        if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
                cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
        }

        cpl->ctrl0 = htonl(ctrl0);
        cpl->pack = htons(0);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);

        pos += sizeof(struct cpl_tx_pkt_core);
        /* Copy ESN info for HW */
        if (sa_entry->esn)
                pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
        return pos;
}
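
/*
 * Note (assumption, not stated in the original comments): the CPL above
 * disables IP and L4 checksum insertion (TXPKT_IPCSUM_DIS_F /
 * TXPKT_L4CSUM_DIS_F), which is consistent with the payload being
 * ESP-encrypted by the crypto engine rather than a plain TCP/UDP packet;
 * VLAN tag insertion is still requested when the skb carries a tag.
 */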
inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
                                  struct net_device *dev,
                                  void *pos,
                                  struct ipsec_sa_entry *sa_entry)
{
        struct _key_ctx *key_ctx;
        int left, eoq, key_len;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        unsigned int qidx;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
        key_len = sa_entry->kctx_len;

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        left = eoq;
        if (!eoq) {
                pos = q->q.desc;
                left = 64 * q->q.size;
        }

        /* Copy the Key context header */
        key_ctx = (struct _key_ctx *)pos;
        key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
        memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
        pos += sizeof(struct _key_ctx);
        left -= sizeof(struct _key_ctx);

        if (likely(key_len <= left)) {
                memcpy(key_ctx->key, sa_entry->key, key_len);
                pos += key_len;
        } else {
                memcpy(pos, sa_entry->key, left);
                memcpy(q->q.desc, sa_entry->key + left,
                       key_len - left);
                pos = (u8 *)q->q.desc + (key_len - left);
        }
        /* Copy CPL TX PKT XT */
        pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}
inline void *chcr_crypto_wreq(struct sk_buff *skb,
                              struct net_device *dev,
                              void *pos,
                              int credits,
                              struct ipsec_sa_entry *sa_entry)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        unsigned int ivsize = GCM_ESP_IV_SIZE;
        struct chcr_ipsec_wr *wr;
        bool immediate = false;
        u16 immdatalen = 0;
        unsigned int flits;
        u32 ivinoffset;
        u32 aadstart;
        u32 aadstop;
        u32 ciphstart;
        u16 sc_more = 0;
        u32 ivdrop = 0;
        u32 esnlen = 0;
        u32 wr_mid;
        u16 ndesc;
        int qidx = skb_get_queue_mapping(skb);
        struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
        unsigned int kctx_len = sa_entry->kctx_len;
        int qid = q->q.cntxt_id;

        atomic_inc(&adap->chcr_stats.ipsec_cnt);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = DIV_ROUND_UP(flits, 2);
        if (sa_entry->esn)
                ivdrop = 1;

        if (immediate)
                immdatalen = skb->len;

        if (sa_entry->esn) {
                esnlen = sizeof(struct chcr_ipsec_aadiv);
                if (!skb_is_nonlinear(skb))
                        sc_more = 1;
        }

        /* WR Header */
        wr = (struct chcr_ipsec_wr *)pos;
        wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
        wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                netif_tx_stop_queue(q->txq);
                q->q.stops++;
                wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
        wr_mid |= FW_ULPTX_WR_DATA_F;
        wr->wreq.flowid_len16 = htonl(wr_mid);

        /* ULPTX */
        wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
        wr->req.ulptx.len = htonl(ndesc - 1);

        /* Sub-command */
        wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
        wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                         sizeof(wr->req.key_ctx) +
                                         kctx_len +
                                         sizeof(struct cpl_tx_pkt_core) +
                                         esnlen +
                                         (esnlen ? 0 : immdatalen));

        /* CPL_SEC_PDU */
        ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
                                     (skb_transport_offset(skb) +
                                      sizeof(struct ip_esp_hdr) + 1);
        wr->req.sec_cpl.op_ivinsrtofst = htonl(
                                CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
                                CPL_TX_SEC_PDU_CPLLEN_V(2) |
                                CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
                                CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

        wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
        aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
        aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
                                  (skb_transport_offset(skb) +
                                   sizeof(struct ip_esp_hdr));
        ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
                    GCM_ESP_IV_SIZE + 1;
        ciphstart += sa_entry->esn ? esnlen : 0;

        wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
                                                        aadstart, aadstop,
                                                        ciphstart, 0);

        wr->req.sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
                                        sa_entry->authsize,
                                        sa_entry->authsize);
        wr->req.sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
                                         CHCR_SCMD_AUTH_MODE_GHASH,
                                         sa_entry->hmac_ctrl,
                                         ivsize >> 1);
        wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
                                                                 0, ivdrop);

        pos += sizeof(struct fw_ulptx_wr) +
               sizeof(struct ulp_txpkt) +
               sizeof(struct ulptx_idata) +
               sizeof(struct cpl_tx_sec_pdu);

        pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
        WARN_ON(n > SGE_MAX_WR_LEN / 8);
        return DIV_ROUND_UP(n, 8);
}
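
/*
 * Illustrative: a Tx descriptor is 8 flits (64 bytes), hence the
 * DIV_ROUND_UP(n, 8) above; a 19-flit work request, for example, occupies
 * 3 descriptors. This matches the "64 * q->q.size" byte count used when
 * wrapping the key copy in copy_key_cpltx_pktxt().
 */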
static inline unsigned int txq_avail(const struct sge_txq *q)
{
        return q->size - 1 - q->in_use;
}
static void eth_txq_stop(struct sge_eth_txq *q)
{
        netif_tx_stop_queue(q->txq);
        q->q.stops++;
}
static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
        q->in_use += n;
        q->pidx += n;
        if (q->pidx >= q->size)
                q->pidx -= q->size;
}
/*
 * chcr_ipsec_xmit is called from the ULD Tx handler.
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        unsigned int last_desc, ndesc, flits = 0;
        struct ipsec_sa_entry *sa_entry;
        u64 *pos, *end, *before, *sgl;
        struct tx_sw_desc *sgl_sdesc;
        int qidx, left, credits;
        bool immediate = false;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        struct sec_path *sp;

        if (!x->xso.offload_handle)
                return NETDEV_TX_BUSY;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

        sp = skb_sec_path(skb);
        if (sp->len != 1) {
out_free:       dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        cxgb4_reclaim_completed_tx(adap, &q->q, true);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = flits_to_desc(flits);
        credits = txq_avail(&q->q) - ndesc;

        if (unlikely(credits < 0)) {
                eth_txq_stop(q);
                dev_err(adap->pdev_dev,
                        "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
                        dev->name, qidx, credits, ndesc, txq_avail(&q->q),
                        flits);
                return NETDEV_TX_BUSY;
        }

        last_desc = q->q.pidx + ndesc - 1;
        if (last_desc >= q->q.size)
                last_desc -= q->q.size;
        sgl_sdesc = &q->q.sdesc[last_desc];

        if (!immediate &&
            unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
                q->mapping_err++;
                goto out_free;
        }

        pos = (u64 *)&q->q.desc[q->q.pidx];
        before = (u64 *)pos;
        end = (u64 *)pos + flits;
        /* Setup IPSec CPL */
        pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
                                       credits, sa_entry);
        if (before > (u64 *)pos) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
        }
        if (pos == (u64 *)q->q.stat) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
                pos = (void *)q->q.desc;
        }

        sgl = (void *)pos;
        if (immediate) {
                cxgb4_inline_tx_skb(skb, &q->q, sgl);
                dev_consume_skb_any(skb);
        } else {
                cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
                                0, sgl_sdesc->addr);
                skb_orphan(skb);
                sgl_sdesc->skb = skb;
        }
        txq_advance(&q->q, ndesc);

        cxgb4_ring_tx_db(adap, &q->q, ndesc);
        return NETDEV_TX_OK;
}