/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Atul Gupta (atul.gupta@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"
/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE     8
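/* AES-GCM ESP (RFC 4106) carries an 8-byte explicit IV right after the ESP header */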
static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static void chcr_advance_esn_state(struct xfrm_state *x);
static const struct xfrmdev_ops chcr_xfrmdev_ops = {
	.xdo_dev_state_add      = chcr_xfrm_add_state,
	.xdo_dev_state_delete   = chcr_xfrm_del_state,
	.xdo_dev_state_free     = chcr_xfrm_free_state,
	.xdo_dev_offload_ok     = chcr_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = chcr_advance_esn_state,
};
/* Add offload xfrms to Chelsio Interface */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
	struct net_device *netdev = NULL;
	int i;

	for (i = 0; i < lld->nports; i++) {
		netdev = lld->ports[i];
		if (!netdev)
			continue;
		netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
		netdev->hw_enc_features |= NETIF_F_HW_ESP;
		netdev->features |= NETIF_F_HW_ESP;
		netdev_change_features(netdev);
	}
}
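/* Map the negotiated ICV length (in bits) to the hardware HMAC control
 * value programmed into the SCMD of the crypto work request.
 */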
static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
					 struct ipsec_sa_entry *sa_entry)
{
	int hmac_ctrl;
	int authsize = x->aead->alg_icv_len / 8;

	sa_entry->authsize = authsize;

	switch (authsize) {
	case ICV_8:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		break;
	case ICV_12:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		break;
	case ICV_16:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		break;
	default:
		return -EINVAL;
	}
	return hmac_ctrl;
}
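/* Build the hardware key context for the SA: copy the AES key and the
 * 4-byte nonce/salt from the xfrm AEAD key blob and derive the GHASH
 * subkey H = AES_K(0^128), which is stored after the cipher key.
 */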
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
				    struct ipsec_sa_entry *sa_entry)
{
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	struct crypto_aes_ctx aes;
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		       ((DIV_ROUND_UP(keylen, 16)) << 4) +
		       AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));

	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
	       16), ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			     AEAD_H_SIZE;
out:
	return ret;
}
/*
 * chcr_xfrm_add_state
 * returns 0 on success, negative error if failed to send message to FPGA
 * positive error if FPGA returned a bad response
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;
	int res = 0;

	if (x->props.aalgo != SADB_AALG_NONE) {
		pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		pr_debug("CHCR: Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		pr_debug("CHCR: Only ESP xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		pr_debug("CHCR: Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128 &&
	    x->aead->alg_icv_len != 96) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		res = -ENOMEM;
		goto out;
	}

	sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
	if (x->props.flags & XFRM_STATE_ESN)
		sa_entry->esn = 1;
	chcr_ipsec_setkey(x, sa_entry);
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
out:
	return res;
}
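/* Nothing to tear down on delete; the SA entry lives until
 * chcr_xfrm_free_state() releases it.
 */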
static void chcr_xfrm_del_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}
static void chcr_xfrm_free_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kfree(sa_entry);
	module_put(THIS_MODULE);
}
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IP options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}
	/* Inline single pdu */
	if (skb_shinfo(skb)->gso_size)
		return false;
	return true;
}
static void chcr_advance_esn_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}
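/* Return the WR header length if the whole packet fits as immediate data
 * within MAX_IMM_TX_PKT_LEN bytes of descriptor space, otherwise 0.
 */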
static inline int is_eth_imm(const struct sk_buff *skb,
			     struct ipsec_sa_entry *sa_entry)
{
	unsigned int kctx_len;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = sizeof(struct fw_ulptx_wr) +
		 sizeof(struct chcr_ipsec_req) + kctx_len;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (sa_entry->esn)
		hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
			   << 4);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
					     struct ipsec_sa_entry *sa_entry,
					     bool *immediate)
{
	unsigned int kctx_len;
	unsigned int flits;
	int aadivlen;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = is_eth_imm(skb, sa_entry);
	aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
						16) : 0;
	aadivlen <<= 4;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen) {
		*immediate = true;
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
	}

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits += (sizeof(struct fw_ulptx_wr) +
		  sizeof(struct chcr_ipsec_req) +
		  kctx_len +
		  sizeof(struct cpl_tx_pkt_core) +
		  aadivlen) / sizeof(__be64);
	return flits;
}
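/* For ESN SAs, write the AAD/IV block (SPI, 64-bit extended sequence
 * number and explicit IV) that the hardware consumes ahead of the payload.
 */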
inline void *copy_esn_pktxt(struct sk_buff *skb,
			    struct net_device *dev,
			    void *pos,
			    struct ipsec_sa_entry *sa_entry)
{
	struct chcr_ipsec_aadiv *aadiv;
	struct ulptx_idata *sc_imm;
	struct ip_esp_hdr *esphdr;
	struct xfrm_offload *xo;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	__be64 seqno;
	u32 qidx;
	u32 seqlo;
	u8 *iv;
	int eoq;
	int len;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	if (!eoq)
		pos = q->q.desc;

	len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
	memset(pos, 0, len);
	aadiv = (struct chcr_ipsec_aadiv *)pos;
	esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
	xo = xfrm_offload(skb);

	aadiv->spi = (esphdr->spi);
	seqlo = htonl(esphdr->seq_no);
	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
	memcpy(aadiv->seq_no, &seqno, 8);
	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
	memcpy(aadiv->iv, iv, 8);

	if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
		sc_imm = (struct ulptx_idata *)(pos +
			  (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
					sizeof(__be64)) << 3));
		sc_imm->cmd_more = FILL_CMD_MORE(0);
		sc_imm->len = cpu_to_be32(skb->len);
	}
	pos += len;
	return pos;
}
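/* Write the CPL_TX_PKT_XT header (checksum offload disabled, optional
 * VLAN insertion) and, for ESN SAs, the trailing AAD/IV block.
 */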
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      struct ipsec_sa_entry *sa_entry)
{
	struct cpl_tx_pkt_core *cpl;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	u32 ctrl0, qidx;
	u64 cntrl = 0;
	int left;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	left = (void *)q->q.stat - pos;
	if (!left)
		pos = q->q.desc;

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);
	/* Copy ESN info for HW */
	if (sa_entry->esn)
		pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
	return pos;
}
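/* Copy the key context (header, salt and key material) into the ring,
 * wrapping to the start of the descriptor queue when the end is reached,
 * then append the CPL_TX_PKT_XT header.
 */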
inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  struct ipsec_sa_entry *sa_entry)
{
	struct _key_ctx *key_ctx;
	int left, eoq, key_len;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	unsigned int qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
	key_len = sa_entry->kctx_len;

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	left = eoq;
	if (!eoq) {
		pos = q->q.desc;
		left = 64 * q->q.size;
	}

	/* Copy the Key context header */
	key_ctx = (struct _key_ctx *)pos;
	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
	pos += sizeof(struct _key_ctx);
	left -= sizeof(struct _key_ctx);

	if (likely(key_len <= left)) {
		memcpy(key_ctx->key, sa_entry->key, key_len);
		pos += key_len;
	} else {
		memcpy(pos, sa_entry->key, left);
		memcpy(q->q.desc, sa_entry->key + left,
		       key_len - left);
		pos = (u8 *)q->q.desc + (key_len - left);
	}
	/* Copy CPL TX PKT XT */
	pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
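/* Build the full FW_ULPTX_WR crypto work request for one ESP packet:
 * WR header, ULPTX sub-command, CPL_TX_SEC_PDU describing the AAD,
 * cipher and ICV offsets, then the key context and CPL_TX_PKT_XT.
 */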
inline void *chcr_crypto_wreq(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      int credits,
			      struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	bool immediate = false;
	u16 immdatalen = 0;
	unsigned int flits;
	u32 ivinoffset;
	u32 aadstart;
	u32 aadstop;
	u32 ciphstart;
	u16 sc_more = 0;
	u32 ivdrop = 0;
	u32 esnlen = 0;
	u32 wr_mid;
	u16 ndesc;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->chcr_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = DIV_ROUND_UP(flits, 2);
	if (sa_entry->esn)
		ivdrop = 1;

	if (immediate)
		immdatalen = skb->len;

	if (sa_entry->esn) {
		esnlen = sizeof(struct chcr_ipsec_aadiv);
		if (!skb_is_nonlinear(skb))
			sc_more = 1;
	}

	/* WR Header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		if (!q->dbqt)
			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(ndesc - 1);

	/* Sub-command */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 esnlen +
					 (esnlen ? 0 : immdatalen));

	/* CPL_SEC_PDU */
	ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
				     (skb_transport_offset(skb) +
				      sizeof(struct ip_esp_hdr) + 1);
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
				CPL_TX_SEC_PDU_CPLLEN_V(2) |
				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
				CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
	aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
	aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
				  (skb_transport_offset(skb) +
				   sizeof(struct ip_esp_hdr));
	ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
		    GCM_ESP_IV_SIZE + 1;
	ciphstart += sa_entry->esn ? esnlen : 0;

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
							aadstart,
							aadstop,
							ciphstart, 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
					sa_entry->authsize,
					sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								 0, ivdrop, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
/**
 *      flits_to_desc - returns the num of Tx descriptors for the given flits
 *      @n: the number of flits
 *
 *      Returns the number of Tx descriptors needed for the supplied number
 *      of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	WARN_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}
static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
/*
 *      chcr_ipsec_xmit called from ULD Tx handler
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	unsigned int last_desc, ndesc, flits = 0;
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	struct tx_sw_desc *sgl_sdesc;
	int qidx, left, credits;
	bool immediate = false;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	struct sec_path *sp;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

	sp = skb_sec_path(skb);
	if (sp->len != 1) {
out_free:       dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;
	/* Setup IPSec CPL */
	pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
				       credits, sa_entry);
	if (before > (u64 *)pos) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}