/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>

#include "mlx4_en.h"
enum {
        MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_tx_ring *ring, u32 size,
                           u16 stride)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        int tmp;
        int err;

        ring->size = size;
        ring->size_mask = size - 1;
        ring->stride = stride;

        inline_thold = min(inline_thold, MAX_INLINE);

        spin_lock_init(&ring->comp_lock);

        tmp = size * sizeof(struct mlx4_en_tx_info);
        ring->tx_info = vmalloc(tmp);
        if (!ring->tx_info) {
                mlx4_err(mdev, "Failed allocating tx_info ring\n");
                return -ENOMEM;
        }
        mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
                 ring->tx_info, tmp);

        ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
        if (!ring->bounce_buf) {
                mlx4_err(mdev, "Failed allocating bounce buffer\n");
                err = -ENOMEM;
                goto err_tx;
        }
        ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

        err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
                                 2 * PAGE_SIZE);
        if (err) {
                mlx4_err(mdev, "Failed allocating hwq resources\n");
                goto err_bounce;
        }

        err = mlx4_en_map_buffer(&ring->wqres.buf);
        if (err) {
                mlx4_err(mdev, "Failed to map TX buffer\n");
                goto err_hwq_res;
        }

        ring->buf = ring->wqres.buf.direct.buf;

        mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
                 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
                 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

        err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
        if (err) {
                mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
                goto err_map;
        }

        err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
        if (err) {
                mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
                goto err_reserve;
        }

        return 0;

err_reserve:
        mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
        mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
        kfree(ring->bounce_buf);
        ring->bounce_buf = NULL;
err_tx:
        vfree(ring->tx_info);
        ring->tx_info = NULL;
        return err;
}
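
/*
 * Release everything mlx4_en_create_tx_ring() allocated: the QP and its
 * reserved range, the mapped work-queue buffer, the bounce buffer and the
 * tx_info array.
 */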
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_dev *mdev = priv->mdev;

        mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

        mlx4_qp_remove(mdev->dev, &ring->qp);
        mlx4_qp_free(mdev->dev, &ring->qp);
        mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
        mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        kfree(ring->bounce_buf);
        ring->bounce_buf = NULL;
        vfree(ring->tx_info);
        ring->tx_info = NULL;
}
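
/*
 * Reset the ring state (producer/consumer indices, tx_info array and
 * descriptor memory), fill in the QP context for this ring and move the QP
 * from RESET to the ready-to-send state.
 */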
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring,
                             int cq, int srqn)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        ring->cqn = cq;
        ring->prod = 0;
        ring->cons = 0xffffffff;
        ring->last_nr_txbb = 1;
        ring->poll_cnt = 0;
        ring->blocked = 0;
        memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
        memset(ring->buf, 0, ring->buf_size);

        ring->qp_state = MLX4_QP_STATE_RST;
        ring->doorbell_qpn = swab32(ring->qp.qpn << 8);

        mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
                                ring->cqn, srqn, &ring->context);

        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
                               &ring->qp, &ring->qp_state);

        return err;
}
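
/* Bring the ring's QP back to the RESET state so no further sends occur. */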
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_dev *mdev = priv->mdev;

        mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
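
/*
 * Release a single completed (or aborted) descriptor: unmap its DMA mappings,
 * stamp the freed TXBBs with the software-ownership pattern so the completion
 * logic can recognize them, and free the skb.  Descriptors that wrap past the
 * end of the ring buffer are handled in the slower second branch.  Returns
 * the number of TXBBs the descriptor occupied.
 */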
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring,
                                int index, u8 owner)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
        struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
        struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
        struct sk_buff *skb = tx_info->skb;
        struct skb_frag_struct *frag;
        void *end = ring->buf + ring->buf_size;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;
        __be32 *ptr = (__be32 *)tx_desc;
        __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

        /* Optimize the common case when there are no wraparounds */
        if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
                if (tx_info->linear) {
                        pci_unmap_single(mdev->pdev,
                                         (dma_addr_t) be64_to_cpu(data->addr),
                                         be32_to_cpu(data->byte_count),
                                         PCI_DMA_TODEVICE);
                        ++data;
                }

                for (i = 0; i < frags; i++) {
                        frag = &skb_shinfo(skb)->frags[i];
                        pci_unmap_page(mdev->pdev,
                                       (dma_addr_t) be64_to_cpu(data[i].addr),
                                       frag->size, PCI_DMA_TODEVICE);
                }

                /* Stamp the freed descriptor */
                for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
                        *ptr = stamp;
                        ptr += STAMP_DWORDS;
                }
        } else {
                if ((void *) data >= end) {
                        data = (struct mlx4_wqe_data_seg *)
                                        (ring->buf + ((void *) data - end));
                }

                if (tx_info->linear) {
                        pci_unmap_single(mdev->pdev,
                                         (dma_addr_t) be64_to_cpu(data->addr),
                                         be32_to_cpu(data->byte_count),
                                         PCI_DMA_TODEVICE);
                        ++data;
                }

                for (i = 0; i < frags; i++) {
                        /* Check for wraparound before unmapping */
                        if ((void *) data >= end)
                                data = (struct mlx4_wqe_data_seg *) ring->buf;
                        frag = &skb_shinfo(skb)->frags[i];
                        pci_unmap_page(mdev->pdev,
                                       (dma_addr_t) be64_to_cpu(data->addr),
                                       frag->size, PCI_DMA_TODEVICE);
                        ++data;
                }

                /* Stamp the freed descriptor */
                for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
                        *ptr = stamp;
                        ptr += STAMP_DWORDS;
                        if ((void *) ptr >= end) {
                                ptr = ring->buf;
                                stamp ^= cpu_to_be32(0x80000000);
                        }
                }
        }
        dev_kfree_skb_any(skb);
        return tx_info->nr_txbb;
}
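
/*
 * Drop every descriptor still posted on the ring without waiting for
 * completions.  Used when the port is brought down.  Returns the number of
 * descriptors that were freed.
 */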
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int cnt = 0;

        /* Skip last polled descriptor */
        ring->cons += ring->last_nr_txbb;
        mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
                 ring->cons, ring->prod);

        if ((u32) (ring->prod - ring->cons) > ring->size) {
                if (netif_msg_tx_err(priv))
                        mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
                return 0;
        }

        while (ring->cons != ring->prod) {
                ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
                                        ring->cons & ring->size_mask,
                                        !!(ring->cons & ring->size));
                ring->cons += ring->last_nr_txbb;
                cnt++;
        }

        if (cnt)
                mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

        return cnt;
}
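
/*
 * Distribute the 8 VLAN priorities as evenly as possible over ring_num TX
 * rings and record the mapping in prio_map; get_vlan_info() reads
 * priv->tx_prio_map to pick the ring for a tagged packet.  With a single
 * ring, every priority maps to ring 0.
 */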
void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
{
        int block = 8 / ring_num;
        int extra = 8 - (block * ring_num);
        int num = 0;
        u16 ring = 1;
        int prio;

        if (ring_num == 1) {
                for (prio = 0; prio < 8; prio++)
                        prio_map[prio] = 0;
                return;
        }

        for (prio = 0; prio < 8; prio++) {
                if (extra && (num == block + 1)) {
                        ring++;
                        num = 0;
                        extra--;
                } else if (!extra && (num == block)) {
                        ring++;
                        num = 0;
                }
                prio_map[prio] = ring;
                mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
                num++;
        }
}
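
/*
 * Reap TX completions for one CQ: walk the CQEs, free the corresponding
 * descriptors, advance the CQ and ring consumer indices, and re-wake the
 * netdev queue if this ring had previously stopped it.
 */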
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
        struct mlx4_cqe *cqe = cq->buf;
        u16 index;
        u16 new_index;
        u32 txbbs_skipped = 0;
        u32 cq_last_sav;

        /* index always points to the first TXBB of the last polled descriptor */
        index = ring->cons & ring->size_mask;
        new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
        if (index == new_index)
                return;

        /*
         * We use a two-stage loop:
         * - the first samples the HW-updated CQE
         * - the second frees TXBBs until the last sample
         * This lets us amortize CQE cache misses, while still polling the CQ
         * until it is quiescent.
         */
        cq_last_sav = mcq->cons_index;
        do {
                do {
                        /* Skip over last polled CQE */
                        index = (index + ring->last_nr_txbb) & ring->size_mask;
                        txbbs_skipped += ring->last_nr_txbb;

                        /* Poll next CQE */
                        ring->last_nr_txbb = mlx4_en_free_tx_desc(
                                                priv, ring, index,
                                                !!((ring->cons + txbbs_skipped) &
                                                   ring->size));
                        ++mcq->cons_index;

                } while (index != new_index);

                new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
        } while (index != new_index);
        AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
                         (u32) (mcq->cons_index - cq_last_sav));

        /*
         * To prevent CQ overflow we first update CQ consumer and only then
         * the ring consumer.
         */
        mlx4_cq_set_ci(mcq);
        wmb();
        ring->cons += txbbs_skipped;

        /* Wakeup Tx queue if this ring stopped it */
        if (unlikely(ring->blocked)) {
                if ((u32) (ring->prod - ring->cons) <=
                     ring->size - HEADROOM - MAX_DESC_TXBBS) {
                        /* TODO: support multiqueue netdevs. Currently, we block
                         * when *any* ring is full. Note that:
                         * - 2 Tx rings can unblock at the same time and call
                         *   netif_wake_queue(), which is OK since this
                         *   operation is idempotent.
                         * - We might wake the queue just after another ring
                         *   stopped it. This is no big deal because the next
                         *   transmission on that ring would stop the queue.
                         */
                        ring->blocked = 0;
                        netif_wake_queue(dev);
                        priv->port_stats.wake_queue++;
                }
        }
}
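
/*
 * Completion interrupt handler for a TX CQ.  Processing is skipped if the
 * ring's comp_lock is already held; the timer rearm guarantees the
 * completions are picked up shortly afterwards.
 */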
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

        if (!spin_trylock(&ring->comp_lock))
                return;
        mlx4_en_process_tx_cq(cq->dev, cq);
        mod_timer(&cq->timer, jiffies + 1);
        spin_unlock(&ring->comp_lock);
}
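
/*
 * Timer callback that polls the TX CQ.  It backs up the interrupt and
 * xmit-path polling so completions are processed even when the interface
 * goes idle.
 */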
void mlx4_en_poll_tx_cq(unsigned long data)
{
        struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
        u32 inflight;

        INC_PERF_COUNTER(priv->pstats.tx_poll);

        if (!spin_trylock(&ring->comp_lock)) {
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
                return;
        }
        mlx4_en_process_tx_cq(cq->dev, cq);
        inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

        /* If there are still packets in flight and the timer has not already
         * been scheduled by the Tx routine then schedule it here to guarantee
         * completion processing of these packets */
        if (inflight && priv->port_up)
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

        spin_unlock(&ring->comp_lock);
}
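
/*
 * A descriptor that would wrap past the end of the ring is first built in
 * ring->bounce_buf; this helper copies the wrapped tail to the start of the
 * ring buffer, then copies the rest into place at the original index
 * (leaving the first dword, which carries the ownership bit, for the caller
 * to set), and returns the real descriptor address.
 */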
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
                                                      struct mlx4_en_tx_ring *ring,
                                                      u32 index,
                                                      unsigned int desc_size)
{
        u32 copy = (ring->size - index) * TXBB_SIZE;
        int i;

        for (i = desc_size - copy - 4; i >= 0; i -= 4) {
                if ((i & (TXBB_SIZE - 1)) == 0)
                        wmb();

                *((u32 *) (ring->buf + i)) =
                        *((u32 *) (ring->bounce_buf + copy + i));
        }

        for (i = copy - 4; i >= 4 ; i -= 4) {
                if ((i & (TXBB_SIZE - 1)) == 0)
                        wmb();

                *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
                        *((u32 *) (ring->bounce_buf + i));
        }

        /* Return real descriptor location */
        return ring->buf + index * TXBB_SIZE;
}
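
/*
 * Called on the transmit path after posting a descriptor: make sure a CQ
 * poll timer is pending and opportunistically process completions every
 * MLX4_EN_TX_POLL_MODER packets.
 */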
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
        struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];

        /* If we don't have a pending timer, set one up to catch our recent
           post in case the interface becomes idle */
        if (!timer_pending(&cq->timer))
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

        /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
        if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
                if (spin_trylock(&ring->comp_lock)) {
                        mlx4_en_process_tx_cq(priv->dev, cq);
                        spin_unlock(&ring->comp_lock);
                }
}
static void *get_frag_ptr(struct sk_buff *skb)
{
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        struct page *page = frag->page;
        void *ptr;

        ptr = page_address(page);
        if (unlikely(!ptr))
                return NULL;

        return ptr + frag->page_offset;
}
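
/*
 * A packet is sent inline when inlining is enabled, it is not GSO and its
 * total length does not exceed inline_thold.  For a packet with one page
 * fragment, *pfrag is set to the fragment's mapping so the caller can copy
 * it into the descriptor.
 */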
static int is_inline(struct sk_buff *skb, void **pfrag)
{
        void *ptr;

        if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
                if (skb_shinfo(skb)->nr_frags == 1) {
                        ptr = get_frag_ptr(skb);
                        if (unlikely(!ptr))
                                return 0;

                        if (pfrag)
                                *pfrag = ptr;

                        return 1;
                } else if (unlikely(skb_shinfo(skb)->nr_frags))
                        return 0;
                else
                        return 1;
        }

        return 0;
}
static int inline_size(struct sk_buff *skb)
{
        if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
            <= MLX4_INLINE_ALIGN)
                return ALIGN(skb->len + CTRL_SIZE +
                             sizeof(struct mlx4_wqe_inline_seg), 16);
        else
                return ALIGN(skb->len + CTRL_SIZE + 2 *
                             sizeof(struct mlx4_wqe_inline_seg), 16);
}
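
/*
 * Compute the size in bytes of the WQE needed for this skb: the control
 * segment plus either the LSO header prefix and one data segment per
 * fragment, or the inline layout returned by inline_size().  Returns 0 (and
 * drops the skb) if the packet cannot be sent.
 */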
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
                         int *lso_header_size)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int real_size;

        if (skb_is_gso(skb)) {
                *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
                real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
                        ALIGN(*lso_header_size + 4, DS_SIZE);
                if (unlikely(*lso_header_size != skb_headlen(skb))) {
                        /* We add a segment for the skb linear buffer only if
                         * it contains data */
                        if (*lso_header_size < skb_headlen(skb))
                                real_size += DS_SIZE;
                        else {
                                if (netif_msg_tx_err(priv))
                                        mlx4_warn(mdev, "Non-linear headers\n");
                                dev_kfree_skb_any(skb);
                                return 0;
                        }
                }
                if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
                        if (netif_msg_tx_err(priv))
                                mlx4_warn(mdev, "LSO header size too big\n");
                        dev_kfree_skb_any(skb);
                        return 0;
                }
        } else {
                *lso_header_size = 0;
                if (!is_inline(skb, NULL))
                        real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
                else
                        real_size = inline_size(skb);
        }

        return real_size;
}
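
/*
 * Copy the whole packet into the descriptor as one or two inline segments.
 * When the data does not fit in the space left by the control segment
 * (MLX4_INLINE_ALIGN), it is split into two segments and the second
 * segment's byte_count is written only after the data copies so the HW
 * never sees a partially written segment.
 */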
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
                             int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
        struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
        int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

        if (skb->len <= spc) {
                inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
                skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
                if (skb_shinfo(skb)->nr_frags)
                        memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
                               skb_shinfo(skb)->frags[0].size);
        } else {
                inl->byte_count = cpu_to_be32(1 << 31 | spc);
                if (skb_headlen(skb) <= spc) {
                        skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
                        if (skb_headlen(skb) < spc) {
                                memcpy(((void *)(inl + 1)) + skb_headlen(skb),
                                       fragptr, spc - skb_headlen(skb));
                                fragptr += spc - skb_headlen(skb);
                        }
                        inl = (void *) (inl + 1) + spc;
                        memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
                } else {
                        skb_copy_from_linear_data(skb, inl + 1, spc);
                        inl = (void *) (inl + 1) + spc;
                        skb_copy_from_linear_data_offset(skb, spc, inl + 1,
                                                         skb_headlen(skb) - spc);
                        if (skb_shinfo(skb)->nr_frags)
                                memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
                                       fragptr, skb_shinfo(skb)->frags[0].size);
                }

                wmb();
                inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
        }
        tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
        tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
                         u16 *vlan_tag)
{
        int tx_ind;

        /* Obtain VLAN information if present */
        if (priv->vlgrp && vlan_tx_tag_present(skb)) {
                *vlan_tag = vlan_tx_tag_get(skb);
                /* Set the Tx ring to use according to vlan priority */
                tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
        } else {
                *vlan_tag = 0;
                tx_ind = 0;
        }
        return tx_ind;
}

int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_ring *ring;
        struct mlx4_en_cq *cq;
        struct mlx4_en_tx_desc *tx_desc;
        struct mlx4_wqe_data_seg *data;
        struct skb_frag_struct *frag;
        struct mlx4_en_tx_info *tx_info;
        int tx_ind = 0;
        int nr_txbb;
        int desc_size;
        int real_size;
        dma_addr_t dma;
        u32 index;
        __be32 op_own;
        u16 vlan_tag = 0;
        int i;
        int lso_header_size;
        void *fragptr;

        if (unlikely(!skb->len)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        real_size = get_real_size(skb, dev, &lso_header_size);
        if (unlikely(!real_size))
                return NETDEV_TX_OK;

        /* Align descriptor to TXBB size */
        desc_size = ALIGN(real_size, TXBB_SIZE);
        nr_txbb = desc_size / TXBB_SIZE;
        if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
                if (netif_msg_tx_err(priv))
                        mlx4_warn(mdev, "Oversized header or SG list\n");
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        tx_ind = get_vlan_info(priv, skb, &vlan_tag);
        ring = &priv->tx_ring[tx_ind];

        /* Check available TXBBs And 2K spare for prefetch */
        if (unlikely(((int)(ring->prod - ring->cons)) >
                     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                /* every full Tx ring stops queue.
                 * TODO: implement multi-queue support (per-queue stop) */
                netif_stop_queue(dev);
                ring->blocked = 1;
                priv->port_stats.queue_stopped++;

                /* Use interrupts to find out when queue opened */
                cq = &priv->tx_cq[tx_ind];
                mlx4_en_arm_cq(priv, cq);
                return NETDEV_TX_BUSY;
        }

        /* Now that we know what Tx ring to use */
        if (unlikely(!priv->port_up)) {
                if (netif_msg_tx_err(priv))
                        mlx4_warn(mdev, "xmit: port down!\n");
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Track current inflight packets for performance analysis */
        AVG_PERF_COUNTER(priv->pstats.inflight_avg,
                         (u32) (ring->prod - ring->cons - 1));

        /* Packet is good - grab an index and transmit it */
        index = ring->prod & ring->size_mask;

        /* See if we have enough space for whole descriptor TXBB for setting
         * SW ownership on next descriptor; if not, use a bounce buffer. */
        if (likely(index + nr_txbb <= ring->size))
                tx_desc = ring->buf + index * TXBB_SIZE;
        else
                tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;

        /* Save skb in tx_info ring */
        tx_info = &ring->tx_info[index];
        tx_info->skb = skb;
        tx_info->nr_txbb = nr_txbb;

        /* Prepare ctrl segment apart from opcode+ownership, which depends on
         * whether LSO is used */
        tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
        tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
        tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
                                                MLX4_WQE_CTRL_SOLICITED);
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
                                                         MLX4_WQE_CTRL_TCP_UDP_CSUM);
                priv->port_stats.tx_chksum_offload++;
        }

        /* Handle LSO (TSO) packets */
        if (lso_header_size) {
                /* Mark opcode as LSO */
                op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
                        ((ring->prod & ring->size) ?
                                cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

                /* Fill in the LSO prefix */
                tx_desc->lso.mss_hdr_size = cpu_to_be32(
                        skb_shinfo(skb)->gso_size << 16 | lso_header_size);

                /* Copy headers;
                 * note that we already verified that it is linear */
                memcpy(tx_desc->lso.header, skb->data, lso_header_size);
                data = ((void *) &tx_desc->lso +
                        ALIGN(lso_header_size + 4, DS_SIZE));

                priv->port_stats.tso_packets++;
                i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
                        !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
                ring->bytes += skb->len + (i - 1) * lso_header_size;
                ring->packets += i;
        } else {
                /* Normal (Non LSO) packet */
                op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
                        ((ring->prod & ring->size) ?
                                cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
                data = &tx_desc->data;
                ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
                ring->packets++;
        }
        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);

        /* valid only for non-inline segments */
        tx_info->data_offset = (void *) data - (void *) tx_desc;

        tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
        data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

        if (!is_inline(skb, &fragptr)) {
                /* Map fragments */
                for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
                        frag = &skb_shinfo(skb)->frags[i];
                        dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
                                           frag->size, PCI_DMA_TODEVICE);
                        data->addr = cpu_to_be64(dma);
                        data->lkey = cpu_to_be32(mdev->mr.key);
                        wmb();
                        data->byte_count = cpu_to_be32(frag->size);
                        --data;
                }

                /* Map linear part */
                if (tx_info->linear) {
                        dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
                                             skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
                        data->addr = cpu_to_be64(dma);
                        data->lkey = cpu_to_be32(mdev->mr.key);
                        wmb();
                        data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
                }
        } else
                build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);

        ring->prod += nr_txbb;

        /* If we used a bounce buffer then copy descriptor back into place */
        if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
                tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

        /* Run destructor before passing skb to HW */
        if (likely(!skb_shared(skb)))
                skb_orphan(skb);

        /* Ensure new descriptor hits memory
         * before setting ownership of this descriptor to HW */
        wmb();
        tx_desc->ctrl.owner_opcode = op_own;

        /* Ring doorbell! */
        wmb();
        writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
        dev->trans_start = jiffies;

        /* Poll CQ here */
        mlx4_en_xmit_poll(priv, tx_ind);

        return 0;
}