/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512

/* Ethernet header padding prepended to RX_PKTs */
#define RX_PKT_PAD 2

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive, we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
 * attempt to refill it.
 */
#define FL_STARVE_THRES 4

/*
 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
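
/*
 * Worked example for the threshold above (a sketch, assuming
 * MAX_SKB_FRAGS == 17): the SGL of a maximally fragmented TSO packet needs
 * (3 * 17) / 2 + (17 & 1) = 26 flits, DIV_ROUND_UP(26, 8) = 4 descriptors,
 * plus 1 descriptor for the WR/CPL headers, so the queue is suspended once
 * fewer than 5 descriptors remain.
 */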
/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 128

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

enum {
	/* packet alignment in FL buffers */
	FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
	/* egress status entry size */
	STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
};
struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	struct ulptx_sgl *sgl;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.
 */
enum {
	RX_LARGE_BUF    = 1 << 0, /* buffer is larger than PAGE_SIZE */
	RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
};
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
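
/*
 * Example of the encoding above (a sketch): refill_fl() below stores
 * "dma_map_page(...) | RX_LARGE_BUF" for a multi-page buffer, and
 * restore_rx_bufs() sets RX_UNMAPPED_BUF once a buffer has been unmapped,
 * so get_buf_addr() must mask both flag bits off before the address is
 * handed back to the DMA API.
 */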
/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
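
/*
 * For example, an FL created with fl->size == 1024 buffer slots can hold at
 * most 1016 posted buffers: the final HW descriptor (worth 8 buffers) is
 * always left unpopulated so the hardware can tell full from empty.
 */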
static inline bool fl_starving(const struct sge_fl *fl)
{
	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
}
static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif /* CONFIG_NEED_DMA_MAP_STATE */
static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}
/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			kfree_skb(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
					bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}
static inline int get_buf_size(const struct rx_sw_desc *d)
{
	return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
					      PAGE_SIZE;
}
/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(d), PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}
/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(d), PCI_DMA_FROMDEVICE);
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
			     QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
		q->pend_cred &= 7;
	}
}
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}
/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];

	gfp |= __GFP_NOWARN | __GFP_COLD;

#if FL_PG_ORDER > 0
	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << FL_PG_ORDER,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, FL_PG_ORDER);
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}
#endif

	while (n--) {
		pg = alloc_page(gfp);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(q))) {
		smp_wmb();
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}
static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--; /* the first entry rides in struct ulptx_sgl itself */
	return (3 * n) / 2 + (n & 1) + 2;
}
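
/*
 * Worked example (a sketch): for n == 4 SGL entries sgl_len() computes
 * (3 * 3) / 2 + (3 & 1) + 2 = 4 + 1 + 2 = 7 flits -- 2 flits for the
 * ulptx_sgl header with its embedded first entry, 3 flits for the next
 * pair of entries, and 2 flits for the final unpaired entry.
 */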
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
}
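
/*
 * In practice (a sketch, assuming struct cpl_tx_pkt is 16 bytes on this
 * chip) this means packets of up to 112 bytes -- small frames such as pure
 * TCP ACKs or ARP replies -- are copied straight into the descriptor ring
 * by inline_tx_skb() instead of being DMA-mapped.
 */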
/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	if (is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
	if (skb_shinfo(skb)->gso_size)
		flits += 2;
	return flits;
}
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	return flits_to_desc(calc_tx_flits(skb));
}
/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}
/**
 *	ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	wmb();            /* write descriptors before telling HW */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
		     QID(q->cntxt_id) | PIDX(n));
}
/**
 *	inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
			  void *pos)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP))
		return TXPKT_CSUM_TYPE(csum_type) |
			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
	else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
			TXPKT_CSUM_LOC(start + skb->csum_offset);
	}
}
static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
/**
 *	t4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid;
	u64 cntrl, *end;
	int qidx, credits;
	unsigned int flits, ndesc;
	struct adapter *adap;
	struct sge_eth_txq *q;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
out_free:	dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!is_eth_imm(skb) &&
	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	end = (u64 *)wr + flits;

	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso *lso = (void *)wr;
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(sizeof(*lso)));
		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
					LSO_FIRST_SLICE | LSO_LAST_SLICE |
					LSO_IPV6(v6) |
					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
					LSO_IPHDR_LEN(l3hdr_len / 4) |
					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
		lso->c.ipid_ofst = htons(0);
		lso->c.mss = htons(ssi->gso_size);
		lso->c.seqno_offset = htonl(0);
		lso->c.len = htonl(skb->len);
		cpl = (void *)(lso + 1);
		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			TXPKT_IPHDR_LEN(l3hdr_len) |
			TXPKT_ETHHDR_LEN(eth_xtra_len);
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(len));
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
			q->tx_cso++;
		} else
			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
	}

	if (vlan_tx_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
	}

	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (is_eth_imm(skb)) {
		inline_tx_skb(skb, &q->q, cpl + 1);
		dev_kfree_skb(skb);
	} else {
		int last_desc;

		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
			  addr);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
	}

	txq_advance(&q->q, ndesc);

	ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}
/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}
/**
 *	is_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as a WR with immediate data.
 */
static inline int is_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_CTRL_WR_LEN;
}
/**
 *	ctrlq_check_stop - check if a control queue is full and should stop
 *	@q: the queue
 *	@wr: most recent WR written to the queue
 *
 *	Check if a control queue has become full and should be stopped.
 *	We clean up control queue descriptors very lazily, only when we are out.
 *	If the queue is still full after reclaiming any completed descriptors
 *	we suspend it and have the last WR wake it up.
 */
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
	reclaim_completed_tx_imm(&q->q);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
		q->q.stops++;
		q->full = 1;
	}
}
/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;

	if (unlikely(!is_imm(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
	spin_lock(&q->sendq.lock);

	if (unlikely(q->full)) {
		skb->priority = ndesc;                  /* save for restart */
		__skb_queue_tail(&q->sendq, skb);
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_CN;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	inline_tx_skb(skb, &q->q, wr);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
		ctrlq_check_stop(q, wr);

	ring_tx_db(q->adap, &q->q, ndesc);
	spin_unlock(&q->sendq.lock);

	kfree_skb(skb);
	return NET_XMIT_SUCCESS;
}
/**
 *	restart_ctrlq - restart a suspended control queue
 *	@data: the control queue to restart
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	unsigned int written = 0;
	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;

	spin_lock(&q->sendq.lock);
	reclaim_completed_tx_imm(&q->q);
	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */

	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
		struct fw_wr_hdr *wr;
		unsigned int ndesc = skb->priority;     /* previously saved */

		/*
		 * Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be queued.
		 */
		spin_unlock(&q->sendq.lock);

		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
		inline_tx_skb(skb, &q->q, wr);
		kfree_skb(skb);

		written += ndesc;
		txq_advance(&q->q, ndesc);
		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
			unsigned long old = q->q.stops;

			ctrlq_check_stop(q, wr);
			if (q->q.stops != old) {          /* suspended anew */
				spin_lock(&q->sendq.lock);
				goto ringdb;
			}
		}
		if (written > 16) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}
		spin_lock(&q->sendq.lock);
	}
	q->full = 0;
ringdb: if (written)
		ring_tx_db(q->adap, &q->q, written);
	spin_unlock(&q->sendq.lock);
}
/**
 *	t4_mgmt_tx - send a management message
 *	@adap: the adapter
 *	@skb: the packet containing the management message
 *
 *	Send a management message through control queue 0.
 */
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
	local_bh_enable();
	return ret;
}
/**
 *	is_ofld_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as an offload WR with immediate
 *	data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN;
}
/**
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;   /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb->tail != skb->transport_header)
		cnt++;
	return flits + sgl_len(cnt);
}
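
/*
 * Example (a sketch): an offload WR with 48 bytes of headers in the linear
 * area, one page-fragment payload, and no trailing linear data needs
 * 48 / 8 + sgl_len(1) = 6 + 2 = 8 flits, i.e. exactly one Tx descriptor.
 */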
/**
 *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 *	@adap: the adapter
 *	@q: the queue to stop
 *
 *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 *	inability to map packets.  A periodic timer attempts to restart
 *	such queues.
 */
static void txq_stop_maperr(struct sge_ofld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
		q->adap->sge.txq_maperr);
}
/**
 *	ofldtxq_stop - stop an offload Tx queue that has become full
 *	@q: the queue to stop
 *	@skb: the packet causing the queue to become full
 *
 *	Stops an offload Tx queue that has become full and modifies the packet
 *	being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;

	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
	q->q.stops++;
	q->full = 1;
}
/**
 *	service_ofldq - restart a suspended offload queue
 *	@q: the offload queue
 *
 *	Services an offload Tx queue by moving packets from its packet queue
 *	to the HW Tx ring.  The function starts and ends with the queue locked.
 */
static void service_ofldq(struct sge_ofld_txq *q)
{
	u64 *pos;
	int credits;
	struct sk_buff *skb;
	unsigned int written = 0;
	unsigned int flits, ndesc;

	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
		/*
		 * We drop the lock but leave skb on sendq, thus retaining
		 * exclusive access to the state of the queue.
		 */
		spin_unlock(&q->sendq.lock);

		reclaim_completed_tx(q->adap, &q->q, false);

		flits = skb->priority;                /* previously saved */
		ndesc = flits_to_desc(flits);
		credits = txq_avail(&q->q) - ndesc;
		BUG_ON(credits < 0);
		if (unlikely(credits < TXQ_STOP_THRES))
			ofldtxq_stop(q, skb);

		pos = (u64 *)&q->q.desc[q->q.pidx];
		if (is_ofld_imm(skb))
			inline_tx_skb(skb, &q->q, pos);
		else if (map_skb(q->adap->pdev_dev, skb,
				 (dma_addr_t *)skb->head)) {
			txq_stop_maperr(q);
			spin_lock(&q->sendq.lock);
			break;
		} else {
			int last_desc, hdr_len = skb_transport_offset(skb);

			memcpy(pos, skb->data, hdr_len);
			write_sgl(skb, &q->q, (void *)pos + hdr_len,
				  pos + flits, hdr_len,
				  (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
			skb->dev = q->adap->port[0];
			skb->destructor = deferred_unmap_destructor;
#endif
			last_desc = q->q.pidx + ndesc - 1;
			if (last_desc >= q->q.size)
				last_desc -= q->q.size;
			q->q.sdesc[last_desc].skb = skb;
		}

		txq_advance(&q->q, ndesc);
		written += ndesc;
		if (unlikely(written > 32)) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}

		spin_lock(&q->sendq.lock);
		__skb_unlink(skb, &q->sendq);
		if (is_ofld_imm(skb))
			kfree_skb(skb);
	}
	if (likely(written))
		ring_tx_db(q->adap, &q->q, written);
}
/**
 *	ofld_xmit - send a packet through an offload queue
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
	spin_lock(&q->sendq.lock);
	__skb_queue_tail(&q->sendq, skb);
	if (q->sendq.qlen == 1)
		service_ofldq(q);
	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}
/**
 *	restart_ofldq - restart a suspended offload queue
 *	@data: the offload queue to restart
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void restart_ofldq(unsigned long data)
{
	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;

	spin_lock(&q->sendq.lock);
	q->full = 0;            /* the queue actually is completely empty now */
	service_ofldq(q);
	spin_unlock(&q->sendq.lock);
}
/**
 *	skb_txq - return the Tx queue an offload packet should use
 *	@skb: the packet
 *
 *	Returns the Tx queue an offload packet should use as indicated by bits
 *	1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Returns whether an offload packet should use an OFLD or a CTRL
 *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}
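
/*
 * Example of the encoding (a sketch): a ULD that wants control-queue
 * semantics on queue index 3 would set
 *	skb->queue_mapping = (3 << 1) | 1;
 * before handing the skb to t4_ofld_send(); ofld_send() below then routes
 * it via ctrl_xmit() on ctrlq[3], while clearing bit 0 routes it to
 * ofldtxq[3] instead.
 */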
static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	unsigned int idx = skb_txq(skb);

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}
/**
 *	t4_ofld_send - send an offload packet
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet queue_mapping to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-15 select the queue.
 */
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ofld_send(adap, skb);
	local_bh_enable();
	return ret;
}
/**
 *	cxgb4_ofld_send - send an offload packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
 *	intended for ULDs.
 */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
	return t4_ofld_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);
static inline void copy_frags(struct sk_buff *skb,
			      const struct pkt_gl *gl, unsigned int offset)
{
	int i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	skb_shinfo(skb)->nr_frags = gl->nfrags;
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}
/**
 *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
	 * size, which is expected since buffers are at least PAGE_SIZEd.
	 * In this case packets up to RX_COPY_THRES have only one fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		skb = dev_alloc_skb(gl->tot_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = dev_alloc_skb(skb_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}
out:	return skb;
}
EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
/**
 *	t4_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
 *	Releases the pages of a packet gather list.  We do not own the last
 *	page on the list and do not free it.
 */
static void t4_pktgl_free(const struct pkt_gl *gl)
{
	int n;
	const struct page_frag *p;

	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
		put_page(p->page);
}
/*
 * Process an MPS trace packet.  Give it an unused protocol number so it won't
 * be delivered to anyone and send it to the stack for capture.
 */
static noinline int handle_trace_pkt(struct adapter *adap,
				     const struct pkt_gl *gl)
{
	struct sk_buff *skb;
	struct cpl_trace_pkt *p;

	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		return 0;
	}

	p = (struct cpl_trace_pkt *)skb->data;
	__skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	skb->protocol = htons(0xffff);
	skb->dev = adap->port[0];
	netif_receive_skb(skb);
	return 0;
}
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
		   const struct cpl_rx_pkt *pkt)
{
	int ret;
	struct sk_buff *skb;

	skb = napi_get_frags(&rxq->rspq.napi);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return;
	}

	copy_frags(skb, gl, RX_PKT_PAD);
	skb->len = gl->tot_len - RX_PKT_PAD;
	skb->data_len = skb->len;
	skb->truesize += skb->data_len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxq->rspq.idx);
	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	ret = napi_gro_frags(&rxq->rspq.napi);
	if (ret == GRO_HELD)
		rxq->stats.lro_pkts++;
	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
		rxq->stats.lro_merged++;
	rxq->stats.pkts++;
	rxq->stats.rx_cso++;
}
/**
 *	t4_ethrx_handler - process an ingress ethernet packet
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@si: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *si)
{
	bool csum_ok;
	struct sk_buff *skb;
	const struct cpl_rx_pkt *pkt;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
		return handle_trace_pkt(q->adap, si);

	pkt = (const struct cpl_rx_pkt *)rsp;
	csum_ok = pkt->csum_calc && !pkt->err_vec;
	if ((pkt->l2info & htonl(RXF_TCP)) &&
	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
		do_gro(rxq, si, pkt);
		return 0;
	}

	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(si);
		rxq->stats.rx_drops++;
		return 0;
	}

	__skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
	skb->protocol = eth_type_trans(skb, q->netdev);
	skb_record_rx_queue(skb, q->idx);
	if (skb->dev->features & NETIF_F_RXHASH)
		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;

	rxq->stats.pkts++;

	if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
		if (!pkt->ip_frag) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			rxq->stats.rx_cso++;
		} else if (pkt->l2info & htonl(RXF_IP)) {
			__sum16 c = (__force __sum16)pkt->csum;
			skb->csum = csum_unfold(c);
			skb->ip_summed = CHECKSUM_COMPLETE;
			rxq->stats.rx_cso++;
		}
	} else
		skb_checksum_none_assert(skb);

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	netif_receive_skb(skb);
	return 0;
}
/**
 *	restore_rx_bufs - put back a packet's Rx buffers
 *	@si: the packet gather list
 *	@q: the SGE free list
 *	@frags: number of FL buffers to restore
 *
 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped, we mark them so to
 *	prevent further unmapping attempts.
 *
 *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away after all and we
 *	need to come back to it later.  This is a very rare event and there's
 *	no effort to make this particularly efficient.
 */
static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
			    int frags)
{
	struct rx_sw_desc *d;

	while (frags--) {
		if (q->cidx == 0)
			q->cidx = q->size - 1;
		else
			q->cidx--;
		d = &q->sdesc[q->cidx];
		d->page = si->frags[frags].page;
		d->dma_addr |= RX_UNMAPPED_BUF;
		q->avail++;
	}
}
/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return RSPD_GEN(r->type_gen) == q->gen;
}
/**
 *	rspq_next - advance to the next entry in a response queue
 *	@q: the response queue
 *
 *	Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}
/**
 *	process_responses - process responses from an SGE response queue
 *	@q: the ingress queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as control messages from FW
 *	or HW.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget)
{
	int ret, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	while (likely(budget_left)) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();
		rsp_type = RSPD_TYPE(rc->type_gen);
		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
			struct page_frag *fp;
			struct pkt_gl si;
			const struct rx_sw_desc *rsd;
			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;

			if (len & RSPD_NEWBUF) {
				if (likely(q->offset > 0)) {
					free_rx_bufs(q->adap, &rxq->fl, 1);
					q->offset = 0;
				}
				len = RSPD_LEN(len);
			}
			si.tot_len = len;

			/* gather packet fragments */
			for (frags = 0, fp = si.frags; ; frags++, fp++) {
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = get_buf_size(rsd);
				fp->page = rsd->page;
				fp->offset = q->offset;
				fp->size = min(bufsz, len);
				len -= fp->size;
				if (!len)
					break;
				unmap_rx_buf(q->adap, &rxq->fl);
			}

			/*
			 * Last buffer remains mapped so explicitly make it
			 * coherent for CPU access.
			 */
			dma_sync_single_for_cpu(q->adap->pdev_dev,
						get_buf_addr(rsd),
						fp->size, DMA_FROM_DEVICE);

			si.va = page_address(si.frags[0].page) +
				si.frags[0].offset;
			prefetch(si.va);

			si.nfrags = frags + 1;
			ret = q->handler(q, q->cur_desc, &si);
			if (likely(ret == 0))
				q->offset += ALIGN(fp->size, FL_ALIGN);
			else
				restore_rx_bufs(&si, &rxq->fl, frags);
		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;
	}

	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
		__refill_fl(q->adap, &rxq->fl);
	return budget - budget_left;
}
/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	unsigned int params;
	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
	int work_done = process_responses(q, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		params = q->next_intr_params;
		q->next_intr_params = q->intr_params;
	} else
		params = QINTR_TIMER_IDX(7);

	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
	return work_done;
}
/*
 * The MSI-X interrupt handler for an SGE response queue.
 */
irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
{
	struct sge_rspq *q = cookie;

	napi_schedule(&q->napi);
	return IRQ_HANDLED;
}
/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.
 */
static unsigned int process_intrq(struct adapter *adap)
{
	unsigned int credits;
	const struct rsp_ctrl *rc;
	struct sge_rspq *q = &adap->sge.intrq;

	spin_lock(&adap->sge.intrq_lock);
	for (credits = 0; ; credits++) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();
		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
			unsigned int qid = ntohl(rc->pldbuflen_qid);

			qid -= adap->sge.ingr_start;
			napi_schedule(&adap->sge.ingr_map[qid]->napi);
		}

		rspq_next(q);
	}

	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
	spin_unlock(&adap->sge.intrq_lock);
	return credits;
}
/*
 * The MSI interrupt handler, which handles data events from SGE response queues
 * as well as error and other async events as they all use the same MSI vector.
 */
static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_slow_intr_handler(adap);
	process_intrq(adap);
	return IRQ_HANDLED;
}
/*
 * Interrupt handler for legacy INTx interrupts.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt line.
 */
static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
	if (t4_slow_intr_handler(adap) | process_intrq(adap))
		return IRQ_HANDLED;
	return IRQ_NONE;             /* probably shared interrupt */
}
/**
 *	t4_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or INTx).
 */
irq_handler_t t4_intr_handler(struct adapter *adap)
{
	if (adap->flags & USING_MSIX)
		return t4_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return t4_intr_msi;
	return t4_intr_intx;
}
static void sge_rx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i, cnt[2];
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
		for (m = s->starving_fl[i]; m; m &= m - 1) {
			struct sge_eth_rxq *rxq;
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			smp_mb__after_clear_bit();

			if (fl_starving(fl)) {
				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}

	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
	cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
	cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++)
		if (cnt[i] >= s->starve_thres) {
			if (s->idma_state[i] || cnt[i] == 0xffffffff)
				continue;
			s->idma_state[i] = 1;
			t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
			m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
			dev_warn(adap->pdev_dev,
				 "SGE idma%u starvation detected for "
				 "queue %lu\n", i, m & 0xffff);
		} else if (s->idma_state[i])
			s->idma_state[i] = 0;

	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
static void sge_tx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i, budget;
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
		for (m = s->txq_maperr[i]; m; m &= m - 1) {
			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_ofld_txq *txq = s->egr_map[id];

			clear_bit(id, s->txq_maperr);
			tasklet_schedule(&txq->qresume_tsk);
		}

	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		struct sge_eth_txq *q = &s->ethtxq[i];

		if (q->q.in_use &&
		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
		    __netif_tx_trylock(q->txq)) {
			int avail = reclaimable(&q->q);

			if (avail) {
				if (avail > budget)
					avail = budget;

				free_tx_desc(adap, &q->q, avail, true);
				q->q.in_use -= avail;
				budget -= avail;
			}
			__netif_tx_unlock(q->txq);
		}

		if (++i >= s->ethqsets)
			i = 0;
	} while (budget && i != s->ethtxq_rover);
	s->ethtxq_rover = i;
	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Size needs to be multiple of 16, including status entry. */
	iq->size = roundup(iq->size, 16);

	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
				 FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
		FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
		FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
		FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
							-intr_idx - 1));
	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
		FW_IQ_CMD_IQGTSMODE |
		FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
		FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);

	if (fl) {
		fl->size = roundup(fl->size, 8);
		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc), &fl->addr,
				      &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
					    FW_IQ_CMD_FL0FETCHRO(1) |
					    FW_IQ_CMD_FL0DATARO(1) |
					    FW_IQ_CMD_FL0PADEN);
		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
						     FW_IQ_CMD_FL0FBMAX(3));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret)
		goto err;

	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
	iq->cur_desc = iq->desc;
	iq->cidx = 0;
	iq->gen = 1;
	iq->next_intr_params = iq->intr_params;
	iq->cntxt_id = ntohs(c.iqid);
	iq->abs_id = ntohs(c.physiqid);
	iq->size--;                           /* subtract status entry */
	iq->adap = adap;
	iq->netdev = dev;
	iq->handler = hnd;

	/* set offset to -1 to distinguish ingress queues without FL */
	iq->offset = fl ? 0 : -1;

	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;

	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = fl->pend_cred = 0;
		fl->pidx = fl->cidx = 0;
		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
	}
	return 0;

fl_nomem:
	ret = -ENOMEM;
err:
	if (iq->desc) {
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
		iq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
	q->in_use = 0;
	q->cidx = q->pidx = 0;
	q->stops = q->restarts = 0;
	q->stat = (void *)&q->desc[q->size];
	q->cntxt_id = id;
	adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_eth_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
			netdev_queue_numa_node_read(netdevq));
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_ETH_CMD_FETCHRO(1) |
				   FW_EQ_ETH_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
				  FW_EQ_ETH_CMD_FBMAX(3) |
				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
				  FW_EQ_ETH_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->txq = netdevq;
	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
	txq->mapping_err = 0;
	return 0;
}
2140 int t4_sge_alloc_ctrl_txq(struct adapter
*adap
, struct sge_ctrl_txq
*txq
,
2141 struct net_device
*dev
, unsigned int iqid
,
2142 unsigned int cmplqid
)
2145 struct fw_eq_ctrl_cmd c
;
2146 struct port_info
*pi
= netdev_priv(dev
);
2148 /* Add status entries */
2149 nentries
= txq
->q
.size
+ STAT_LEN
/ sizeof(struct tx_desc
);
2151 txq
->q
.desc
= alloc_ring(adap
->pdev_dev
, nentries
,
2152 sizeof(struct tx_desc
), 0, &txq
->q
.phys_addr
,
2153 NULL
, 0, NUMA_NO_NODE
);

	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
			    FW_EQ_CTRL_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
	c.physeqid_pkd = htonl(0);
	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_CTRL_CMD_FETCHRO |
				   FW_EQ_CTRL_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
				  FW_EQ_CTRL_CMD_FBMAX(3) |
				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
	txq->full = 0;
	return 0;
}

int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_ofld_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
			NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
			    FW_EQ_OFLD_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_OFLD_CMD_FETCHRO(1) |
				   FW_EQ_OFLD_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
				  FW_EQ_OFLD_CMD_FBMAX(3) |
				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
	txq->full = 0;
	txq->mapping_err = 0;
	return 0;
}
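
/*
 * Free the DMA memory backing a Tx descriptor ring, including the status
 * page that trails the descriptors.
 */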
static void free_txq(struct adapter *adap, struct sge_txq *q)
{
	dma_free_coherent(adap->pdev_dev,
			  q->size * sizeof(struct tx_desc) + STAT_LEN,
			  q->desc, q->phys_addr);
}
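
/*
 * Release an ingress (response) queue and, if present, its associated free
 * list: have the firmware free the queue contexts, then release the host
 * DMA rings and any posted Rx buffers.
 */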
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
			 struct sge_fl *fl)
{
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
			  rq->desc, rq->phys_addr);
	netif_napi_del(&rq->napi);
	rq->cntxt_id = rq->abs_id = 0;

	if (fl) {
		free_rx_bufs(adap, fl, fl->avail);
		dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
				  fl->desc, fl->addr);
	}
}

/**
 * t4_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *eq = adap->sge.ethrxq;
	struct sge_eth_txq *etq = adap->sge.ethtxq;
	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;

	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
		if (eq->rspq.desc)
			free_rspq_fl(adap, &eq->rspq, &eq->fl);
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
				       etq->q.cntxt_id);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clean up RDMA and iSCSI Rx queues */
	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}
	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}

	/* clean up offload Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];

		if (q->q.desc) {
			tasklet_kill(&q->qresume_tsk);
			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
					q->q.cntxt_id);
			free_tx_desc(adap, &q->q, q->q.in_use, false);
			kfree(q->q.sdesc);
			__skb_queue_purge(&q->sendq);
			free_txq(adap, &q->q);
		}
	}

	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			tasklet_kill(&cq->qresume_tsk);
			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
					cq->q.cntxt_id);
			__skb_queue_purge(&cq->sendq);
			free_txq(adap, &cq->q);
		}
	}

	if (adap->sge.fw_evtq.desc)
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);

	if (adap->sge.intrq.desc)
		free_rspq_fl(adap, &adap->sge.intrq, NULL);

	/* clear the reverse egress queue map */
	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
}

void t4_sge_start(struct adapter *adap)
{
	adap->sge.ethtxq_rover = 0;
	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}

/**
 * t4_sge_stop - disable SGE operation
 * @adap: the adapter
 *
 * Stop tasklets and timers associated with the DMA engine. Note that
 * this is effective only if measures have been taken to disable any HW
 * events that may restart them.
 */
void t4_sge_stop(struct adapter *adap)
{
	int i;
	struct sge *s = &adap->sge;

	if (in_interrupt())  /* actions below require waiting */
		return;

	if (s->rx_timer.function)
		del_timer_sync(&s->rx_timer);
	if (s->tx_timer.function)
		del_timer_sync(&s->tx_timer);

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
		struct sge_ofld_txq *q = &s->ofldtxq[i];

		if (q->q.desc)
			tasklet_kill(&q->qresume_tsk);
	}
	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
		struct sge_ctrl_txq *cq = &s->ctrlq[i];

		if (cq->q.desc)
			tasklet_kill(&cq->qresume_tsk);
	}
}

/**
 * t4_sge_init - initialize SGE
 * @adap: the adapter
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queues here, instead the driver
 * top-level must request them individually.
 */
void t4_sge_init(struct adapter *adap)
{
	unsigned int i, v;
	struct sge *s = &adap->sge;
	unsigned int fl_align_log = ilog2(FL_ALIGN);
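
	/*
	 * The INGPADBOUNDARY field encodes the ingress padding boundary as
	 * log2(boundary) - 5, i.e. 32 << field bytes, so fl_align_log - 5
	 * selects a boundary of FL_ALIGN bytes.  PKTSHIFT(2) asks the chip
	 * to prepend two bytes of padding to ingress packets.
	 */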
	t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
			 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
			 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
			 RXPKTCPLMODE |
			 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
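
	/*
	 * Each 4-bit field of SGE_HOST_PAGE_SIZE encodes a host page size as
	 * log2(page size) - 10 (0 = 1KB, 2 = 4KB, ...); program the same
	 * value into every field.
	 */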
	for (i = v = 0; i < 32; i += 4)
		v |= (PAGE_SHIFT - 10) << i;
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
#if FL_PG_ORDER > 0
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
#endif
	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
		     THRESHOLD_0(s->counter_val[0]) |
		     THRESHOLD_1(s->counter_val[1]) |
		     THRESHOLD_2(s->counter_val[2]) |
		     THRESHOLD_3(s->counter_val[3]));
	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
	s->starve_thres = core_ticks_per_usec(adap) * 1000000;	/* 1 s */
	s->idma_state[0] = s->idma_state[1] = 0;
	spin_lock_init(&s->intrq_lock);
}