/*
 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#define SGE_RX_SM_BUF_SIZE 1536

/*
 * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
 * pages instead of skbs.  Pages are carved up into RX_PAGE_SIZE chunks (must
 * be a multiple of the host page size).
 */
#define RX_PAGE_SIZE 2048

/*
 * skb freelist packets are copied into a new skb (and the freelist one is
 * reused) if their len is <= SGE_RX_COPY_THRES.
 */
#define SGE_RX_COPY_THRES 256

/*
 * Minimum number of freelist entries before we start dropping TUNNEL frames.
 */
#define SGE_RX_DROP_THRES 16

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)
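/*
 * Note: a "flit" is one 8-byte (64-bit) unit of a descriptor, so a work
 * request of WR_FLITS flits occupies WR_FLITS * 8 bytes, as defined above.
 */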
/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	u64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct sge_fl_page page;
	} t;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

struct unmap_info {		/* packet unmapping info, overlays skb->cb */
	int sflit;		/* start flit of first SGL entry in Tx descriptor */
	u16 fragidx;		/* first page fragment in current Tx descriptor */
	u16 addr_idx;		/* buffer index of first SGL entry in descriptor */
	u32 len;		/* mapped length of skb main body */
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 *	desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
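/*
 * For example, assuming WR_FLITS is 15 with SGE_NUM_GENBITS == 2, a WR of
 * 20 flits needs 1 + (20 - 2) / (15 - 1) = 2 descriptors, which matches
 * flit_desc_map[20] in the table above.
 */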
static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}
/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}
/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  Because the
 *	result is a compile-time constant, the compiler can optimize away the
 *	unmapping code when it is not needed.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}
/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, we keep the information necessary to
 *	unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
 *	in the Tx descriptors (the physical addresses of the various data
 *	buffers).  The send functions initialize the state in skb->cb so we
 *	can unmap the buffers held in the first Tx descriptor here, and we
 *	have enough information at this point to update the state for the next
 *	Tx descriptor.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct unmap_info *ui = (struct unmap_info *)skb->cb;
	int nfrags, frag_idx, curflit, j = ui->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];

	if (ui->len) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
				 PCI_DMA_TODEVICE);
		ui->len = 0;	/* so we know for next descriptor for this skb */
		j = 1;
	}

	frag_idx = ui->fragidx;
	curflit = ui->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		ui->fragidx = frag_idx;
		ui->addr_idx = j;
		ui->sflit = curflit - WR_FLITS - j;	/* sflit can be -1 */
	}
}
/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->skb->priority == cidx)
				kfree_skb(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
}
/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@rxq: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);

		if (q->buf_size != RX_PAGE_SIZE) {
			kfree_skb(d->t.skb);
			d->t.skb = NULL;
		} else {
			if (d->t.page.frag.page)
				put_page(d->t.page.frag.page);
			d->t.page.frag.page = NULL;
		}
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->page.frag.page)
		put_page(q->page.frag.page);
	q->page.frag.page = NULL;
}
/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va: va of the buffer to add
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
				  struct rx_desc *d, struct rx_sw_desc *sd,
				  unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
}
/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adapter: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.
 */
static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	struct sge_fl_page *p = &q->page;

	while (n--) {
		unsigned char *va;

		if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);

			if (!skb)
				break;
			va = skb->data;
			sd->t.skb = skb;
		} else {
			if (!p->frag.page) {
				p->frag.page = alloc_pages(gfp, 0);
				if (unlikely(!p->frag.page)) {
					break;
				}
				p->frag.size = RX_PAGE_SIZE;
				p->frag.page_offset = 0;
				p->va = page_address(p->frag.page);
			}

			memcpy(&sd->t, p, sizeof(*p));
			va = p->va;

			p->frag.page_offset += RX_PAGE_SIZE;
			BUG_ON(p->frag.page_offset > PAGE_SIZE);
			p->va += RX_PAGE_SIZE;
			if (p->frag.page_offset == PAGE_SIZE)
				p->frag.page = NULL;
			else
				get_page(p->frag.page);
		}

		add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
	}

	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
}
/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adapter: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t * phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}
/**
 *	free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	if (q->tx_reclaim_timer.function)
		del_timer_sync(&q->tx_reclaim_timer);

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	if (q->netdev)
		q->netdev->atalk_ptr = NULL;

	memset(q, 0, sizeof(*q));
}
/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}
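/*
 * Each sg_ent packs two buffers into 3 flits (one flit of lengths, two of
 * addresses), so for example sgl_len(1) == 2, sgl_len(2) == 3 and
 * sgl_len(3) == 5.
 */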
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}
/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}
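/*
 * For example, a non-TSO packet with a linear head and two page fragments
 * needs sgl_len(3) + 2 = 7 flits, which flit_desc_map maps back to a single
 * Tx descriptor.
 */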
/**
 *	make_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@pdev: the PCI device
 *
 *	Generates a scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		sgp->len[j] = cpu_to_be32(frag->size);
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}
/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 *	where the HW is going to sleep just after we checked, however,
 *	then the interrupt handler will detect the outstanding TX packet
 *	and ring the doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}
static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}
/**
 *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
 *	@ndesc: number of Tx descriptors spanned by the SGL
 *	@skb: the packet corresponding to the WR
 *	@d: first Tx descriptor to be written
 *	@pidx: index of above descriptors
 *	@q: the SGE Tx queue
 *	@sgl: the SGL
 *	@flits: number of flits to the start of the SGL in the first descriptor
 *	@sgl_flits: the SGL size in flits
 *	@gen: the Tx descriptor generation
 *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
 *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 *	Write a work request header and an associated SGL.  If the SGL is
 *	small enough to fit into one Tx descriptor it has already been written
 *	and we just need to write the WR header.  Otherwise we distribute the
 *	SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, unsigned int wr_hi,
			     unsigned int wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		struct unmap_info *ui = (struct unmap_info *)skb->cb;

		ui->fragidx = 0;
		ui->addr_idx = 0;
		ui->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		skb->priority = pidx;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		skb->priority = pidx;
		wrp->wr_hi |= htonl(F_WR_EOP);
		wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
	}
}
/**
 *	write_tx_pkt_wr - write a TX_PKT work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@pi: the egress interface
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@q: the Tx queue
 *	@ndesc: number of descriptors the packet will occupy
 *	@compl: the value of the COMPL bit to use
 *
 *	Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len | 0x80000000);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb_network_offset(skb) == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				skb_copy_from_linear_data(skb, &d->flit[2],
							  skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			kfree_skb(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
	if (need_skb_unmap())
		((struct unmap_info *)skb->cb)->len = skb_headlen(skb);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}
/**
 *	eth_xmit - add a packet to the Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = dev->priv;
	struct sge_qset *qs = dev2qset(dev);
	struct sge_txq *q = &qs->txq[TXQ_ETH];

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&q->lock);
	reclaim_completed_tx(adap, q);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(TXQ_ETH, &qs->txq_stopped);
			dev_err(&adap->pdev->dev,
				"%s: Tx ring %u full while queue awake!\n",
				dev->name, q->cntxt_id & 7);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(TXQ_ETH, &qs->txq_stopped);

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			netif_wake_queue(dev);
		}
	}

	gen = q->gen;
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;

	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	dev->trans_start = jiffies;
	spin_unlock(&q->lock);

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A cons is that we lie to socket memory accounting, but the amount
	 * of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets do actually get freed quickly by new
	 * packets almost always, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}
/**
 *	write_imm - write a packet into a Tx descriptor as immediate data
 *	@d: the Tx descriptor to write
 *	@skb: the packet
 *	@len: the length of packet data to write as immediate data
 *	@gen: the generation bit value to write
 *
 *	Writes a packet as immediate data into a Tx descriptor.  The packet
 *	contains a work request at its beginning.  We must write the packet
 *	carefully so the SGE doesn't read it accidentally before it's written
 *	in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	memcpy(&to[1], &from[1], len - sizeof(*from));
	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}
/**
 *	check_desc_avail - check descriptor availability on a send queue
 *	@adap: the adapter
 *	@q: the send queue
 *	@skb: the packet needing the descriptors
 *	@ndesc: the number of Tx descriptors needed
 *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 *	Checks if the requested number of Tx descriptors is available on an
 *	SGE send queue.  If the queue is already suspended or not enough
 *	descriptors are available the packet is queued for later transmission.
 *	Must be called with the Tx queue locked.
 *
 *	Returns 0 if enough descriptors are available, 1 if there aren't
 *	enough descriptors and the packet has been queued, and 2 if the caller
 *	needs to retry because there weren't enough descriptors at the
 *	beginning of the call but some freed up in the meantime.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
	      addq_exit:__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		goto addq_exit;
	}
	return 0;
}
/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN && !skb->data_len;
}
/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@adap: the adapter
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data in a single Tx
 *	descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}
/**
 *	restart_ctrlq - restart a suspended control queue
 *	@qs: the queue set containing the control queue
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];
	struct adapter *adap = qs->netdev->priv;

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
	}

	spin_unlock(&q->lock);
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
}
/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;
	const struct unmap_info *ui = (struct unmap_info *)skb->cb;

	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (ui->len)
		pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
			       PCI_DMA_TODEVICE);
}

static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}
/**
 *	write_ofld_wr - write an offload work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@q: the Tx queue
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@ndesc: number of descriptors the packet will occupy
 *
 *	Write an offload work request to send the supplied packet.  The packet
 *	data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
			  struct sge_txq *q, unsigned int pidx,
			  unsigned int gen, unsigned int ndesc)
{
	unsigned int sgl_flits, flits;
	struct work_request_hdr *from;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];

	if (immediate(skb)) {
		q->sdesc[pidx].skb = NULL;
		write_imm(d, skb, skb->len, gen);
		return;
	}

	/* Only TX_DATA builds SGLs */

	from = (struct work_request_hdr *)skb->data;
	memcpy(&d->flit[1], &from[1],
	       skb_transport_offset(skb) - sizeof(*from));

	flits = skb_transport_offset(skb) / 8;
	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
			     skb->tail - skb->transport_header,
			     adap->pdev);
	if (need_skb_unmap()) {
		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
		skb->destructor = deferred_unmap_destructor;
		((struct unmap_info *)skb->cb)->len = (skb->tail -
						       skb->transport_header);
	}

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
			 gen, from->wr_hi, from->wr_lo);
}
/**
 *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given offload
 *	packet.  These packets are already fully constructed.
 */
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;

	if (skb->len <= WR_LEN && cnt == 0)
		return 1;	/* packet fits as immediate data */

	flits = skb_transport_offset(skb) / 8;	/* headers */
	if (skb->tail != skb->transport_header)
		cnt++;
	return flits_to_desc(flits + sgl_len(cnt));
}
/**
 *	ofld_xmit - send a packet through an offload queue
 *	@adap: the adapter
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

	spin_lock(&q->lock);
      again:reclaim_completed_tx(adap, q);

	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (unlikely(ret)) {
		if (ret == 1) {
			skb->priority = ndesc;	/* save for restart */
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	gen = q->gen;
	q->in_use += ndesc;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);

	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
	check_ring_tx_db(adap, q);
	return NET_XMIT_SUCCESS;
}
/**
 *	restart_offloadq - restart a suspended offload queue
 *	@qs: the queue set containing the offload queue
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void restart_offloadq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	struct adapter *adap = qs->netdev->priv;

	spin_lock(&q->lock);
      again:reclaim_completed_tx(adap, q);

	while ((skb = skb_peek(&q->sendq)) != NULL) {
		unsigned int gen, pidx;
		unsigned int ndesc = skb->priority;

		if (unlikely(q->size - q->in_use < ndesc)) {
			set_bit(TXQ_OFLD, &qs->txq_stopped);
			smp_mb__after_clear_bit();

			if (should_restart_tx(q) &&
			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
				goto again;
			break;
		}

		gen = q->gen;
		q->in_use += ndesc;
		pidx = q->pidx;
		q->pidx += ndesc;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->gen ^= 1;
		}
		__skb_unlink(skb, &q->sendq);
		spin_unlock(&q->lock);

		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
		spin_lock(&q->lock);
	}
	spin_unlock(&q->lock);

#if USE_GTS
	set_bit(TXQ_RUNNING, &q->flags);
	set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
/**
 *	queue_set - return the queue set a packet should use
 *	@skb: the packet
 *
 *	Maps a packet to the SGE queue set it should use.  The desired queue
 *	set is carried in bits 1-3 in the packet's priority.
 */
static inline int queue_set(const struct sk_buff *skb)
{
	return skb->priority >> 1;
}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Determines whether an offload packet should use an OFLD or a CTRL
 *	Tx queue.  This is indicated by bit 0 in the packet's priority.
 */
static inline int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->priority & 1;
}
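/*
 * In other words, offload senders encode their routing decision as
 * skb->priority = (qset_idx << 1) | is_control, so e.g. priority 5 selects
 * the control queue of queue set 2.
 */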
/**
 *	t3_offload_tx - send an offload packet
 *	@tdev: the offload device to send to
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet priority to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-3 select the queue set.
 */
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct adapter *adap = tdev2adap(tdev);
	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}
/**
 *	offload_enqueue - add an offload packet to an SGE offload receive queue
 *	@q: the SGE response queue
 *	@skb: the packet
 *
 *	Add a new offload packet to an SGE response queue's offload packet
 *	queue.  If the packet is the first on the queue it schedules the RX
 *	softirq to process the queue.
 */
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
{
	skb->next = skb->prev = NULL;
	if (q->rx_tail)
		q->rx_tail->next = skb;
	else {
		struct sge_qset *qs = rspq_to_qset(q);

		if (__netif_rx_schedule_prep(qs->netdev))
			__netif_rx_schedule(qs->netdev);
		q->rx_head = skb;
	}
	q->rx_tail = skb;
}

/**
 *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
 *	@tdev: the offload device that will be receiving the packets
 *	@q: the SGE response queue that assembled the bundle
 *	@skbs: the partial bundle
 *	@n: the number of packets in the bundle
 *
 *	Delivers a (partial) bundle of Rx offload packets to an offload device.
 */
static inline void deliver_partial_bundle(struct t3cdev *tdev,
					  struct sge_rspq *q,
					  struct sk_buff *skbs[], int n)
{
	if (n) {
		q->offload_bundles++;
		tdev->recv(tdev, skbs, n);
	}
}
/**
 *	ofld_poll - NAPI handler for offload packets in interrupt mode
 *	@dev: the network device doing the polling
 *	@budget: polling budget
 *
 *	The NAPI handler for offload packets when a response queue is serviced
 *	by the hard interrupt handler, i.e., when it's operating in non-polling
 *	mode.  Creates small packet batches and sends them through the offload
 *	receive handler.  Batches need to be of modest size as we do prefetches
 *	on the packets in each.
 */
static int ofld_poll(struct net_device *dev, int *budget)
{
	struct adapter *adapter = dev->priv;
	struct sge_qset *qs = dev2qset(dev);
	struct sge_rspq *q = &qs->rspq;
	int work_done, limit = min(*budget, dev->quota), avail = limit;

	while (avail) {
		struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
		int ngathered;

		spin_lock_irq(&q->lock);
		head = q->rx_head;
		if (!head) {
			work_done = limit - avail;
			*budget -= work_done;
			dev->quota -= work_done;
			__netif_rx_complete(dev);
			spin_unlock_irq(&q->lock);
			return 0;
		}

		tail = q->rx_tail;
		q->rx_head = q->rx_tail = NULL;
		spin_unlock_irq(&q->lock);

		for (ngathered = 0; avail && head; avail--) {
			prefetch(head->data);
			skbs[ngathered] = head;
			head = head->next;
			skbs[ngathered]->next = NULL;
			if (++ngathered == RX_BUNDLE_SIZE) {
				q->offload_bundles++;
				adapter->tdev.recv(&adapter->tdev, skbs,
						   ngathered);
				ngathered = 0;
			}
		}
		if (head) {	/* splice remaining packets back onto Rx queue */
			spin_lock_irq(&q->lock);
			tail->next = q->rx_head;
			if (!q->rx_head)
				q->rx_tail = tail;
			q->rx_head = head;
			spin_unlock_irq(&q->lock);
		}
		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
	}
	work_done = limit - avail;
	*budget -= work_done;
	dev->quota -= work_done;
	return 1;
}
/**
 *	rx_offload - process a received offload packet
 *	@tdev: the offload device receiving the packet
 *	@rq: the response queue that received the packet
 *	@skb: the packet
 *	@rx_gather: a gather list of packets if we are building a bundle
 *	@gather_idx: index of the next available slot in the bundle
 *
 *	Process an ingress offload packet and add it to the offload ingress
 *	queue.  Returns the index of the next available slot in the bundle.
 */
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
			     struct sk_buff *skb, struct sk_buff *rx_gather[],
			     unsigned int gather_idx)
{
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (rq->polling) {
		rx_gather[gather_idx++] = skb;
		if (gather_idx == RX_BUNDLE_SIZE) {
			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
			gather_idx = 0;
			rq->offload_bundles++;
		}
	} else
		offload_enqueue(rq, skb);

	return gather_idx;
}
/**
 *	restart_tx - check whether to restart suspended Tx queues
 *	@qs: the queue set to resume
 *
 *	Restarts suspended Tx queues of an SGE queue set if they have enough
 *	free resources to resume operation.
 */
static void restart_tx(struct sge_qset *qs)
{
	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
		qs->txq[TXQ_ETH].restarts++;
		if (netif_running(qs->netdev))
			netif_wake_queue(qs->netdev);
	}

	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
		qs->txq[TXQ_OFLD].restarts++;
		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
	}
	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
		qs->txq[TXQ_CTRL].restarts++;
		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
	}
}
/**
 *	rx_eth - process an ingress ethernet packet
 *	@adap: the adapter
 *	@rq: the response queue that received the packet
 *	@skb: the packet
 *	@pad: amount of padding at the start of the buffer
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 *	The padding is 2 if the packet was delivered in an Rx buffer and 0
 *	if it was immediate data in a response.
 */
static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
		   struct sk_buff *skb, int pad)
{
	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
	struct port_info *pi;

	skb_pull(skb, sizeof(*p) + pad);
	skb->dev->last_rx = jiffies;
	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
	pi = netdev_priv(skb->dev);
	if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
	    !p->fragment) {
		rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(p->vlan_valid)) {
		struct vlan_group *grp = pi->vlan_grp;

		rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
		if (likely(grp))
			__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
					  rq->polling);
		else
			dev_kfree_skb_any(skb);
	} else if (rq->polling)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}
#define SKB_DATA_SIZE 128

static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
			  unsigned int len)
{
	skb->len = len;
	if (len <= SKB_DATA_SIZE) {
		skb_copy_to_linear_data(skb, p->va, len);
		skb->tail += len;
		put_page(p->frag.page);
	} else {
		skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE);
		skb_shinfo(skb)->frags[0].page = p->frag.page;
		skb_shinfo(skb)->frags[0].page_offset =
		    p->frag.page_offset + SKB_DATA_SIZE;
		skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
		skb_shinfo(skb)->nr_frags = 1;
		skb->data_len = len - SKB_DATA_SIZE;
		skb->tail += SKB_DATA_SIZE;
		skb->truesize += skb->data_len;
	}
}
/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->t.skb->data);

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			struct rx_desc *d = &fl->desc[fl->cidx];
			dma_addr_t mapping =
			    (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
					 be32_to_cpu(d->addr_lo));

			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(sd->t.skb, skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev, mapping, len,
						       PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
	      recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres))
		goto recycle;

      use_orig_buf:
	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->t.skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}
/**
 *	handle_rsp_cntrl_info - handles control information in a response
 *	@qs: the queue set corresponding to the response
 *	@flags: the response control flags
 *
 *	Handles the control information of an SGE response, such as GTS
 *	indications and completion credits for the queue set's Tx queues.
 *	HW coalesces credits, we don't do any extra SW coalescing.
 */
static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
{
	unsigned int credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ0_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
#endif

	credits = G_RSPD_TXQ0_CR(flags);
	if (credits)
		qs->txq[TXQ_ETH].processed += credits;

	credits = G_RSPD_TXQ2_CR(flags);
	if (credits)
		qs->txq[TXQ_CTRL].processed += credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ1_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
#endif
	credits = G_RSPD_TXQ1_CR(flags);
	if (credits)
		qs->txq[TXQ_OFLD].processed += credits;
}
/**
 *	check_ring_db - check if we need to ring any doorbells
 *	@adapter: the adapter
 *	@qs: the queue set whose Tx queues are to be examined
 *	@sleeping: indicates which Tx queue sent GTS
 *
 *	Checks if some of a queue set's Tx queues need to ring their doorbells
 *	to resume transmission after idling while they still have unprocessed
 *	descriptors.
 */
static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
			  unsigned int sleeping)
{
	if (sleeping & F_RSPD_TXQ0_GTS) {
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		if (txq->cleaned + txq->in_use != txq->processed &&
		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
			set_bit(TXQ_RUNNING, &txq->flags);
			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
				     V_EGRCNTX(txq->cntxt_id));
		}
	}

	if (sleeping & F_RSPD_TXQ1_GTS) {
		struct sge_txq *txq = &qs->txq[TXQ_OFLD];

		if (txq->cleaned + txq->in_use != txq->processed &&
		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
			set_bit(TXQ_RUNNING, &txq->flags);
			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
				     V_EGRCNTX(txq->cntxt_id));
		}
	}
}
/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline int is_new_response(const struct rsp_desc *r,
				  const struct sge_rspq *q)
{
	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
}
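/*
 * The generation bit flips each time the queue wraps, so a descriptor whose
 * gen bit matches q->gen was written by HW after SW last consumed this slot.
 */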
#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))

/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
#define NOMEM_INTR_DELAY 2500
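/* 2500 units of 0.1 us gives a 250 us holdoff before the next interrupt. */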
/**
 *	process_responses - process responses from an SGE response queue
 *	@adap: the adapter
 *	@qs: the queue set to which the response queue belongs
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as credits and other events
 *	for the queues that belong to the response queue's queue set.
 *	A negative budget is effectively unlimited.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct adapter *adap, struct sge_qset *qs,
			     int budget)
{
	struct sge_rspq *q = &qs->rspq;
	struct rsp_desc *r = &q->desc[q->cidx];
	int budget_left = budget;
	unsigned int sleeping = 0;
	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
	int ngathered = 0;

	q->next_holdoff = q->holdoff_tmr;

	while (likely(budget_left && is_new_response(r, q))) {
		int eth, ethpad = 2;
		struct sk_buff *skb = NULL;
		u32 len, flags = ntohl(r->flags);
		u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;

		eth = r->rss_hdr.opcode == CPL_RX_PKT;

		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
			if (!skb)
				goto no_mem;

			memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
			skb->data[0] = CPL_ASYNC_NOTIF;
			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
		} else if (flags & F_RSPD_IMM_DATA_VALID) {
			skb = get_imm_packet(r);
			if (unlikely(!skb)) {
		      no_mem:
				q->next_holdoff = NOMEM_INTR_DELAY;
				/* consume one credit since we tried */
				budget_left--;
				break;
			}
		} else if ((len = ntohl(r->len_cq)) != 0) {
			struct sge_fl *fl =
			    (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];

			if (fl->buf_size == RX_PAGE_SIZE) {
				struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
				struct sge_fl_page *p = &sd->t.page;

				prefetch(p->va);
				prefetch(p->va + L1_CACHE_BYTES);

				__refill_fl(adap, fl);

				pci_unmap_single(adap->pdev,
						 pci_unmap_addr(sd, dma_addr),
						 fl->buf_size,
						 PCI_DMA_FROMDEVICE);

				if (eth) {
					if (unlikely(fl->credits <
						     SGE_RX_DROP_THRES))
						goto eth_recycle;

					skb = alloc_skb(SKB_DATA_SIZE,
							GFP_ATOMIC);
					if (unlikely(!skb)) {
eth_recycle:
						recycle_rx_buf(adap, fl,
							       fl->cidx);
						goto eth_done;
					}
				} else {
					skb = alloc_skb(SKB_DATA_SIZE,
							GFP_ATOMIC);
					if (unlikely(!skb))
						goto no_mem;
				}

				skb_data_init(skb, p, G_RSPD_LEN(len));
eth_done:
				fl->credits--;
			} else {
				fl->credits--;
				skb = get_packet(adap, fl, G_RSPD_LEN(len),
						 eth ? SGE_RX_DROP_THRES : 0);
			}

			if (++fl->cidx == fl->size)
				fl->cidx = 0;
		} else
			q->pure_rsps++;

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}

		if (likely(skb != NULL)) {
			/* Preserve the RSS info in csum & priority */
			skb->csum = rss_hi;
			skb->priority = rss_lo;

			if (eth)
				rx_eth(adap, q, skb, ethpad);
			else {
				if (unlikely(r->rss_hdr.opcode ==
					     CPL_TRACE_PKT))
					__skb_pull(skb, ethpad);

				ngathered = rx_offload(&adap->tdev, q, skb,
						       offload_skbs,
						       ngathered);
			}
		}

		--budget_left;
	}

	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();		/* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	budget -= budget_left;
	return budget;
}
static inline int is_pure_response(const struct rsp_desc *r)
{
	u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);

	return (n | r->len_cq) == 0;
}
/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@dev: the net device
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.
 */
static int napi_rx_handler(struct net_device *dev, int *budget)
{
	struct adapter *adap = dev->priv;
	struct sge_qset *qs = dev2qset(dev);
	int effective_budget = min(*budget, dev->quota);

	int work_done = process_responses(adap, qs, effective_budget);
	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= effective_budget)
		return 1;

	netif_rx_complete(dev);

	/*
	 * Because we don't atomically flush the following write it is
	 * possible that in very rare cases it can reach the device in a way
	 * that races with a new response being written plus an error interrupt
	 * causing the NAPI interrupt handler below to return unhandled status
	 * to the OS.  To protect against this would require flushing the write
	 * and doing both the write and the flush with interrupts off.  Way too
	 * expensive and unjustifiable given the rarity of the race.
	 *
	 * The race cannot happen at all with MSI-X.
	 */
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
		     V_NEWTIMER(qs->rspq.next_holdoff) |
		     V_NEWINDEX(qs->rspq.cidx));
	return 0;
}
/*
 * Returns true if the device is already scheduled for polling.
 */
static inline int napi_is_scheduled(struct net_device *dev)
{
	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
}
/**
 *	process_pure_responses - process pure responses from a response queue
 *	@adap: the adapter
 *	@qs: the queue set owning the response queue
 *	@r: the first pure response to process
 *
 *	A simpler version of process_responses() that handles only pure (i.e.,
 *	non data-carrying) responses.  Such responses are too light-weight to
 *	justify calling a softirq under NAPI, so we handle them specially in
 *	the interrupt handler.  The function is called with a pointer to a
 *	response, which the caller must ensure is a valid pure response.
 *
 *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
				  struct rsp_desc *r)
{
	struct sge_rspq *q = &qs->rspq;
	unsigned int sleeping = 0;

	do {
		u32 flags = ntohl(r->flags);

		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		q->pure_rsps++;
		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}
	} while (is_new_response(r, q) && is_pure_response(r));

	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();		/* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	return is_new_response(r, q);
}

/**
 *	handle_responses - decide what to do with new responses in NAPI mode
 *	@adap: the adapter
 *	@q: the response queue
 *
 *	This is used by the NAPI interrupt handlers to decide what to do with
 *	new SGE responses.  If there are no new responses it returns -1.  If
 *	there are new responses and they are pure (i.e., non-data carrying)
 *	it handles them straight in hard interrupt context as they are very
 *	cheap and don't deliver any packets.  Finally, if there are any data
 *	signaling responses it schedules the NAPI handler.  Returns 1 if it
 *	schedules NAPI, 0 if all new responses were pure.
 *
 *	The caller must ascertain NAPI is not already running.
 */
static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
{
	struct sge_qset *qs = rspq_to_qset(q);
	struct rsp_desc *r = &q->desc[q->cidx];

	if (!is_new_response(r, q))
		return -1;
	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
		return 0;
	}
	if (likely(__netif_rx_schedule_prep(qs->netdev)))
		__netif_rx_schedule(qs->netdev);
	return 1;
}

/*
 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
 * (i.e., response queue serviced in hard interrupt).
 */
irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
{
	struct sge_qset *qs = cookie;
	struct adapter *adap = qs->netdev->priv;
	struct sge_rspq *q = &qs->rspq;

	spin_lock(&q->lock);
	if (process_responses(adap, qs, -1) == 0)
		q->unhandled_irqs++;
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/*
 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
 * (i.e., response queue serviced by NAPI polling).
 */
irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
{
	struct sge_qset *qs = cookie;
	struct adapter *adap = qs->netdev->priv;
	struct sge_rspq *q = &qs->rspq;

	spin_lock(&q->lock);
	BUG_ON(napi_is_scheduled(qs->netdev));

	if (handle_responses(adap, q) < 0)
		q->unhandled_irqs++;
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/*
 * The non-NAPI MSI interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same MSI vector.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
static irqreturn_t t3_intr_msi(int irq, void *cookie)
{
	int new_packets = 0;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	spin_lock(&q->lock);

	if (process_responses(adap, &adap->sge.qs[0], -1)) {
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
		new_packets = 1;
	}

	if (adap->params.nports == 2 &&
	    process_responses(adap, &adap->sge.qs[1], -1)) {
		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
			     V_NEWTIMER(q1->next_holdoff) |
			     V_NEWINDEX(q1->cidx));
		new_packets = 1;
	}

	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
{
	if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
		if (likely(__netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
		return 1;
	}
	return 0;
}

/*
 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
 * by NAPI polling).  Handles data events from SGE response queues as well as
 * error and other async events as they all use the same MSI vector.  We use
 * one SGE response queue per port in this mode and protect all response
 * queues with queue 0's lock.
 */
irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
{
	int new_packets;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	spin_lock(&q->lock);

	new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
	if (adap->params.nports == 2)
		new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
					       &adap->sge.qs[1].rspq);
	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/*
 * A helper function that processes responses and issues GTS.
 */
static inline int process_responses_gts(struct adapter *adap,
					struct sge_rspq *rq)
{
	int work;

	work = process_responses(adap, rspq_to_qset(rq), -1);
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
	return work;
}

/*
 * The legacy INTx interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same interrupt pin.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
static irqreturn_t t3_intr(int irq, void *cookie)
{
	int work_done, w0, w1;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

	spin_lock(&q0->lock);

	w0 = is_new_response(&q0->desc[q0->cidx], q0);
	w1 = adap->params.nports == 2 &&
	    is_new_response(&q1->desc[q1->cidx], q1);

	if (likely(w0 | w1)) {
		t3_write_reg(adap, A_PL_CLI, 0);
		t3_read_reg(adap, A_PL_CLI);	/* flush */

		if (likely(w0))
			process_responses_gts(adap, q0);

		if (w1)
			process_responses_gts(adap, q1);

		work_done = w0 | w1;
	} else
		work_done = t3_slow_intr_handler(adap);

	spin_unlock(&q0->lock);
	return IRQ_RETVAL(work_done != 0);
}

/*
 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
static irqreturn_t t3b_intr(int irq, void *cookie)
{
	u32 map;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1))
		process_responses_gts(adap, q0);

	if (map & 2)
		process_responses_gts(adap, &adap->sge.qs[1].rspq);

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}

/*
 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
static irqreturn_t t3b_intr_napi(int irq, void *cookie)
{
	u32 map;
	struct net_device *dev;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1)) {
		dev = adap->sge.qs[0].netdev;

		if (likely(__netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
	}
	if (map & 2) {
		dev = adap->sge.qs[1].netdev;

		if (likely(__netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
	}

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}

/**
 *	t3_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *	@polling: whether using NAPI to service response queues
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
 *	response queues.
 */
intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
{
	if (adap->flags & USING_MSIX)
		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return polling ? t3_intr_msi_napi : t3_intr_msi;
	if (adap->params.rev > 0)
		return polling ? t3b_intr_napi : t3b_intr;
	return t3_intr;
}
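
/*
 * Illustrative use from an IRQ setup path (a sketch, not the code in the
 * main driver file; field names outside this file, e.g. adap->name, are
 * assumptions):
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  adap->name, adap);
 */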

/**
 *	t3_sge_err_intr_handler - SGE async event interrupt handler
 *	@adapter: the adapter
 *
 *	Interrupt handler for SGE asynchronous (non-data) events.
 */
void t3_sge_err_intr_handler(struct adapter *adapter)
{
	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);

	if (status & F_RSPQCREDITOVERFOW)
		CH_ALERT(adapter, "SGE response queue credit overflow\n");

	if (status & F_RSPQDISABLED) {
		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);

		CH_ALERT(adapter,
			 "packet delivered to disabled response queue "
			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
		t3_fatal_err(adapter);
}

/**
 *	sge_timer_cb - perform periodic maintenance of an SGE qset
 *	@data: the SGE queue set to maintain
 *
 *	Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It performs two tasks:
 *
 *	a) Cleans up any completed Tx descriptors that may still be pending.
 *	Normal descriptor cleanup happens when new packets are added to a Tx
 *	queue so this timer is relatively infrequent and does any cleanup only
 *	if the Tx queue has not seen any new packets in a while.  We make a
 *	best effort attempt to reclaim descriptors, in that we don't wait
 *	around if we cannot get a queue's lock (which most likely is because
 *	someone else is queueing new packets and so will also handle the clean
 *	up).  Since control queues use immediate data exclusively we don't
 *	bother cleaning them up here.
 *
 *	b) Replenishes Rx queues that have run out due to memory shortage.
 *	Normally new Rx buffers are added when existing ones are consumed but
 *	when out of memory a queue can become empty.  We try to add only a few
 *	buffers here, the queue will be replenished fully as these new buffers
 *	are used up if memory shortage has subsided.
 */
static void sge_timer_cb(unsigned long data)
{
	spinlock_t *lock;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct adapter *adap = qs->netdev->priv;

	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
		spin_unlock(&qs->txq[TXQ_ETH].lock);
	}
	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
		reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
		spin_unlock(&qs->txq[TXQ_OFLD].lock);
	}
	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
	    &adap->sge.qs[0].rspq.lock;
	if (spin_trylock_irq(lock)) {
		if (!napi_is_scheduled(qs->netdev)) {
			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);

			if (qs->fl[0].credits < qs->fl[0].size)
				__refill_fl(adap, &qs->fl[0]);
			if (qs->fl[1].credits < qs->fl[1].size)
				__refill_fl(adap, &qs->fl[1]);

			if (status & (1 << qs->rspq.cntxt_id)) {
				if (qs->rspq.credits) {
					refill_rspq(adap, &qs->rspq, 1);
					qs->rspq.credits--;
					qs->rspq.restarted++;
					t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
						     1 << qs->rspq.cntxt_id);
				}
			}
		}
		spin_unlock_irq(lock);
	}
	mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/**
 *	t3_update_qset_coalesce - update coalescing settings for a queue set
 *	@qs: the SGE queue set
 *	@p: new queue set parameters
 *
 *	Update the coalescing settings for an SGE queue set.  Nothing is done
 *	if the queue set is not initialized yet.
 */
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
	if (!qs->netdev)
		return;

	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
	qs->rspq.polling = p->polling;
	qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
}
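
/*
 * Worked example of the conversion above: t3_sge_init() programs
 * A_SG_TIMER_TICK to core_ticks_per_usec(adap) / 10, i.e. one SGE timer tick
 * per 100ns, so a coalescing setting of 5us becomes
 *
 *	holdoff_tmr = max(5 * 10, 1U) = 50 ticks	(5us)
 *
 * and a setting of 0us is clamped to 1 tick because the hardware does not
 * accept a zero holdoff value.
 */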

/**
 *	t3_sge_alloc_qset - initialize an SGE queue set
 *	@adapter: the adapter
 *	@id: the queue set id
 *	@nports: how many Ethernet ports will be using this queue set
 *	@irq_vec_idx: the IRQ vector index for response queue interrupts
 *	@p: configuration parameters for this queue set
 *	@ntxq: number of Tx queues for the queue set
 *	@netdev: net device associated with this queue set
 *
 *	Allocate resources and initialize an SGE queue set.  A queue set
 *	comprises a response queue, two Rx free-buffer queues, and up to 3
 *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
 *	queue, offload queue, and control queue.
 */
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
		      int irq_vec_idx, const struct qset_params *p,
		      int ntxq, struct net_device *netdev)
{
	int i, ret = -ENOMEM;
	struct sge_qset *q = &adapter->sge.qs[id];

	init_qset_cntxt(q, id);
	init_timer(&q->tx_reclaim_timer);
	q->tx_reclaim_timer.data = (unsigned long)q;
	q->tx_reclaim_timer.function = sge_timer_cb;

	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		goto err;

	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
	if (!q->fl[1].desc)
		goto err;

	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
				  sizeof(struct rsp_desc), 0,
				  &q->rspq.phys_addr, NULL);
	if (!q->rspq.desc)
		goto err;

	for (i = 0; i < ntxq; ++i) {
		/*
		 * The control queue always uses immediate data so does not
		 * need to keep track of any sk_buffs.
		 */
		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
					    sizeof(struct tx_desc), sz,
					    &q->txq[i].phys_addr,
					    &q->txq[i].sdesc);
		if (!q->txq[i].desc)
			goto err;

		q->txq[i].gen = 1;
		q->txq[i].size = p->txq_size[i];
		spin_lock_init(&q->txq[i].lock);
		skb_queue_head_init(&q->txq[i].sendq);
	}

	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
		     (unsigned long)q);
	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
		     (unsigned long)q);

	q->fl[0].gen = q->fl[1].gen = 1;
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	q->rspq.gen = 1;
	q->rspq.size = p->rspq_size;
	spin_lock_init(&q->rspq.lock);

	q->txq[TXQ_ETH].stop_thres = nports *
	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);

	if (!is_offload(adapter)) {
#ifdef USE_RX_PAGE
		q->fl[0].buf_size = RX_PAGE_SIZE;
#else
		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
		    sizeof(struct cpl_rx_pkt);
#endif
		q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
		    sizeof(struct cpl_rx_pkt);
	} else {
#ifdef USE_RX_PAGE
		q->fl[0].buf_size = RX_PAGE_SIZE;
#else
		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
		    sizeof(struct cpl_rx_data);
#endif
		q->fl[1].buf_size = (16 * 1024) -
		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	spin_lock(&adapter->sge.reg_lock);

	/* FL threshold comparison uses < */
	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
				   q->rspq.phys_addr, q->rspq.size,
				   q->fl[0].buf_size, 1, 0);
	if (ret)
		goto err_unlock;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size, p->cong_thres, 1,
					  0);
		if (ret)
			goto err_unlock;
	}

	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
	if (ret)
		goto err_unlock;

	if (ntxq > 1) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
					 USE_GTS, SGE_CNTXT_OFLD, id,
					 q->txq[TXQ_OFLD].phys_addr,
					 q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret)
			goto err_unlock;
	}

	if (ntxq > 2) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
					 SGE_CNTXT_CTRL, id,
					 q->txq[TXQ_CTRL].phys_addr,
					 q->txq[TXQ_CTRL].size,
					 q->txq[TXQ_CTRL].token, 1, 0);
		if (ret)
			goto err_unlock;
	}

	spin_unlock(&adapter->sge.reg_lock);
	q->netdev = netdev;
	t3_update_qset_coalesce(q, p);

	/*
	 * We use atalk_ptr as a backpointer to a qset.  In case a device is
	 * associated with multiple queue sets only the first one sets
	 * atalk_ptr.
	 */
	if (netdev->atalk_ptr == NULL)
		netdev->atalk_ptr = q;

	refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
	refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
	return 0;

err_unlock:
	spin_unlock(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}
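
/*
 * Sketch of a typical call from the driver's open path (illustrative only;
 * the real caller and its local names live in the main driver file):
 *
 *	err = t3_sge_alloc_qset(adap, qidx, 1, irq_idx,
 *				&adap->params.sge.qset[qidx], ntxq, dev);
 *	if (err)
 *		t3_free_sge_resources(adap);
 *
 * ntxq selects how many of the three Tx queues (Ethernet, offload, control)
 * the set is given; on failure the caller simply tears down whatever was
 * allocated, mirroring the err path above.
 */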

/**
 *	t3_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}

/**
 *	t3_sge_start - enable SGE
 *	@adap: the adapter
 *
 *	Enables the SGE for DMAs.  This is the last step in starting packet
 *	transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}

/**
 *	t3_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Disables the DMA engine.  This can be called in emergencies (e.g.,
 *	from error interrupts) or from normal process context.  In the latter
 *	case it also disables any pending queue restart tasklets.  Note that
 *	if it is called in interrupt context it cannot disable the restart
 *	tasklets as it cannot wait, however the tasklets will have no effect
 *	since the doorbells are disabled and the driver will call this again
 *	later from process context, at which time the tasklets will be stopped
 *	if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}

/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
		ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}
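
/*
 * Rough bring-up order, for orientation (a sketch; the actual sequencing is
 * driven from the main driver file):
 *
 *	t3_sge_prep(adap, &adap->params.sge);	once, at probe time
 *	t3_sge_init(adap, &adap->params.sge);	after every chip reset
 *	t3_sge_alloc_qset(...);			per queue set, at open time
 *	t3_sge_start(adap);			finally enable DMA
 */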

/**
 *	t3_sge_prep - one-time SGE initialization
 *	@adap: the associated adapter
 *	@p: SGE parameters
 *
 *	Performs one-time initialization of SGE SW state.  Includes determining
 *	defaults for the assorted SGE parameters, which admins can change until
 *	they are used to initialize the SGE.
 */
void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
	}

	spin_lock_init(&adap->sge.reg_lock);
}

/**
 *	t3_get_desc - dump an SGE descriptor for debugging purposes
 *	@qs: the queue set
 *	@qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
 *	@idx: the descriptor index in the queue
 *	@data: where to dump the descriptor contents
 *
 *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 *	size of the descriptor.
 */
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return -EINVAL;

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return -EINVAL;
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return -EINVAL;
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return -EINVAL;
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}
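
/*
 * Illustrative use (a sketch): dump one response-queue descriptor of queue
 * set 0 into a caller-supplied buffer.  qnum follows the encoding in the
 * comment above (3 selects the response queue).
 *
 *	unsigned char buf[sizeof(struct rsp_desc)];
 *	int len = t3_get_desc(&adap->sge.qs[0], 3, idx, buf);
 *
 * A negative len means the queue is not initialized or idx is out of range;
 * otherwise buf holds len bytes of raw descriptor contents.
 */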