drivers/net/cxgb3/sge.c
1 /*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include "common.h"
40 #include "regs.h"
41 #include "sge_defs.h"
42 #include "t3_cpl.h"
43 #include "firmware_exports.h"
45 #define USE_GTS 0
47 #define SGE_RX_SM_BUF_SIZE 1536
49 #define SGE_RX_COPY_THRES 256
50 #define SGE_RX_PULL_LEN 128
53 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
54 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
55 * directly.
57 #define FL0_PG_CHUNK_SIZE 2048
59 #define SGE_RX_DROP_THRES 16
62 * Period of the Tx buffer reclaim timer. This timer does not need to run
63 * frequently as Tx buffers are usually reclaimed by new Tx packets.
65 #define TX_RECLAIM_PERIOD (HZ / 4)
67 /* WR size in bytes */
68 #define WR_LEN (WR_FLITS * 8)
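/*
 * A flit is a 64-bit word (the Tx descriptor below is an array of __be64
 * flits), so a work request of WR_FLITS flits occupies WR_FLITS * 8 bytes.
 */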
71 * Types of Tx queues in each queue set. Order here matters, do not change.
73 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
75 /* Values for sge_txq.flags */
76 enum {
77 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
78 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
81 struct tx_desc {
82 __be64 flit[TX_DESC_FLITS];
85 struct rx_desc {
86 __be32 addr_lo;
87 __be32 len_gen;
88 __be32 gen2;
89 __be32 addr_hi;
92 struct tx_sw_desc { /* SW state per Tx descriptor */
93 struct sk_buff *skb;
94 u8 eop; /* set if last descriptor for packet */
95 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
96 u8 fragidx; /* first page fragment associated with descriptor */
97 s8 sflit; /* start flit of first SGL entry in descriptor */
100 struct rx_sw_desc { /* SW state per Rx descriptor */
101 union {
102 struct sk_buff *skb;
103 struct fl_pg_chunk pg_chunk;
105 DECLARE_PCI_UNMAP_ADDR(dma_addr);
108 struct rsp_desc { /* response queue descriptor */
109 struct rss_header rss_hdr;
110 __be32 flags;
111 __be32 len_cq;
112 u8 imm_data[47];
113 u8 intr_gen;
117 * Holds unmapping information for Tx packets that need deferred unmapping.
118 * This structure lives at skb->head and must be allocated by callers.
120 struct deferred_unmap_info {
121 struct pci_dev *pdev;
122 dma_addr_t addr[MAX_SKB_FRAGS + 1];
126 * Maps a number of flits to the number of Tx descriptors that can hold them.
127 * The formula is
129 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
131 * HW allows up to 4 descriptors to be combined into a WR.
133 static u8 flit_desc_map[] = {
135 #if SGE_NUM_GENBITS == 1
136 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
137 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
138 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
139 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
140 #elif SGE_NUM_GENBITS == 2
141 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
142 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
143 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
144 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
145 #else
146 # error "SGE_NUM_GENBITS must be 1 or 2"
147 #endif
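/*
 * Worked example of the formula above, with a hypothetical WR_FLITS of 15:
 * a 16-flit request needs 1 + (16 - 2) / (15 - 1) = 2 descriptors, and a
 * 30-flit request needs 1 + 28 / 14 = 3.
 */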
150 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
152 return container_of(q, struct sge_qset, fl[qidx]);
155 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
157 return container_of(q, struct sge_qset, rspq);
160 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
162 return container_of(q, struct sge_qset, txq[qidx]);
166 * refill_rspq - replenish an SGE response queue
167 * @adapter: the adapter
168 * @q: the response queue to replenish
169 * @credits: how many new responses to make available
171 * Replenishes a response queue by making the supplied number of responses
172 * available to HW.
174 static inline void refill_rspq(struct adapter *adapter,
175 const struct sge_rspq *q, unsigned int credits)
177 rmb();
178 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
179 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
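/*
 * Callers normally batch credits rather than returning them one at a time;
 * process_responses() below, for example, returns credits only once a
 * quarter of the response ring has been consumed.
 */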
183 * need_skb_unmap - does the platform need unmapping of sk_buffs?
185 * Returns true if the platform needs sk_buff unmapping. The compiler
186 * optimizes away the unnecessary unmapping code when this returns false.
188 static inline int need_skb_unmap(void)
191 * This structure is used to tell if the platform needs buffer
192 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
194 struct dummy {
195 DECLARE_PCI_UNMAP_ADDR(addr);
198 return sizeof(struct dummy) != 0;
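/*
 * On platforms where DECLARE_PCI_UNMAP_ADDR() expands to nothing the dummy
 * struct is empty, its size is a compile-time 0, and the unmapping paths
 * guarded by this helper are eliminated as dead code.
 */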
202 * unmap_skb - unmap a packet main body and its page fragments
203 * @skb: the packet
204 * @q: the Tx queue containing Tx descriptors for the packet
205 * @cidx: index of Tx descriptor
206 * @pdev: the PCI device
208 * Unmap the main body of an sk_buff and its page fragments, if any.
209 * Because of the fairly complicated structure of our SGLs and the desire
210 * to conserve space for metadata, the information necessary to unmap an
211 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
212 * descriptors (the physical addresses of the various data buffers), and
213 * the SW descriptor state (assorted indices). The send functions
214 * initialize the indices for the first packet descriptor so we can unmap
215 * the buffers held in the first Tx descriptor here, and we have enough
216 * information at this point to set the state for the next Tx descriptor.
218 * Note that it is possible to clean up the first descriptor of a packet
219 * before the send routines have written the next descriptors, but this
220 * race does not cause any problem. We just end up writing the unmapping
221 * info for the descriptor first.
223 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
224 unsigned int cidx, struct pci_dev *pdev)
226 const struct sg_ent *sgp;
227 struct tx_sw_desc *d = &q->sdesc[cidx];
228 int nfrags, frag_idx, curflit, j = d->addr_idx;
230 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
231 frag_idx = d->fragidx;
233 if (frag_idx == 0 && skb_headlen(skb)) {
234 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
235 skb_headlen(skb), PCI_DMA_TODEVICE);
236 j = 1;
239 curflit = d->sflit + 1 + j;
240 nfrags = skb_shinfo(skb)->nr_frags;
242 while (frag_idx < nfrags && curflit < WR_FLITS) {
243 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
244 skb_shinfo(skb)->frags[frag_idx].size,
245 PCI_DMA_TODEVICE);
246 j ^= 1;
247 if (j == 0) {
248 sgp++;
249 curflit++;
251 curflit++;
252 frag_idx++;
255 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
256 d = cidx + 1 == q->size ? q->sdesc : d + 1;
257 d->fragidx = frag_idx;
258 d->addr_idx = j;
259 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
264 * free_tx_desc - reclaims Tx descriptors and their buffers
265 * @adapter: the adapter
266 * @q: the Tx queue to reclaim descriptors from
267 * @n: the number of descriptors to reclaim
269 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
270 * Tx buffers. Called with the Tx queue lock held.
272 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
273 unsigned int n)
275 struct tx_sw_desc *d;
276 struct pci_dev *pdev = adapter->pdev;
277 unsigned int cidx = q->cidx;
279 const int need_unmap = need_skb_unmap() &&
280 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
282 d = &q->sdesc[cidx];
283 while (n--) {
284 if (d->skb) { /* an SGL is present */
285 if (need_unmap)
286 unmap_skb(d->skb, q, cidx, pdev);
287 if (d->eop)
288 kfree_skb(d->skb);
290 ++d;
291 if (++cidx == q->size) {
292 cidx = 0;
293 d = q->sdesc;
296 q->cidx = cidx;
300 * reclaim_completed_tx - reclaims completed Tx descriptors
301 * @adapter: the adapter
302 * @q: the Tx queue to reclaim completed descriptors from
304 * Reclaims Tx descriptors that the SGE has indicated it has processed,
305 * and frees the associated buffers if possible. Called with the Tx
306 * queue's lock held.
308 static inline void reclaim_completed_tx(struct adapter *adapter,
309 struct sge_txq *q)
311 unsigned int reclaim = q->processed - q->cleaned;
313 if (reclaim) {
314 free_tx_desc(adapter, q, reclaim);
315 q->cleaned += reclaim;
316 q->in_use -= reclaim;
321 * should_restart_tx - are there enough resources to restart a Tx queue?
322 * @q: the Tx queue
324 * Checks if there are enough descriptors to restart a suspended Tx queue.
326 static inline int should_restart_tx(const struct sge_txq *q)
328 unsigned int r = q->processed - q->cleaned;
330 return q->in_use - r < (q->size >> 1);
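/*
 * I.e. the queue may be restarted once the number of in-use descriptors the
 * SGE has not yet processed drops below half the ring size.
 */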
334 * free_rx_bufs - free the Rx buffers on an SGE free list
335 * @pdev: the PCI device associated with the adapter
336 * @rxq: the SGE free list to clean up
338 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
339 * this queue should be stopped before calling this function.
341 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
343 unsigned int cidx = q->cidx;
345 while (q->credits--) {
346 struct rx_sw_desc *d = &q->sdesc[cidx];
348 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
349 q->buf_size, PCI_DMA_FROMDEVICE);
350 if (q->use_pages) {
351 put_page(d->pg_chunk.page);
352 d->pg_chunk.page = NULL;
353 } else {
354 kfree_skb(d->skb);
355 d->skb = NULL;
357 if (++cidx == q->size)
358 cidx = 0;
361 if (q->pg_chunk.page) {
362 __free_page(q->pg_chunk.page);
363 q->pg_chunk.page = NULL;
368 * add_one_rx_buf - add a packet buffer to a free-buffer list
369 * @va: buffer start VA
370 * @len: the buffer length
371 * @d: the HW Rx descriptor to write
372 * @sd: the SW Rx descriptor to write
373 * @gen: the generation bit value
374 * @pdev: the PCI device associated with the adapter
376 * Add a buffer of the given length to the supplied HW and SW Rx
377 * descriptors.
379 static inline void add_one_rx_buf(void *va, unsigned int len,
380 struct rx_desc *d, struct rx_sw_desc *sd,
381 unsigned int gen, struct pci_dev *pdev)
383 dma_addr_t mapping;
385 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
386 pci_unmap_addr_set(sd, dma_addr, mapping);
388 d->addr_lo = cpu_to_be32(mapping);
389 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
390 wmb();
391 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
392 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
395 static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
397 if (!q->pg_chunk.page) {
398 q->pg_chunk.page = alloc_page(gfp);
399 if (unlikely(!q->pg_chunk.page))
400 return -ENOMEM;
401 q->pg_chunk.va = page_address(q->pg_chunk.page);
402 q->pg_chunk.offset = 0;
404 sd->pg_chunk = q->pg_chunk;
406 q->pg_chunk.offset += q->buf_size;
407 if (q->pg_chunk.offset == PAGE_SIZE)
408 q->pg_chunk.page = NULL;
409 else {
410 q->pg_chunk.va += q->buf_size;
411 get_page(q->pg_chunk.page);
413 return 0;
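/*
 * Reference counting note: each chunk handed out from the current page takes
 * an extra reference via get_page() except the last one, which inherits the
 * original allocation reference, so the page is freed only after every chunk
 * has been released with put_page().
 */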
417 * refill_fl - refill an SGE free-buffer list
418 * @adapter: the adapter
419 * @q: the free-list to refill
420 * @n: the number of new buffers to allocate
421 * @gfp: the gfp flags for allocating new buffers
423 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
424 * allocated with the supplied gfp flags. The caller must ensure that
425 * @n does not exceed the queue's capacity.
427 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
429 void *buf_start;
430 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
431 struct rx_desc *d = &q->desc[q->pidx];
433 while (n--) {
434 if (q->use_pages) {
435 if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
436 nomem: q->alloc_failed++;
437 break;
439 buf_start = sd->pg_chunk.va;
440 } else {
441 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
443 if (!skb)
444 goto nomem;
446 sd->skb = skb;
447 buf_start = skb->data;
450 add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
451 adap->pdev);
452 d++;
453 sd++;
454 if (++q->pidx == q->size) {
455 q->pidx = 0;
456 q->gen ^= 1;
457 sd = q->sdesc;
458 d = q->desc;
460 q->credits++;
462 wmb();
463 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
466 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
468 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
472 * recycle_rx_buf - recycle a receive buffer
473 * @adapter: the adapter
474 * @q: the SGE free list
475 * @idx: index of buffer to recycle
477 * Recycles the specified buffer on the given free list by adding it at
478 * the next available slot on the list.
480 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
481 unsigned int idx)
483 struct rx_desc *from = &q->desc[idx];
484 struct rx_desc *to = &q->desc[q->pidx];
486 q->sdesc[q->pidx] = q->sdesc[idx];
487 to->addr_lo = from->addr_lo; /* already big endian */
488 to->addr_hi = from->addr_hi; /* likewise */
489 wmb();
490 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
491 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
492 q->credits++;
494 if (++q->pidx == q->size) {
495 q->pidx = 0;
496 q->gen ^= 1;
498 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
502 * alloc_ring - allocate resources for an SGE descriptor ring
503 * @pdev: the PCI device
504 * @nelem: the number of descriptors
505 * @elem_size: the size of each descriptor
506 * @sw_size: the size of the SW state associated with each ring element
507 * @phys: the physical address of the allocated ring
508 * @metadata: address of the array holding the SW state for the ring
510 * Allocates resources for an SGE descriptor ring, such as Tx queues,
511 * free buffer lists, or response queues. Each SGE ring requires
512 * space for its HW descriptors plus, optionally, space for the SW state
513 * associated with each HW entry (the metadata). The function returns
514 * three values: the virtual address for the HW ring (the return value
515 * of the function), the physical address of the HW ring, and the address
516 * of the SW ring.
518 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
519 size_t sw_size, dma_addr_t * phys, void *metadata)
521 size_t len = nelem * elem_size;
522 void *s = NULL;
523 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
525 if (!p)
526 return NULL;
527 if (sw_size) {
528 s = kcalloc(nelem, sw_size, GFP_KERNEL);
530 if (!s) {
531 dma_free_coherent(&pdev->dev, len, p, *phys);
532 return NULL;
535 if (metadata)
536 *(void **)metadata = s;
537 memset(p, 0, len);
538 return p;
542 * free_qset - free the resources of an SGE queue set
543 * @adapter: the adapter owning the queue set
544 * @q: the queue set
546 * Release the HW and SW resources associated with an SGE queue set, such
547 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
548 * queue set must be quiesced prior to calling this.
550 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
552 int i;
553 struct pci_dev *pdev = adapter->pdev;
555 if (q->tx_reclaim_timer.function)
556 del_timer_sync(&q->tx_reclaim_timer);
558 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
559 if (q->fl[i].desc) {
560 spin_lock(&adapter->sge.reg_lock);
561 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
562 spin_unlock(&adapter->sge.reg_lock);
563 free_rx_bufs(pdev, &q->fl[i]);
564 kfree(q->fl[i].sdesc);
565 dma_free_coherent(&pdev->dev,
566 q->fl[i].size *
567 sizeof(struct rx_desc), q->fl[i].desc,
568 q->fl[i].phys_addr);
571 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
572 if (q->txq[i].desc) {
573 spin_lock(&adapter->sge.reg_lock);
574 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
575 spin_unlock(&adapter->sge.reg_lock);
576 if (q->txq[i].sdesc) {
577 free_tx_desc(adapter, &q->txq[i],
578 q->txq[i].in_use);
579 kfree(q->txq[i].sdesc);
581 dma_free_coherent(&pdev->dev,
582 q->txq[i].size *
583 sizeof(struct tx_desc),
584 q->txq[i].desc, q->txq[i].phys_addr);
585 __skb_queue_purge(&q->txq[i].sendq);
588 if (q->rspq.desc) {
589 spin_lock(&adapter->sge.reg_lock);
590 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
591 spin_unlock(&adapter->sge.reg_lock);
592 dma_free_coherent(&pdev->dev,
593 q->rspq.size * sizeof(struct rsp_desc),
594 q->rspq.desc, q->rspq.phys_addr);
597 memset(q, 0, sizeof(*q));
601 * init_qset_cntxt - initialize an SGE queue set context info
602 * @qs: the queue set
603 * @id: the queue set id
605 * Initializes the TIDs and context ids for the queues of a queue set.
607 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
609 qs->rspq.cntxt_id = id;
610 qs->fl[0].cntxt_id = 2 * id;
611 qs->fl[1].cntxt_id = 2 * id + 1;
612 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
613 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
614 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
615 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
616 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
620 * sgl_len - calculates the size of an SGL of the given capacity
621 * @n: the number of SGL entries
623 * Calculates the number of flits needed for a scatter/gather list that
624 * can hold the given number of entries.
626 static inline unsigned int sgl_len(unsigned int n)
628 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
629 return (3 * n) / 2 + (n & 1);
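/*
 * Each pair of SGL entries packs into 3 flits (two 64-bit addresses plus two
 * 32-bit lengths); e.g. sgl_len(3) = 4 + 1 = 5 flits and sgl_len(4) = 6.
 */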
633 * flits_to_desc - returns the num of Tx descriptors for the given flits
634 * @n: the number of flits
636 * Calculates the number of Tx descriptors needed for the supplied number
637 * of flits.
639 static inline unsigned int flits_to_desc(unsigned int n)
641 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
642 return flit_desc_map[n];
646 * get_packet - return the next ingress packet buffer from a free list
647 * @adap: the adapter that received the packet
648 * @fl: the SGE free list holding the packet
649 * @len: the packet length including any SGE padding
650 * @drop_thres: # of remaining buffers before we start dropping packets
652 * Get the next packet from a free list and complete setup of the
653 * sk_buff. If the packet is small we make a copy and recycle the
654 * original buffer, otherwise we use the original buffer itself. If a
655 * positive drop threshold is supplied packets are dropped and their
656 * buffers recycled if (a) the number of remaining buffers is under the
657 * threshold and the packet is too big to copy, or (b) the packet should
658 * be copied but there is no memory for the copy.
660 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
661 unsigned int len, unsigned int drop_thres)
663 struct sk_buff *skb = NULL;
664 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
666 prefetch(sd->skb->data);
667 fl->credits--;
669 if (len <= SGE_RX_COPY_THRES) {
670 skb = alloc_skb(len, GFP_ATOMIC);
671 if (likely(skb != NULL)) {
672 __skb_put(skb, len);
673 pci_dma_sync_single_for_cpu(adap->pdev,
674 pci_unmap_addr(sd, dma_addr), len,
675 PCI_DMA_FROMDEVICE);
676 memcpy(skb->data, sd->skb->data, len);
677 pci_dma_sync_single_for_device(adap->pdev,
678 pci_unmap_addr(sd, dma_addr), len,
679 PCI_DMA_FROMDEVICE);
680 } else if (!drop_thres)
681 goto use_orig_buf;
682 recycle:
683 recycle_rx_buf(adap, fl, fl->cidx);
684 return skb;
687 if (unlikely(fl->credits < drop_thres))
688 goto recycle;
690 use_orig_buf:
691 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
692 fl->buf_size, PCI_DMA_FROMDEVICE);
693 skb = sd->skb;
694 skb_put(skb, len);
695 __refill_fl(adap, fl);
696 return skb;
700 * get_packet_pg - return the next ingress packet buffer from a free list
701 * @adap: the adapter that received the packet
702 * @fl: the SGE free list holding the packet
703 * @len: the packet length including any SGE padding
704 * @drop_thres: # of remaining buffers before we start dropping packets
706 * Get the next packet from a free list populated with page chunks.
707 * If the packet is small we make a copy and recycle the original buffer,
708 * otherwise we attach the original buffer as a page fragment to a fresh
709 * sk_buff. If a positive drop threshold is supplied packets are dropped
710 * and their buffers recycled if (a) the number of remaining buffers is
711 * under the threshold and the packet is too big to copy, or (b) there's
712 * no system memory.
714 * Note: this function is similar to @get_packet but deals with Rx buffers
715 * that are page chunks rather than sk_buffs.
717 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
718 unsigned int len, unsigned int drop_thres)
720 struct sk_buff *skb = NULL;
721 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
723 if (len <= SGE_RX_COPY_THRES) {
724 skb = alloc_skb(len, GFP_ATOMIC);
725 if (likely(skb != NULL)) {
726 __skb_put(skb, len);
727 pci_dma_sync_single_for_cpu(adap->pdev,
728 pci_unmap_addr(sd, dma_addr), len,
729 PCI_DMA_FROMDEVICE);
730 memcpy(skb->data, sd->pg_chunk.va, len);
731 pci_dma_sync_single_for_device(adap->pdev,
732 pci_unmap_addr(sd, dma_addr), len,
733 PCI_DMA_FROMDEVICE);
734 } else if (!drop_thres)
735 return NULL;
736 recycle:
737 fl->credits--;
738 recycle_rx_buf(adap, fl, fl->cidx);
739 return skb;
742 if (unlikely(fl->credits <= drop_thres))
743 goto recycle;
745 skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
746 if (unlikely(!skb)) {
747 if (!drop_thres)
748 return NULL;
749 goto recycle;
752 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
753 fl->buf_size, PCI_DMA_FROMDEVICE);
754 __skb_put(skb, SGE_RX_PULL_LEN);
755 memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
756 skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
757 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
758 len - SGE_RX_PULL_LEN);
759 skb->len = len;
760 skb->data_len = len - SGE_RX_PULL_LEN;
761 skb->truesize += skb->data_len;
763 fl->credits--;
765 * We do not refill FLs here, we let the caller do it to overlap a
766 * prefetch.
768 return skb;
772 * get_imm_packet - return the next ingress packet buffer from a response
773 * @resp: the response descriptor containing the packet data
775 * Return a packet containing the immediate data of the given response.
777 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
779 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
781 if (skb) {
782 __skb_put(skb, IMMED_PKT_SIZE);
783 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
785 return skb;
789 * calc_tx_descs - calculate the number of Tx descriptors for a packet
790 * @skb: the packet
792 * Returns the number of Tx descriptors needed for the given Ethernet
793 * packet. Ethernet packets require addition of WR and CPL headers.
795 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
797 unsigned int flits;
799 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
800 return 1;
802 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
803 if (skb_shinfo(skb)->gso_size)
804 flits++;
805 return flits_to_desc(flits);
809 * make_sgl - populate a scatter/gather list for a packet
810 * @skb: the packet
811 * @sgp: the SGL to populate
812 * @start: start address of skb main body data to include in the SGL
813 * @len: length of skb main body data to include in the SGL
814 * @pdev: the PCI device
816 * Generates a scatter/gather list for the buffers that make up a packet
817 * and returns the SGL size in 8-byte words. The caller must size the SGL
818 * appropriately.
820 static inline unsigned int make_sgl(const struct sk_buff *skb,
821 struct sg_ent *sgp, unsigned char *start,
822 unsigned int len, struct pci_dev *pdev)
824 dma_addr_t mapping;
825 unsigned int i, j = 0, nfrags;
827 if (len) {
828 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
829 sgp->len[0] = cpu_to_be32(len);
830 sgp->addr[0] = cpu_to_be64(mapping);
831 j = 1;
834 nfrags = skb_shinfo(skb)->nr_frags;
835 for (i = 0; i < nfrags; i++) {
836 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
838 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
839 frag->size, PCI_DMA_TODEVICE);
840 sgp->len[j] = cpu_to_be32(frag->size);
841 sgp->addr[j] = cpu_to_be64(mapping);
842 j ^= 1;
843 if (j == 0)
844 ++sgp;
846 if (j)
847 sgp->len[j] = 0;
848 return ((nfrags + (len != 0)) * 3) / 2 + j;
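/*
 * The value returned above equals sgl_len(nfrags + (len != 0)); j is 1
 * exactly when an odd number of SGL entries was written.
 */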
852 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
853 * @adap: the adapter
854 * @q: the Tx queue
856 * Ring the doorbell if a Tx queue is asleep. There is a natural race
857 * where the HW may go to sleep just after we check; in that case the
858 * interrupt handler will detect the outstanding TX packet
859 * and ring the doorbell for us.
861 * When GTS is disabled we unconditionally ring the doorbell.
863 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
865 #if USE_GTS
866 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
867 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
868 set_bit(TXQ_LAST_PKT_DB, &q->flags);
869 t3_write_reg(adap, A_SG_KDOORBELL,
870 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
872 #else
873 wmb(); /* write descriptors before telling HW */
874 t3_write_reg(adap, A_SG_KDOORBELL,
875 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
876 #endif
879 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
881 #if SGE_NUM_GENBITS == 2
882 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
883 #endif
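/*
 * With two generation bits the last flit of each descriptor carries a second
 * copy of the generation value; with SGE_NUM_GENBITS == 1 this helper
 * compiles away to nothing.
 */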
887 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
888 * @ndesc: number of Tx descriptors spanned by the SGL
889 * @skb: the packet corresponding to the WR
890 * @d: first Tx descriptor to be written
891 * @pidx: index of above descriptors
892 * @q: the SGE Tx queue
893 * @sgl: the SGL
894 * @flits: number of flits to the start of the SGL in the first descriptor
895 * @sgl_flits: the SGL size in flits
896 * @gen: the Tx descriptor generation
897 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
898 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
900 * Write a work request header and an associated SGL. If the SGL is
901 * small enough to fit into one Tx descriptor it has already been written
902 * and we just need to write the WR header. Otherwise we distribute the
903 * SGL across the number of descriptors it spans.
905 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
906 struct tx_desc *d, unsigned int pidx,
907 const struct sge_txq *q,
908 const struct sg_ent *sgl,
909 unsigned int flits, unsigned int sgl_flits,
910 unsigned int gen, __be32 wr_hi,
911 __be32 wr_lo)
913 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
914 struct tx_sw_desc *sd = &q->sdesc[pidx];
916 sd->skb = skb;
917 if (need_skb_unmap()) {
918 sd->fragidx = 0;
919 sd->addr_idx = 0;
920 sd->sflit = flits;
923 if (likely(ndesc == 1)) {
924 sd->eop = 1;
925 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
926 V_WR_SGLSFLT(flits)) | wr_hi;
927 wmb();
928 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
929 V_WR_GEN(gen)) | wr_lo;
930 wr_gen2(d, gen);
931 } else {
932 unsigned int ogen = gen;
933 const u64 *fp = (const u64 *)sgl;
934 struct work_request_hdr *wp = wrp;
936 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
937 V_WR_SGLSFLT(flits)) | wr_hi;
939 while (sgl_flits) {
940 unsigned int avail = WR_FLITS - flits;
942 if (avail > sgl_flits)
943 avail = sgl_flits;
944 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
945 sgl_flits -= avail;
946 ndesc--;
947 if (!sgl_flits)
948 break;
950 fp += avail;
951 d++;
952 sd->eop = 0;
953 sd++;
954 if (++pidx == q->size) {
955 pidx = 0;
956 gen ^= 1;
957 d = q->desc;
958 sd = q->sdesc;
961 sd->skb = skb;
962 wrp = (struct work_request_hdr *)d;
963 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
964 V_WR_SGLSFLT(1)) | wr_hi;
965 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
966 sgl_flits + 1)) |
967 V_WR_GEN(gen)) | wr_lo;
968 wr_gen2(d, gen);
969 flits = 1;
971 sd->eop = 1;
972 wrp->wr_hi |= htonl(F_WR_EOP);
973 wmb();
974 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
975 wr_gen2((struct tx_desc *)wp, ogen);
976 WARN_ON(ndesc != 0);
981 * write_tx_pkt_wr - write a TX_PKT work request
982 * @adap: the adapter
983 * @skb: the packet to send
984 * @pi: the egress interface
985 * @pidx: index of the first Tx descriptor to write
986 * @gen: the generation value to use
987 * @q: the Tx queue
988 * @ndesc: number of descriptors the packet will occupy
989 * @compl: the value of the COMPL bit to use
991 * Generate a TX_PKT work request to send the supplied packet.
993 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
994 const struct port_info *pi,
995 unsigned int pidx, unsigned int gen,
996 struct sge_txq *q, unsigned int ndesc,
997 unsigned int compl)
999 unsigned int flits, sgl_flits, cntrl, tso_info;
1000 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1001 struct tx_desc *d = &q->desc[pidx];
1002 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1004 cpl->len = htonl(skb->len | 0x80000000);
1005 cntrl = V_TXPKT_INTF(pi->port_id);
1007 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1008 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1010 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1011 if (tso_info) {
1012 int eth_type;
1013 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1015 d->flit[2] = 0;
1016 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1017 hdr->cntrl = htonl(cntrl);
1018 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1019 CPL_ETH_II : CPL_ETH_II_VLAN;
1020 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1021 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1022 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1023 hdr->lso_info = htonl(tso_info);
1024 flits = 3;
1025 } else {
1026 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1027 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1028 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1029 cpl->cntrl = htonl(cntrl);
1031 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1032 q->sdesc[pidx].skb = NULL;
1033 if (!skb->data_len)
1034 skb_copy_from_linear_data(skb, &d->flit[2],
1035 skb->len);
1036 else
1037 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1039 flits = (skb->len + 7) / 8 + 2;
1040 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1041 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1042 | F_WR_SOP | F_WR_EOP | compl);
1043 wmb();
1044 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1045 V_WR_TID(q->token));
1046 wr_gen2(d, gen);
1047 kfree_skb(skb);
1048 return;
1051 flits = 2;
1054 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1055 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1057 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1058 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1059 htonl(V_WR_TID(q->token)));
1062 static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
1063 struct sge_txq *q)
1065 netif_stop_queue(dev);
1066 set_bit(TXQ_ETH, &qs->txq_stopped);
1067 q->stops++;
1071 * eth_xmit - add a packet to the Ethernet Tx queue
1072 * @skb: the packet
1073 * @dev: the egress net device
1075 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1077 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1079 unsigned int ndesc, pidx, credits, gen, compl;
1080 const struct port_info *pi = netdev_priv(dev);
1081 struct adapter *adap = pi->adapter;
1082 struct sge_qset *qs = pi->qs;
1083 struct sge_txq *q = &qs->txq[TXQ_ETH];
1086 * The chip's minimum packet length is 9 octets, but we play it safe and
1087 * reject anything shorter than an Ethernet header.
1089 if (unlikely(skb->len < ETH_HLEN)) {
1090 dev_kfree_skb(skb);
1091 return NETDEV_TX_OK;
1094 spin_lock(&q->lock);
1095 reclaim_completed_tx(adap, q);
1097 credits = q->size - q->in_use;
1098 ndesc = calc_tx_descs(skb);
1100 if (unlikely(credits < ndesc)) {
1101 t3_stop_queue(dev, qs, q);
1102 dev_err(&adap->pdev->dev,
1103 "%s: Tx ring %u full while queue awake!\n",
1104 dev->name, q->cntxt_id & 7);
1105 spin_unlock(&q->lock);
1106 return NETDEV_TX_BUSY;
1109 q->in_use += ndesc;
1110 if (unlikely(credits - ndesc < q->stop_thres))
1111 if (USE_GTS || !should_restart_tx(q))
1112 t3_stop_queue(dev, qs, q);
1114 gen = q->gen;
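/*
 * Ask the SGE for a completion roughly every 8 descriptors: bit 3 of the
 * running unacked count is latched into the WR_COMPL flag position and then
 * cleared from the counter.
 */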
1115 q->unacked += ndesc;
1116 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1117 q->unacked &= 7;
1118 pidx = q->pidx;
1119 q->pidx += ndesc;
1120 if (q->pidx >= q->size) {
1121 q->pidx -= q->size;
1122 q->gen ^= 1;
1125 /* update port statistics */
1126 if (skb->ip_summed == CHECKSUM_COMPLETE)
1127 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1128 if (skb_shinfo(skb)->gso_size)
1129 qs->port_stats[SGE_PSTAT_TSO]++;
1130 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1131 qs->port_stats[SGE_PSTAT_VLANINS]++;
1133 dev->trans_start = jiffies;
1134 spin_unlock(&q->lock);
1137 * We do not use Tx completion interrupts to free DMAd Tx packets.
1138 * This is good for performance but means that we rely on new Tx
1139 * packets arriving to run the destructors of completed packets,
1140 * which open up space in their sockets' send queues. Sometimes
1141 * we do not get such new packets causing Tx to stall. A single
1142 * UDP transmitter is a good example of this situation. We have
1143 * a clean up timer that periodically reclaims completed packets
1144 * but it doesn't run often enough (nor do we want it to) to prevent
1145 * lengthy stalls. A solution to this problem is to run the
1146 * destructor early, after the packet is queued but before it's DMAd.
1147 * A downside is that we lie to socket memory accounting, but the amount
1148 * of extra memory is reasonable (limited by the number of Tx
1149 * descriptors), the packets do actually get freed quickly by new
1150 * packets almost always, and for protocols like TCP that wait for
1151 * acks to really free up the data the extra memory is even less.
1152 * On the positive side we run the destructors on the sending CPU
1153 * rather than on a potentially different completing CPU, usually a
1154 * good thing. We also run them without holding our Tx queue lock,
1155 * unlike what reclaim_completed_tx() would otherwise do.
1157 * Run the destructor before telling the DMA engine about the packet
1158 * to make sure it doesn't complete and get freed prematurely.
1160 if (likely(!skb_shared(skb)))
1161 skb_orphan(skb);
1163 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1164 check_ring_tx_db(adap, q);
1165 return NETDEV_TX_OK;
1169 * write_imm - write a packet into a Tx descriptor as immediate data
1170 * @d: the Tx descriptor to write
1171 * @skb: the packet
1172 * @len: the length of packet data to write as immediate data
1173 * @gen: the generation bit value to write
1175 * Writes a packet as immediate data into a Tx descriptor. The packet
1176 * contains a work request at its beginning. We must write the packet
1177 * carefully so the SGE doesn't read it accidentally before it's written
1178 * in its entirety.
1180 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1181 unsigned int len, unsigned int gen)
1183 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1184 struct work_request_hdr *to = (struct work_request_hdr *)d;
1186 if (likely(!skb->data_len))
1187 memcpy(&to[1], &from[1], len - sizeof(*from));
1188 else
1189 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1191 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1192 V_WR_BCNTLFLT(len & 7));
1193 wmb();
1194 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1195 V_WR_LEN((len + 7) / 8));
1196 wr_gen2(d, gen);
1197 kfree_skb(skb);
1201 * check_desc_avail - check descriptor availability on a send queue
1202 * @adap: the adapter
1203 * @q: the send queue
1204 * @skb: the packet needing the descriptors
1205 * @ndesc: the number of Tx descriptors needed
1206 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1208 * Checks if the requested number of Tx descriptors is available on an
1209 * SGE send queue. If the queue is already suspended or not enough
1210 * descriptors are available the packet is queued for later transmission.
1211 * Must be called with the Tx queue locked.
1213 * Returns 0 if enough descriptors are available, 1 if there aren't
1214 * enough descriptors and the packet has been queued, and 2 if the caller
1215 * needs to retry because there weren't enough descriptors at the
1216 * beginning of the call but some freed up in the meantime.
1218 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1219 struct sk_buff *skb, unsigned int ndesc,
1220 unsigned int qid)
1222 if (unlikely(!skb_queue_empty(&q->sendq))) {
1223 addq_exit:__skb_queue_tail(&q->sendq, skb);
1224 return 1;
1226 if (unlikely(q->size - q->in_use < ndesc)) {
1227 struct sge_qset *qs = txq_to_qset(q, qid);
1229 set_bit(qid, &qs->txq_stopped);
1230 smp_mb__after_clear_bit();
1232 if (should_restart_tx(q) &&
1233 test_and_clear_bit(qid, &qs->txq_stopped))
1234 return 2;
1236 q->stops++;
1237 goto addq_exit;
1239 return 0;
1243 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1244 * @q: the SGE control Tx queue
1246 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1247 * that send only immediate data (presently just the control queues) and
1248 * thus do not have any sk_buffs to release.
1250 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1252 unsigned int reclaim = q->processed - q->cleaned;
1254 q->in_use -= reclaim;
1255 q->cleaned += reclaim;
1258 static inline int immediate(const struct sk_buff *skb)
1260 return skb->len <= WR_LEN;
1264 * ctrl_xmit - send a packet through an SGE control Tx queue
1265 * @adap: the adapter
1266 * @q: the control queue
1267 * @skb: the packet
1269 * Send a packet through an SGE control Tx queue. Packets sent through
1270 * a control queue must fit entirely as immediate data in a single Tx
1271 * descriptor and have no page fragments.
1273 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1274 struct sk_buff *skb)
1276 int ret;
1277 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1279 if (unlikely(!immediate(skb))) {
1280 WARN_ON(1);
1281 dev_kfree_skb(skb);
1282 return NET_XMIT_SUCCESS;
1285 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1286 wrp->wr_lo = htonl(V_WR_TID(q->token));
1288 spin_lock(&q->lock);
1289 again:reclaim_completed_tx_imm(q);
1291 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1292 if (unlikely(ret)) {
1293 if (ret == 1) {
1294 spin_unlock(&q->lock);
1295 return NET_XMIT_CN;
1297 goto again;
1300 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1302 q->in_use++;
1303 if (++q->pidx >= q->size) {
1304 q->pidx = 0;
1305 q->gen ^= 1;
1307 spin_unlock(&q->lock);
1308 wmb();
1309 t3_write_reg(adap, A_SG_KDOORBELL,
1310 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1311 return NET_XMIT_SUCCESS;
1315 * restart_ctrlq - restart a suspended control queue
1316 * @qs: the queue set containing the control queue
1318 * Resumes transmission on a suspended Tx control queue.
1320 static void restart_ctrlq(unsigned long data)
1322 struct sk_buff *skb;
1323 struct sge_qset *qs = (struct sge_qset *)data;
1324 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1326 spin_lock(&q->lock);
1327 again:reclaim_completed_tx_imm(q);
1329 while (q->in_use < q->size &&
1330 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1332 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1334 if (++q->pidx >= q->size) {
1335 q->pidx = 0;
1336 q->gen ^= 1;
1338 q->in_use++;
1341 if (!skb_queue_empty(&q->sendq)) {
1342 set_bit(TXQ_CTRL, &qs->txq_stopped);
1343 smp_mb__after_clear_bit();
1345 if (should_restart_tx(q) &&
1346 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1347 goto again;
1348 q->stops++;
1351 spin_unlock(&q->lock);
1352 wmb();
1353 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1354 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1358 * Send a management message through control queue 0
1360 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1362 int ret;
1363 local_bh_disable();
1364 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1365 local_bh_enable();
1367 return ret;
1371 * deferred_unmap_destructor - unmap a packet when it is freed
1372 * @skb: the packet
1374 * This is the packet destructor used for Tx packets that need to remain
1375 * mapped until they are freed rather than until their Tx descriptors are
1376 * freed.
1378 static void deferred_unmap_destructor(struct sk_buff *skb)
1380 int i;
1381 const dma_addr_t *p;
1382 const struct skb_shared_info *si;
1383 const struct deferred_unmap_info *dui;
1385 dui = (struct deferred_unmap_info *)skb->head;
1386 p = dui->addr;
1388 if (skb->tail - skb->transport_header)
1389 pci_unmap_single(dui->pdev, *p++,
1390 skb->tail - skb->transport_header,
1391 PCI_DMA_TODEVICE);
1393 si = skb_shinfo(skb);
1394 for (i = 0; i < si->nr_frags; i++)
1395 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1396 PCI_DMA_TODEVICE);
1399 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1400 const struct sg_ent *sgl, int sgl_flits)
1402 dma_addr_t *p;
1403 struct deferred_unmap_info *dui;
1405 dui = (struct deferred_unmap_info *)skb->head;
1406 dui->pdev = pdev;
1407 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1408 *p++ = be64_to_cpu(sgl->addr[0]);
1409 *p++ = be64_to_cpu(sgl->addr[1]);
1411 if (sgl_flits)
1412 *p = be64_to_cpu(sgl->addr[0]);
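/*
 * The loop above consumes the SGL three flits (one sg_ent, two addresses) at
 * a time; a two-flit remainder is a final odd entry holding a single address.
 */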
1416 * write_ofld_wr - write an offload work request
1417 * @adap: the adapter
1418 * @skb: the packet to send
1419 * @q: the Tx queue
1420 * @pidx: index of the first Tx descriptor to write
1421 * @gen: the generation value to use
1422 * @ndesc: number of descriptors the packet will occupy
1424 * Write an offload work request to send the supplied packet. The packet
1425 * data already carry the work request with most fields populated.
1427 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1428 struct sge_txq *q, unsigned int pidx,
1429 unsigned int gen, unsigned int ndesc)
1431 unsigned int sgl_flits, flits;
1432 struct work_request_hdr *from;
1433 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1434 struct tx_desc *d = &q->desc[pidx];
1436 if (immediate(skb)) {
1437 q->sdesc[pidx].skb = NULL;
1438 write_imm(d, skb, skb->len, gen);
1439 return;
1442 /* Only TX_DATA builds SGLs */
1444 from = (struct work_request_hdr *)skb->data;
1445 memcpy(&d->flit[1], &from[1],
1446 skb_transport_offset(skb) - sizeof(*from));
1448 flits = skb_transport_offset(skb) / 8;
1449 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1450 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1451 skb->tail - skb->transport_header,
1452 adap->pdev);
1453 if (need_skb_unmap()) {
1454 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1455 skb->destructor = deferred_unmap_destructor;
1458 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1459 gen, from->wr_hi, from->wr_lo);
1463 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1464 * @skb: the packet
1466 * Returns the number of Tx descriptors needed for the given offload
1467 * packet. These packets are already fully constructed.
1469 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1471 unsigned int flits, cnt;
1473 if (skb->len <= WR_LEN)
1474 return 1; /* packet fits as immediate data */
1476 flits = skb_transport_offset(skb) / 8; /* headers */
1477 cnt = skb_shinfo(skb)->nr_frags;
1478 if (skb->tail != skb->transport_header)
1479 cnt++;
1480 return flits_to_desc(flits + sgl_len(cnt));
1484 * ofld_xmit - send a packet through an offload queue
1485 * @adap: the adapter
1486 * @q: the Tx offload queue
1487 * @skb: the packet
1489 * Send an offload packet through an SGE offload queue.
1491 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1492 struct sk_buff *skb)
1494 int ret;
1495 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1497 spin_lock(&q->lock);
1498 again:reclaim_completed_tx(adap, q);
1500 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1501 if (unlikely(ret)) {
1502 if (ret == 1) {
1503 skb->priority = ndesc; /* save for restart */
1504 spin_unlock(&q->lock);
1505 return NET_XMIT_CN;
1507 goto again;
1510 gen = q->gen;
1511 q->in_use += ndesc;
1512 pidx = q->pidx;
1513 q->pidx += ndesc;
1514 if (q->pidx >= q->size) {
1515 q->pidx -= q->size;
1516 q->gen ^= 1;
1518 spin_unlock(&q->lock);
1520 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1521 check_ring_tx_db(adap, q);
1522 return NET_XMIT_SUCCESS;
1526 * restart_offloadq - restart a suspended offload queue
1527 * @qs: the queue set containing the offload queue
1529 * Resumes transmission on a suspended Tx offload queue.
1531 static void restart_offloadq(unsigned long data)
1533 struct sk_buff *skb;
1534 struct sge_qset *qs = (struct sge_qset *)data;
1535 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1536 const struct port_info *pi = netdev_priv(qs->netdev);
1537 struct adapter *adap = pi->adapter;
1539 spin_lock(&q->lock);
1540 again:reclaim_completed_tx(adap, q);
1542 while ((skb = skb_peek(&q->sendq)) != NULL) {
1543 unsigned int gen, pidx;
1544 unsigned int ndesc = skb->priority;
1546 if (unlikely(q->size - q->in_use < ndesc)) {
1547 set_bit(TXQ_OFLD, &qs->txq_stopped);
1548 smp_mb__after_clear_bit();
1550 if (should_restart_tx(q) &&
1551 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1552 goto again;
1553 q->stops++;
1554 break;
1557 gen = q->gen;
1558 q->in_use += ndesc;
1559 pidx = q->pidx;
1560 q->pidx += ndesc;
1561 if (q->pidx >= q->size) {
1562 q->pidx -= q->size;
1563 q->gen ^= 1;
1565 __skb_unlink(skb, &q->sendq);
1566 spin_unlock(&q->lock);
1568 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1569 spin_lock(&q->lock);
1571 spin_unlock(&q->lock);
1573 #if USE_GTS
1574 set_bit(TXQ_RUNNING, &q->flags);
1575 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1576 #endif
1577 wmb();
1578 t3_write_reg(adap, A_SG_KDOORBELL,
1579 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1583 * queue_set - return the queue set a packet should use
1584 * @skb: the packet
1586 * Maps a packet to the SGE queue set it should use. The desired queue
1587 * set is carried in bits 1-3 in the packet's priority.
1589 static inline int queue_set(const struct sk_buff *skb)
1591 return skb->priority >> 1;
1595 * is_ctrl_pkt - return whether an offload packet is a control packet
1596 * @skb: the packet
1598 * Determines whether an offload packet should use an OFLD or a CTRL
1599 * Tx queue. This is indicated by bit 0 in the packet's priority.
1601 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1603 return skb->priority & 1;
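/*
 * A minimal sketch of the encoding, using a hypothetical helper that is not
 * part of the driver:
 *
 *	static inline void set_ofld_priority(struct sk_buff *skb,
 *					     unsigned int qset, int ctrl)
 *	{
 *		skb->priority = (qset << 1) | (ctrl & 1);
 *	}
 *
 * e.g. a priority of 5 selects queue set 2's control queue, while 4 selects
 * the same queue set's offload queue.
 */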
1607 * t3_offload_tx - send an offload packet
1608 * @tdev: the offload device to send to
1609 * @skb: the packet
1611 * Sends an offload packet. We use the packet priority to select the
1612 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1613 * should be sent as regular or control, bits 1-3 select the queue set.
1615 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1617 struct adapter *adap = tdev2adap(tdev);
1618 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1620 if (unlikely(is_ctrl_pkt(skb)))
1621 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1623 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1627 * offload_enqueue - add an offload packet to an SGE offload receive queue
1628 * @q: the SGE response queue
1629 * @skb: the packet
1631 * Add a new offload packet to an SGE response queue's offload packet
1632 * queue. If the packet is the first on the queue it schedules the RX
1633 * softirq to process the queue.
1635 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1637 skb->next = skb->prev = NULL;
1638 if (q->rx_tail)
1639 q->rx_tail->next = skb;
1640 else {
1641 struct sge_qset *qs = rspq_to_qset(q);
1643 napi_schedule(&qs->napi);
1644 q->rx_head = skb;
1646 q->rx_tail = skb;
1650 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1651 * @tdev: the offload device that will be receiving the packets
1652 * @q: the SGE response queue that assembled the bundle
1653 * @skbs: the partial bundle
1654 * @n: the number of packets in the bundle
1656 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1658 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1659 struct sge_rspq *q,
1660 struct sk_buff *skbs[], int n)
1662 if (n) {
1663 q->offload_bundles++;
1664 tdev->recv(tdev, skbs, n);
1669 * ofld_poll - NAPI handler for offload packets in interrupt mode
1670 * @dev: the network device doing the polling
1671 * @budget: polling budget
1673 * The NAPI handler for offload packets when a response queue is serviced
1674 * by the hard interrupt handler, i.e., when it's operating in non-polling
1675 * mode. Creates small packet batches and sends them through the offload
1676 * receive handler. Batches need to be of modest size as we do prefetches
1677 * on the packets in each.
1679 static int ofld_poll(struct napi_struct *napi, int budget)
1681 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1682 struct sge_rspq *q = &qs->rspq;
1683 struct adapter *adapter = qs->adap;
1684 int work_done = 0;
1686 while (work_done < budget) {
1687 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1688 int ngathered;
1690 spin_lock_irq(&q->lock);
1691 head = q->rx_head;
1692 if (!head) {
1693 napi_complete(napi);
1694 spin_unlock_irq(&q->lock);
1695 return work_done;
1698 tail = q->rx_tail;
1699 q->rx_head = q->rx_tail = NULL;
1700 spin_unlock_irq(&q->lock);
1702 for (ngathered = 0; work_done < budget && head; work_done++) {
1703 prefetch(head->data);
1704 skbs[ngathered] = head;
1705 head = head->next;
1706 skbs[ngathered]->next = NULL;
1707 if (++ngathered == RX_BUNDLE_SIZE) {
1708 q->offload_bundles++;
1709 adapter->tdev.recv(&adapter->tdev, skbs,
1710 ngathered);
1711 ngathered = 0;
1714 if (head) { /* splice remaining packets back onto Rx queue */
1715 spin_lock_irq(&q->lock);
1716 tail->next = q->rx_head;
1717 if (!q->rx_head)
1718 q->rx_tail = tail;
1719 q->rx_head = head;
1720 spin_unlock_irq(&q->lock);
1722 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1725 return work_done;
1729 * rx_offload - process a received offload packet
1730 * @tdev: the offload device receiving the packet
1731 * @rq: the response queue that received the packet
1732 * @skb: the packet
1733 * @rx_gather: a gather list of packets if we are building a bundle
1734 * @gather_idx: index of the next available slot in the bundle
1736 * Process an ingress offload packet and add it to the offload ingress
1737 * queue. Returns the index of the next available slot in the bundle.
1739 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1740 struct sk_buff *skb, struct sk_buff *rx_gather[],
1741 unsigned int gather_idx)
1743 skb_reset_mac_header(skb);
1744 skb_reset_network_header(skb);
1745 skb_reset_transport_header(skb);
1747 if (rq->polling) {
1748 rx_gather[gather_idx++] = skb;
1749 if (gather_idx == RX_BUNDLE_SIZE) {
1750 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1751 gather_idx = 0;
1752 rq->offload_bundles++;
1754 } else
1755 offload_enqueue(rq, skb);
1757 return gather_idx;
1761 * restart_tx - check whether to restart suspended Tx queues
1762 * @qs: the queue set to resume
1764 * Restarts suspended Tx queues of an SGE queue set if they have enough
1765 * free resources to resume operation.
1767 static void restart_tx(struct sge_qset *qs)
1769 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1770 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1771 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1772 qs->txq[TXQ_ETH].restarts++;
1773 if (netif_running(qs->netdev))
1774 netif_wake_queue(qs->netdev);
1777 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1778 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1779 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1780 qs->txq[TXQ_OFLD].restarts++;
1781 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1783 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1784 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1785 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1786 qs->txq[TXQ_CTRL].restarts++;
1787 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1792 * rx_eth - process an ingress ethernet packet
1793 * @adap: the adapter
1794 * @rq: the response queue that received the packet
1795 * @skb: the packet
1796 * @pad: amount of padding at the start of the buffer
1798 * Process an ingress ethernet packet and deliver it to the stack.
1799 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1800 * if it was immediate data in a response.
1802 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1803 struct sk_buff *skb, int pad)
1805 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1806 struct port_info *pi;
1808 skb_pull(skb, sizeof(*p) + pad);
1809 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1810 skb->dev->last_rx = jiffies;
1811 pi = netdev_priv(skb->dev);
1812 if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
1813 !p->fragment) {
1814 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1815 skb->ip_summed = CHECKSUM_UNNECESSARY;
1816 } else
1817 skb->ip_summed = CHECKSUM_NONE;
1819 if (unlikely(p->vlan_valid)) {
1820 struct vlan_group *grp = pi->vlan_grp;
1822 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1823 if (likely(grp))
1824 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1825 rq->polling);
1826 else
1827 dev_kfree_skb_any(skb);
1828 } else if (rq->polling)
1829 netif_receive_skb(skb);
1830 else
1831 netif_rx(skb);
1835 * handle_rsp_cntrl_info - handles control information in a response
1836 * @qs: the queue set corresponding to the response
1837 * @flags: the response control flags
1839 * Handles the control information of an SGE response, such as GTS
1840 * indications and completion credits for the queue set's Tx queues.
1841 * HW coalesces credits; we don't do any extra SW coalescing.
1843 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1845 unsigned int credits;
1847 #if USE_GTS
1848 if (flags & F_RSPD_TXQ0_GTS)
1849 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1850 #endif
1852 credits = G_RSPD_TXQ0_CR(flags);
1853 if (credits)
1854 qs->txq[TXQ_ETH].processed += credits;
1856 credits = G_RSPD_TXQ2_CR(flags);
1857 if (credits)
1858 qs->txq[TXQ_CTRL].processed += credits;
1860 # if USE_GTS
1861 if (flags & F_RSPD_TXQ1_GTS)
1862 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1863 # endif
1864 credits = G_RSPD_TXQ1_CR(flags);
1865 if (credits)
1866 qs->txq[TXQ_OFLD].processed += credits;
1870 * check_ring_db - check if we need to ring any doorbells
1871 * @adapter: the adapter
1872 * @qs: the queue set whose Tx queues are to be examined
1873 * @sleeping: indicates which Tx queue sent GTS
1875 * Checks if some of a queue set's Tx queues need to ring their doorbells
1876 * to resume transmission after idling while they still have unprocessed
1877 * descriptors.
1879 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1880 unsigned int sleeping)
1882 if (sleeping & F_RSPD_TXQ0_GTS) {
1883 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1885 if (txq->cleaned + txq->in_use != txq->processed &&
1886 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1887 set_bit(TXQ_RUNNING, &txq->flags);
1888 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1889 V_EGRCNTX(txq->cntxt_id));
1893 if (sleeping & F_RSPD_TXQ1_GTS) {
1894 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1896 if (txq->cleaned + txq->in_use != txq->processed &&
1897 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1898 set_bit(TXQ_RUNNING, &txq->flags);
1899 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1900 V_EGRCNTX(txq->cntxt_id));
1906 * is_new_response - check if a response is newly written
1907 * @r: the response descriptor
1908 * @q: the response queue
1910 * Returns true if a response descriptor contains a yet unprocessed
1911 * response.
1913 static inline int is_new_response(const struct rsp_desc *r,
1914 const struct sge_rspq *q)
1916 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
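/*
 * Generation-bit scheme: q->gen starts at 1 (see t3_sge_alloc_qset()) and is
 * flipped by process_responses()/process_pure_responses() each time the
 * consumer index wraps; HW toggles the generation bit it writes the same way,
 * so an entry whose F_RSPD_GEN2 bit equals q->gen has been written by HW
 * since SW last consumed that slot.
 */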
1919 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1920 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1921 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1922 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1923 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1925 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1926 #define NOMEM_INTR_DELAY 2500
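/*
 * With the SGE timer tick programmed in t3_sge_init() (A_SG_TIMER_TICK is set
 * to core_ticks_per_usec() / 10, i.e. a 100 ns tick), 2500 ticks works out to
 * a 250 us holdoff before the next interrupt.
 */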
1929 * process_responses - process responses from an SGE response queue
1930 * @adap: the adapter
1931 * @qs: the queue set to which the response queue belongs
1932 * @budget: how many responses can be processed in this round
1934 * Process responses from an SGE response queue up to the supplied budget.
1935 * Responses include received packets as well as credits and other events
1936 * for the queues that belong to the response queue's queue set.
1937 * A negative budget is effectively unlimited.
1939 * Additionally choose the interrupt holdoff time for the next interrupt
1940 * on this queue. If the system is under memory shortage, use a fairly
1941 * long delay to help recovery.
1943 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1944 int budget)
1946 struct sge_rspq *q = &qs->rspq;
1947 struct rsp_desc *r = &q->desc[q->cidx];
1948 int budget_left = budget;
1949 unsigned int sleeping = 0;
1950 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1951 int ngathered = 0;
1953 q->next_holdoff = q->holdoff_tmr;
1955 while (likely(budget_left && is_new_response(r, q))) {
1956 int eth, ethpad = 2;
1957 struct sk_buff *skb = NULL;
1958 u32 len, flags = ntohl(r->flags);
1959 __be32 rss_hi = *(const __be32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1961 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1963 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1964 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1965 if (!skb)
1966 goto no_mem;
1968 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1969 skb->data[0] = CPL_ASYNC_NOTIF;
1970 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1971 q->async_notif++;
1972 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1973 skb = get_imm_packet(r);
1974 if (unlikely(!skb)) {
1975 no_mem:
1976 q->next_holdoff = NOMEM_INTR_DELAY;
1977 q->nomem++;
1978 /* consume one credit since we tried */
1979 budget_left--;
1980 break;
1982 q->imm_data++;
1983 ethpad = 0;
1984 } else if ((len = ntohl(r->len_cq)) != 0) {
1985 struct sge_fl *fl;
1987 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1988 if (fl->use_pages) {
1989 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
1991 prefetch(addr);
1992 #if L1_CACHE_BYTES < 128
1993 prefetch(addr + L1_CACHE_BYTES);
1994 #endif
1995 __refill_fl(adap, fl);
1997 skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
1998 eth ? SGE_RX_DROP_THRES : 0);
1999 } else
2000 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2001 eth ? SGE_RX_DROP_THRES : 0);
2002 if (unlikely(!skb)) {
2003 if (!eth)
2004 goto no_mem;
2005 q->rx_drops++;
2006 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2007 __skb_pull(skb, 2);
2009 if (++fl->cidx == fl->size)
2010 fl->cidx = 0;
2011 } else
2012 q->pure_rsps++;
2014 if (flags & RSPD_CTRL_MASK) {
2015 sleeping |= flags & RSPD_GTS_MASK;
2016 handle_rsp_cntrl_info(qs, flags);
2019 r++;
2020 if (unlikely(++q->cidx == q->size)) {
2021 q->cidx = 0;
2022 q->gen ^= 1;
2023 r = q->desc;
2025 prefetch(r);
2027 if (++q->credits >= (q->size / 4)) {
2028 refill_rspq(adap, q, q->credits);
2029 q->credits = 0;
2032 if (likely(skb != NULL)) {
2033 if (eth)
2034 rx_eth(adap, q, skb, ethpad);
2035 else {
2036 q->offload_pkts++;
2037 /* Preserve the RSS info in csum & priority */
2038 skb->csum = rss_hi;
2039 skb->priority = rss_lo;
2040 ngathered = rx_offload(&adap->tdev, q, skb,
2041 offload_skbs,
2042 ngathered);
2045 --budget_left;
2048 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2049 if (sleeping)
2050 check_ring_db(adap, qs, sleeping);
2052 smp_mb(); /* commit Tx queue .processed updates */
2053 if (unlikely(qs->txq_stopped != 0))
2054 restart_tx(qs);
2056 budget -= budget_left;
2057 return budget;
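/*
 * At this point budget has been decremented by the credits left over, so the
 * value returned is the number of responses consumed in this call;
 * napi_rx_handler() passes it back to the NAPI core as work_done.
 */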
2060 static inline int is_pure_response(const struct rsp_desc *r)
2062 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2064 return (n | r->len_cq) == 0;
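/*
 * A response is "pure" when it signals no async notification, carries no
 * immediate data, and consumed no free-list buffer (len_cq == 0); such
 * responses only return Tx credits and GTS/doorbell state.
 */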
2068 * napi_rx_handler - the NAPI handler for Rx processing
2069 * @napi: the napi instance
2070 * @budget: how many packets we can process in this round
2072 * Handler for new data events when using NAPI.
2074 static int napi_rx_handler(struct napi_struct *napi, int budget)
2076 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2077 struct adapter *adap = qs->adap;
2078 int work_done = process_responses(adap, qs, budget);
2080 if (likely(work_done < budget)) {
2081 napi_complete(napi);
2084 * Because we don't atomically flush the following
2085 * write it is possible that in very rare cases it can
2086 * reach the device in a way that races with a new
2087 * response being written plus an error interrupt
2088 * causing the NAPI interrupt handler below to return
2089 * unhandled status to the OS. To protect against
2090 * this would require flushing the write and doing
2091 * both the write and the flush with interrupts off.
2092 * Way too expensive and unjustifiable given the
2093 * rarity of the race.
2095 * The race cannot happen at all with MSI-X.
2097 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2098 V_NEWTIMER(qs->rspq.next_holdoff) |
2099 V_NEWINDEX(qs->rspq.cidx));
2101 return work_done;
2105 * Returns true if the device is already scheduled for polling.
2107 static inline int napi_is_scheduled(struct napi_struct *napi)
2109 return test_bit(NAPI_STATE_SCHED, &napi->state);
2113 * process_pure_responses - process pure responses from a response queue
2114 * @adap: the adapter
2115 * @qs: the queue set owning the response queue
2116 * @r: the first pure response to process
2118 * A simpler version of process_responses() that handles only pure (i.e.,
2119 * non data-carrying) responses. Such responses are too light-weight to
2120 * justify calling a softirq under NAPI, so we handle them specially in
2121 * the interrupt handler. The function is called with a pointer to a
2122 * response, which the caller must ensure is a valid pure response.
2124 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2126 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2127 struct rsp_desc *r)
2129 struct sge_rspq *q = &qs->rspq;
2130 unsigned int sleeping = 0;
2132 do {
2133 u32 flags = ntohl(r->flags);
2135 r++;
2136 if (unlikely(++q->cidx == q->size)) {
2137 q->cidx = 0;
2138 q->gen ^= 1;
2139 r = q->desc;
2141 prefetch(r);
2143 if (flags & RSPD_CTRL_MASK) {
2144 sleeping |= flags & RSPD_GTS_MASK;
2145 handle_rsp_cntrl_info(qs, flags);
2148 q->pure_rsps++;
2149 if (++q->credits >= (q->size / 4)) {
2150 refill_rspq(adap, q, q->credits);
2151 q->credits = 0;
2153 } while (is_new_response(r, q) && is_pure_response(r));
2155 if (sleeping)
2156 check_ring_db(adap, qs, sleeping);
2158 smp_mb(); /* commit Tx queue .processed updates */
2159 if (unlikely(qs->txq_stopped != 0))
2160 restart_tx(qs);
2162 return is_new_response(r, q);
2166 * handle_responses - decide what to do with new responses in NAPI mode
2167 * @adap: the adapter
2168 * @q: the response queue
2170 * This is used by the NAPI interrupt handlers to decide what to do with
2171 * new SGE responses. If there are no new responses it returns -1. If
2172 * there are new responses and they are pure (i.e., non-data carrying)
2173 * it handles them straight in hard interrupt context as they are very
2174 * cheap and don't deliver any packets. Finally, if there are any data
2175 * signaling responses it schedules the NAPI handler. Returns 1 if it
2176 * schedules NAPI, 0 if all new responses were pure.
2178 * The caller must ascertain NAPI is not already running.
2180 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2182 struct sge_qset *qs = rspq_to_qset(q);
2183 struct rsp_desc *r = &q->desc[q->cidx];
2185 if (!is_new_response(r, q))
2186 return -1;
2187 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2188 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2189 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2190 return 0;
2192 napi_schedule(&qs->napi);
2193 return 1;
2197 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2198 * (i.e., response queue serviced in hard interrupt).
2200 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2202 struct sge_qset *qs = cookie;
2203 struct adapter *adap = qs->adap;
2204 struct sge_rspq *q = &qs->rspq;
2206 spin_lock(&q->lock);
2207 if (process_responses(adap, qs, -1) == 0)
2208 q->unhandled_irqs++;
2209 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2210 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2211 spin_unlock(&q->lock);
2212 return IRQ_HANDLED;
2216 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2217 * (i.e., response queue serviced by NAPI polling).
2219 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2221 struct sge_qset *qs = cookie;
2222 struct sge_rspq *q = &qs->rspq;
2224 spin_lock(&q->lock);
2226 if (handle_responses(qs->adap, q) < 0)
2227 q->unhandled_irqs++;
2228 spin_unlock(&q->lock);
2229 return IRQ_HANDLED;
2233 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2234 * SGE response queues as well as error and other async events as they all use
2235 * the same MSI vector. We use one SGE response queue per port in this mode
2236 * and protect all response queues with queue 0's lock.
2238 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2240 int new_packets = 0;
2241 struct adapter *adap = cookie;
2242 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2244 spin_lock(&q->lock);
2246 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2247 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2248 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2249 new_packets = 1;
2252 if (adap->params.nports == 2 &&
2253 process_responses(adap, &adap->sge.qs[1], -1)) {
2254 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2256 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2257 V_NEWTIMER(q1->next_holdoff) |
2258 V_NEWINDEX(q1->cidx));
2259 new_packets = 1;
2262 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2263 q->unhandled_irqs++;
2265 spin_unlock(&q->lock);
2266 return IRQ_HANDLED;
2269 static int rspq_check_napi(struct sge_qset *qs)
2271 struct sge_rspq *q = &qs->rspq;
2273 if (!napi_is_scheduled(&qs->napi) &&
2274 is_new_response(&q->desc[q->cidx], q)) {
2275 napi_schedule(&qs->napi);
2276 return 1;
2278 return 0;
2282 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2283 * by NAPI polling). Handles data events from SGE response queues as well as
2284 * error and other async events as they all use the same MSI vector. We use
2285 * one SGE response queue per port in this mode and protect all response
2286 * queues with queue 0's lock.
2288 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2290 int new_packets;
2291 struct adapter *adap = cookie;
2292 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2294 spin_lock(&q->lock);
2296 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2297 if (adap->params.nports == 2)
2298 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2299 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2300 q->unhandled_irqs++;
2302 spin_unlock(&q->lock);
2303 return IRQ_HANDLED;
2307 * A helper function that processes responses and issues GTS.
2309 static inline int process_responses_gts(struct adapter *adap,
2310 struct sge_rspq *rq)
2312 int work;
2314 work = process_responses(adap, rspq_to_qset(rq), -1);
2315 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2316 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2317 return work;
2321 * The legacy INTx interrupt handler. This needs to handle data events from
2322 * SGE response queues as well as error and other async events as they all use
2323 * the same interrupt pin. We use one SGE response queue per port in this mode
2324 * and protect all response queues with queue 0's lock.
2326 static irqreturn_t t3_intr(int irq, void *cookie)
2328 int work_done, w0, w1;
2329 struct adapter *adap = cookie;
2330 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2331 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2333 spin_lock(&q0->lock);
2335 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2336 w1 = adap->params.nports == 2 &&
2337 is_new_response(&q1->desc[q1->cidx], q1);
2339 if (likely(w0 | w1)) {
2340 t3_write_reg(adap, A_PL_CLI, 0);
2341 t3_read_reg(adap, A_PL_CLI); /* flush */
2343 if (likely(w0))
2344 process_responses_gts(adap, q0);
2346 if (w1)
2347 process_responses_gts(adap, q1);
2349 work_done = w0 | w1;
2350 } else
2351 work_done = t3_slow_intr_handler(adap);
2353 spin_unlock(&q0->lock);
2354 return IRQ_RETVAL(work_done != 0);
2358 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2359 * Handles data events from SGE response queues as well as error and other
2360 * async events as they all use the same interrupt pin. We use one SGE
2361 * response queue per port in this mode and protect all response queues with
2362 * queue 0's lock.
2364 static irqreturn_t t3b_intr(int irq, void *cookie)
2366 u32 map;
2367 struct adapter *adap = cookie;
2368 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2370 t3_write_reg(adap, A_PL_CLI, 0);
2371 map = t3_read_reg(adap, A_SG_DATA_INTR);
2373 if (unlikely(!map)) /* shared interrupt, most likely */
2374 return IRQ_NONE;
2376 spin_lock(&q0->lock);
2378 if (unlikely(map & F_ERRINTR))
2379 t3_slow_intr_handler(adap);
2381 if (likely(map & 1))
2382 process_responses_gts(adap, q0);
2384 if (map & 2)
2385 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2387 spin_unlock(&q0->lock);
2388 return IRQ_HANDLED;
2392 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2393 * Handles data events from SGE response queues as well as error and other
2394 * async events as they all use the same interrupt pin. We use one SGE
2395 * response queue per port in this mode and protect all response queues with
2396 * queue 0's lock.
2398 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2400 u32 map;
2401 struct adapter *adap = cookie;
2402 struct sge_qset *qs0 = &adap->sge.qs[0];
2403 struct sge_rspq *q0 = &qs0->rspq;
2405 t3_write_reg(adap, A_PL_CLI, 0);
2406 map = t3_read_reg(adap, A_SG_DATA_INTR);
2408 if (unlikely(!map)) /* shared interrupt, most likely */
2409 return IRQ_NONE;
2411 spin_lock(&q0->lock);
2413 if (unlikely(map & F_ERRINTR))
2414 t3_slow_intr_handler(adap);
2416 if (likely(map & 1))
2417 napi_schedule(&qs0->napi);
2419 if (map & 2)
2420 napi_schedule(&adap->sge.qs[1].napi);
2422 spin_unlock(&q0->lock);
2423 return IRQ_HANDLED;
2427 * t3_intr_handler - select the top-level interrupt handler
2428 * @adap: the adapter
2429 * @polling: whether using NAPI to service response queues
2431 * Selects the top-level interrupt handler based on the type of interrupts
2432 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2433 * response queues.
2435 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2437 if (adap->flags & USING_MSIX)
2438 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2439 if (adap->flags & USING_MSI)
2440 return polling ? t3_intr_msi_napi : t3_intr_msi;
2441 if (adap->params.rev > 0)
2442 return polling ? t3b_intr_napi : t3b_intr;
2443 return t3_intr;
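/*
 * A minimal usage sketch (hypothetical caller, not part of this file): the
 * top-level driver picks the handler once and registers it, e.g.
 *
 *	irq_handler_t h = t3_intr_handler(adap, rspq_polling);
 *	err = request_irq(adap->pdev->irq, h, IRQF_SHARED, irq_name, adap);
 *
 * where rspq_polling and irq_name are placeholders.  For MSI-X the
 * per-queue-set handlers above take a struct sge_qset pointer as their
 * cookie rather than the adapter.
 */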
2446 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2447 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2448 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2449 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2450 F_HIRCQPARITYERROR)
2451 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2452 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2453 F_RSPQDISABLED)
2456 * t3_sge_err_intr_handler - SGE async event interrupt handler
2457 * @adapter: the adapter
2459 * Interrupt handler for SGE asynchronous (non-data) events.
2461 void t3_sge_err_intr_handler(struct adapter *adapter)
2463 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2465 if (status & SGE_PARERR)
2466 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2467 status & SGE_PARERR);
2468 if (status & SGE_FRAMINGERR)
2469 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2470 status & SGE_FRAMINGERR);
2472 if (status & F_RSPQCREDITOVERFOW)
2473 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2475 if (status & F_RSPQDISABLED) {
2476 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2478 CH_ALERT(adapter,
2479 "packet delivered to disabled response queue "
2480 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2483 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2484 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2485 status & F_HIPIODRBDROPERR ? "high" : "low");
2487 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2488 if (status & SGE_FATALERR)
2489 t3_fatal_err(adapter);
2493 * sge_timer_cb - perform periodic maintenance of an SGE qset
2494 * @data: the SGE queue set to maintain
2496 * Runs periodically from a timer to perform maintenance of an SGE queue
2497 * set. It performs two tasks:
2499 * a) Cleans up any completed Tx descriptors that may still be pending.
2500 * Normal descriptor cleanup happens when new packets are added to a Tx
2501 * queue so this timer is relatively infrequent and does any cleanup only
2502 * if the Tx queue has not seen any new packets in a while. We make a
2503 * best effort attempt to reclaim descriptors, in that we don't wait
2504 * around if we cannot get a queue's lock (which most likely is because
2505 * someone else is queueing new packets and so will also handle the clean
2506 * up). Since control queues use immediate data exclusively we don't
2507 * bother cleaning them up here.
2509 * b) Replenishes Rx queues that have run out due to memory shortage.
2510 * Normally new Rx buffers are added when existing ones are consumed but
2511 * when out of memory a queue can become empty. We try to add only a few
2512 * buffers here; the queue will be replenished fully as these new buffers
2513 * are used up if memory shortage has subsided.
2515 static void sge_timer_cb(unsigned long data)
2517 spinlock_t *lock;
2518 struct sge_qset *qs = (struct sge_qset *)data;
2519 struct adapter *adap = qs->adap;
2521 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2522 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2523 spin_unlock(&qs->txq[TXQ_ETH].lock);
2525 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2526 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2527 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2529 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2530 &adap->sge.qs[0].rspq.lock;
2531 if (spin_trylock_irq(lock)) {
2532 if (!napi_is_scheduled(&qs->napi)) {
2533 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2535 if (qs->fl[0].credits < qs->fl[0].size)
2536 __refill_fl(adap, &qs->fl[0]);
2537 if (qs->fl[1].credits < qs->fl[1].size)
2538 __refill_fl(adap, &qs->fl[1]);
2540 if (status & (1 << qs->rspq.cntxt_id)) {
2541 qs->rspq.starved++;
2542 if (qs->rspq.credits) {
2543 refill_rspq(adap, &qs->rspq, 1);
2544 qs->rspq.credits--;
2545 qs->rspq.restarted++;
2546 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2547 1 << qs->rspq.cntxt_id);
2551 spin_unlock_irq(lock);
2553 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2557 * t3_update_qset_coalesce - update coalescing settings for a queue set
2558 * @qs: the SGE queue set
2559 * @p: new queue set parameters
2561 * Update the coalescing settings for an SGE queue set. Nothing is done
2562 * if the queue set is not initialized yet.
2564 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2566 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2567 qs->rspq.polling = p->polling;
2568 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
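/*
 * Holdoff values are expressed in the same 100 ns SGE timer ticks, hence the
 * factor of 10: the 5 us default from t3_sge_prep() becomes holdoff_tmr = 50,
 * and the max() keeps the value from ever being programmed as 0.
 */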
2572 * t3_sge_alloc_qset - initialize an SGE queue set
2573 * @adapter: the adapter
2574 * @id: the queue set id
2575 * @nports: how many Ethernet ports will be using this queue set
2576 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2577 * @p: configuration parameters for this queue set
2578 * @ntxq: number of Tx queues for the queue set
2579 * @netdev: net device associated with this queue set
2581 * Allocate resources and initialize an SGE queue set. A queue set
2582 * comprises a response queue, two Rx free-buffer queues, and up to 3
2583 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2584 * queue, offload queue, and control queue.
2586 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2587 int irq_vec_idx, const struct qset_params *p,
2588 int ntxq, struct net_device *dev)
2590 int i, ret = -ENOMEM;
2591 struct sge_qset *q = &adapter->sge.qs[id];
2593 init_qset_cntxt(q, id);
2594 init_timer(&q->tx_reclaim_timer);
2595 q->tx_reclaim_timer.data = (unsigned long)q;
2596 q->tx_reclaim_timer.function = sge_timer_cb;
2598 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2599 sizeof(struct rx_desc),
2600 sizeof(struct rx_sw_desc),
2601 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2602 if (!q->fl[0].desc)
2603 goto err;
2605 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2606 sizeof(struct rx_desc),
2607 sizeof(struct rx_sw_desc),
2608 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2609 if (!q->fl[1].desc)
2610 goto err;
2612 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2613 sizeof(struct rsp_desc), 0,
2614 &q->rspq.phys_addr, NULL);
2615 if (!q->rspq.desc)
2616 goto err;
2618 for (i = 0; i < ntxq; ++i) {
2620 * The control queue always uses immediate data so does not
2621 * need to keep track of any sk_buffs.
2623 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2625 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2626 sizeof(struct tx_desc), sz,
2627 &q->txq[i].phys_addr,
2628 &q->txq[i].sdesc);
2629 if (!q->txq[i].desc)
2630 goto err;
2632 q->txq[i].gen = 1;
2633 q->txq[i].size = p->txq_size[i];
2634 spin_lock_init(&q->txq[i].lock);
2635 skb_queue_head_init(&q->txq[i].sendq);
2638 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2639 (unsigned long)q);
2640 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2641 (unsigned long)q);
2643 q->fl[0].gen = q->fl[1].gen = 1;
2644 q->fl[0].size = p->fl_size;
2645 q->fl[1].size = p->jumbo_size;
2647 q->rspq.gen = 1;
2648 q->rspq.size = p->rspq_size;
2649 spin_lock_init(&q->rspq.lock);
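/*
 * Stop the Ethernet Tx queue when fewer free descriptors remain than roughly
 * one worst-case packet per port needs: an SGL covering MAX_SKB_FRAGS + 1
 * buffers plus 3 flits for the rest of the work request, converted to
 * descriptors.
 */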
2651 q->txq[TXQ_ETH].stop_thres = nports *
2652 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2654 #if FL0_PG_CHUNK_SIZE > 0
2655 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
2656 #else
2657 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
2658 #endif
2659 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2660 q->fl[1].buf_size = is_offload(adapter) ?
2661 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2662 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
2664 spin_lock(&adapter->sge.reg_lock);
2666 /* FL threshold comparison uses < */
2667 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2668 q->rspq.phys_addr, q->rspq.size,
2669 q->fl[0].buf_size, 1, 0);
2670 if (ret)
2671 goto err_unlock;
2673 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2674 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2675 q->fl[i].phys_addr, q->fl[i].size,
2676 q->fl[i].buf_size, p->cong_thres, 1,
2677 0);
2678 if (ret)
2679 goto err_unlock;
2682 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2683 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2684 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2685 1, 0);
2686 if (ret)
2687 goto err_unlock;
2689 if (ntxq > 1) {
2690 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2691 USE_GTS, SGE_CNTXT_OFLD, id,
2692 q->txq[TXQ_OFLD].phys_addr,
2693 q->txq[TXQ_OFLD].size, 0, 1, 0);
2694 if (ret)
2695 goto err_unlock;
2698 if (ntxq > 2) {
2699 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2700 SGE_CNTXT_CTRL, id,
2701 q->txq[TXQ_CTRL].phys_addr,
2702 q->txq[TXQ_CTRL].size,
2703 q->txq[TXQ_CTRL].token, 1, 0);
2704 if (ret)
2705 goto err_unlock;
2708 spin_unlock(&adapter->sge.reg_lock);
2710 q->adap = adapter;
2711 q->netdev = dev;
2712 t3_update_qset_coalesce(q, p);
2714 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2715 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2716 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2718 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2719 V_NEWTIMER(q->rspq.holdoff_tmr));
2721 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2722 return 0;
2724 err_unlock:
2725 spin_unlock(&adapter->sge.reg_lock);
2726 err:
2727 t3_free_qset(adapter, q);
2728 return ret;
2732 * t3_free_sge_resources - free SGE resources
2733 * @adap: the adapter
2735 * Frees resources used by the SGE queue sets.
2737 void t3_free_sge_resources(struct adapter *adap)
2739 int i;
2741 for (i = 0; i < SGE_QSETS; ++i)
2742 t3_free_qset(adap, &adap->sge.qs[i]);
2746 * t3_sge_start - enable SGE
2747 * @adap: the adapter
2749 * Enables the SGE for DMAs. This is the last step in starting packet
2750 * transfers.
2752 void t3_sge_start(struct adapter *adap)
2754 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2758 * t3_sge_stop - disable SGE operation
2759 * @adap: the adapter
2761 * Disables the DMA engine. This can be called in emergencies (e.g.,
2762 * from error interrupts) or from normal process context. In the latter
2763 * case it also disables any pending queue restart tasklets. Note that
2764 * if it is called in interrupt context it cannot disable the restart
2765 * tasklets as it cannot wait, however the tasklets will have no effect
2766 * since the doorbells are disabled and the driver will call this again
2767 * later from process context, at which time the tasklets will be stopped
2768 * if they are still running.
2770 void t3_sge_stop(struct adapter *adap)
2772 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2773 if (!in_interrupt()) {
2774 int i;
2776 for (i = 0; i < SGE_QSETS; ++i) {
2777 struct sge_qset *qs = &adap->sge.qs[i];
2779 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2780 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2786 * t3_sge_init - initialize SGE
2787 * @adap: the adapter
2788 * @p: the SGE parameters
2790 * Performs SGE initialization needed every time after a chip reset.
2791 * We do not initialize any of the queue sets here, instead the driver
2792 * top-level must request those individually. We also do not enable DMA
2793 * here, that should be done after the queues have been set up.
2795 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2797 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2799 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2800 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
2801 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2802 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2803 #if SGE_NUM_GENBITS == 1
2804 ctrl |= F_EGRGENCTRL;
2805 #endif
2806 if (adap->params.rev > 0) {
2807 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2808 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2810 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2811 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2812 V_LORCQDRBTHRSH(512));
2813 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2814 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2815 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2816 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
2817 adap->params.rev < T3_REV_C ? 1000 : 500);
2818 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2819 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2820 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2821 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2822 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2826 * t3_sge_prep - one-time SGE initialization
2827 * @adap: the associated adapter
2828 * @p: SGE parameters
2830 * Performs one-time initialization of SGE SW state. Includes determining
2831 * defaults for the assorted SGE parameters, which admins can change until
2832 * they are used to initialize the SGE.
2834 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
2836 int i;
2838 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2839 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2841 for (i = 0; i < SGE_QSETS; ++i) {
2842 struct qset_params *q = p->qset + i;
2844 q->polling = adap->params.rev > 0;
2845 q->coalesce_usecs = 5;
2846 q->rspq_size = 1024;
2847 q->fl_size = 1024;
2848 q->jumbo_size = 512;
2849 q->txq_size[TXQ_ETH] = 1024;
2850 q->txq_size[TXQ_OFLD] = 1024;
2851 q->txq_size[TXQ_CTRL] = 256;
2852 q->cong_thres = 0;
2855 spin_lock_init(&adap->sge.reg_lock);
2859 * t3_get_desc - dump an SGE descriptor for debugging purposes
2860 * @qs: the queue set
2861 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2862 * @idx: the descriptor index in the queue
2863 * @data: where to dump the descriptor contents
2865 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2866 * size of the descriptor.
2868 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2869 unsigned char *data)
2871 if (qnum >= 6)
2872 return -EINVAL;
2874 if (qnum < 3) {
2875 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2876 return -EINVAL;
2877 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2878 return sizeof(struct tx_desc);
2881 if (qnum == 3) {
2882 if (!qs->rspq.desc || idx >= qs->rspq.size)
2883 return -EINVAL;
2884 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2885 return sizeof(struct rsp_desc);
2888 qnum -= 4;
2889 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2890 return -EINVAL;
2891 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2892 return sizeof(struct rx_desc);
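/*
 * A minimal usage sketch (hypothetical debug caller, not part of this file):
 *
 *	unsigned char buf[sizeof(struct rsp_desc)];
 *	int len = t3_get_desc(qs, 3, idx, buf);	   // qnum 3: response queue
 *	if (len > 0)
 *		print_hex_dump(KERN_DEBUG, "rspd: ", DUMP_PREFIX_OFFSET,
 *			       16, 1, buf, len, 0);
 */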