/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: cxgb_sge.c,v 1.11 2009/01/03 03:43:22 yamt Exp $");
#endif
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_sge.c,v 1.30 2007/09/09 04:34:03 kmacy Exp $");
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#ifdef __FreeBSD__
#include <sys/module.h>
#include <sys/bus.h>
#endif
#include <sys/conf.h>
#include <machine/bus.h>
#ifdef __FreeBSD__
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#endif
#include <sys/queue.h>
#include <sys/sysctl.h>
#ifdef __FreeBSD__
#include <sys/taskqueue.h>
#endif

#include <sys/proc.h>
#include <sys/sched.h>
#ifdef __FreeBSD__
#include <sys/smp.h>
#endif
#include <sys/systm.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#ifdef __FreeBSD__
#include <dev/cxgb/cxgb_include.h>
#endif
#ifdef __NetBSD__
#include <dev/pci/cxgb_include.h>
#endif
#endif

uint32_t collapse_free = 0;
uint32_t mb_free_vec_free = 0;
int      txq_fills = 0;
int      collapse_mbufs = 0;
static int bogus_imm = 0;
#ifndef DISABLE_MBUF_IOVEC
static int recycle_enable = 1;
#endif
#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_RX_DROP_THRES	16
#define SGE_RX_COPY_THRES	128

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD	(hz >> 1)

/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)

/*
 * Values for sge_txq.flags
 */
enum {
	TXQ_RUNNING	= 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};
struct tx_desc {
	uint64_t	flit[TX_DESC_FLITS];
} __packed;

struct rx_desc {
	uint32_t	addr_lo;
	uint32_t	len_gen;
	uint32_t	gen2;
	uint32_t	addr_hi;
} __packed;

struct rsp_desc {		/* response queue descriptor */
	struct rss_header	rss_hdr;
	uint32_t		flags;
	uint32_t		len_cq;
	uint8_t			imm_data[47];
	uint8_t			intr_gen;
} __packed;

#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE	(1 << 3)
#define TX_SW_DESC_MAPPED	(1 << 4)

#define RSPQ_NSOP_NEOP	G_RSPD_SOP_EOP(0)
#define RSPQ_EOP	G_RSPD_SOP_EOP(F_RSPD_EOP)
#define RSPQ_SOP	G_RSPD_SOP_EOP(F_RSPD_SOP)
#define RSPQ_SOP_EOP	G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct mbuf	*m;
#ifdef __NetBSD__
	bus_dma_segment_t segs[1];
#endif
	bus_dmamap_t	map;
	int		flags;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	void		*cl;
	bus_dmamap_t	map;
	int		flags;
};

struct txq_state {
	unsigned int	compl;
	unsigned int	gen;
	unsigned int	pidx;
};

#ifdef __FreeBSD__
struct refill_fl_cb_arg {
	int		error;
	bus_dma_segment_t seg;
	int		nseg;
};
#endif
/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static uint8_t flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
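/*
 * Example: with SGE_NUM_GENBITS == 2 the table maps 1-15 flits to one
 * descriptor, 16-29 flits to two, and so on, i.e. each descriptor after
 * the first adds room for WR_FLITS - 1 == 14 more flits, consistent with
 * the formula above.  flits_to_desc() below is simply this table lookup.
 */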
static int lro_default = 0;
int cxgb_debug = 0;

static void t3_free_qset(adapter_t *sc, struct sge_qset *q);
static void sge_timer_cb(void *arg);
#ifdef __FreeBSD__
static void sge_timer_reclaim(void *arg, int ncount);
static void sge_txq_reclaim_handler(void *arg, int ncount);
#endif
#ifdef __NetBSD__
static void sge_timer_reclaim(struct work *wk, void *arg);
static void sge_txq_reclaim_handler(struct work *wk, void *arg);
#endif
static int free_tx_desc(struct sge_txq *q, int n, struct mbuf **m_vec);
/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static __inline int
reclaim_completed_tx(struct sge_txq *q, int nbufs, struct mbuf **mvec)
{
	int reclaimed, reclaim = desc_reclaimable(q);
	int n = 0;

	mtx_assert(&q->lock, MA_OWNED);
	if (reclaim > 0) {
		n = free_tx_desc(q, min(reclaim, nbufs), mvec);
		reclaimed = min(reclaim, nbufs);
		q->cleaned += reclaimed;
		q->in_use -= reclaimed;
	}
	return (n);
}
/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static __inline int
should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
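/*
 * In other words, descriptors the SGE has already processed but that have
 * not yet been cleaned count as free, and a suspended queue is restarted
 * only once at least half of its ring is free by that measure.
 */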
/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 */
void
t3_sge_init(adapter_t *adap, struct sge_params *p)
{
	u_int ctrl, ups;

	ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	       F_CQCRDTCTRL |
	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
		ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static __inline unsigned int
sgl_len(unsigned int n)
{
	return ((3 * n) / 2 + (n & 1));
}
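/*
 * Each struct sg_ent packs two address/length pairs into three flits
 * (two 8-byte addresses plus two 4-byte lengths), so n entries take
 * 3n/2 flits, with two flits for a final unpaired entry:
 * sgl_len(1) == 2, sgl_len(2) == 3, sgl_len(3) == 5.
 */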
/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
319 #ifdef DISABLE_MBUF_IOVEC
320 static __inline int
321 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct t3_mbuf_hdr *mh)
323 struct mbuf *m;
324 int len;
325 uint32_t flags = ntohl(resp->flags);
326 uint8_t sopeop = G_RSPD_SOP_EOP(flags);
329 * would be a firmware bug
331 if (sopeop == RSPQ_NSOP_NEOP || sopeop == RSPQ_SOP)
332 return (0);
334 m = m_gethdr(M_NOWAIT, MT_DATA);
335 len = G_RSPD_LEN(ntohl(resp->len_cq));
337 if (m) {
338 MH_ALIGN(m, IMMED_PKT_SIZE);
339 memcpy(m->m_data, resp->imm_data, IMMED_PKT_SIZE);
340 m->m_len = len;
342 switch (sopeop) {
343 case RSPQ_SOP_EOP:
344 mh->mh_head = mh->mh_tail = m;
345 m->m_pkthdr.len = len;
346 m->m_flags |= M_PKTHDR;
347 break;
348 case RSPQ_EOP:
349 m->m_flags &= ~M_PKTHDR;
350 mh->mh_head->m_pkthdr.len += len;
351 mh->mh_tail->m_next = m;
352 mh->mh_tail = m;
353 break;
356 return (m != NULL);
359 #else
360 static int
361 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m, void *cl, uint32_t flags)
363 int len, error;
364 uint8_t sopeop = G_RSPD_SOP_EOP(flags);
367 * would be a firmware bug
369 len = G_RSPD_LEN(ntohl(resp->len_cq));
370 if (sopeop == RSPQ_NSOP_NEOP || sopeop == RSPQ_SOP) {
371 if (cxgb_debug)
			device_printf(sc->dev, "unexpected value sopeop=%d flags=0x%x len=%d in get_imm_packet\n", sopeop, flags, len);
373 bogus_imm++;
374 return (EINVAL);
376 error = 0;
377 switch (sopeop) {
378 case RSPQ_SOP_EOP:
379 m->m_len = m->m_pkthdr.len = len;
380 memcpy(mtod(m, uint8_t *), resp->imm_data, len);
381 break;
382 case RSPQ_EOP:
383 memcpy(cl, resp->imm_data, len);
384 m_iovappend(m, cl, MSIZE, len, 0);
385 break;
386 default:
387 bogus_imm++;
388 error = EINVAL;
391 return (error);
393 #endif
static __inline u_int
flits_to_desc(u_int n)
{
	return (flit_desc_map[n]);
}
401 void
402 t3_sge_err_intr_handler(adapter_t *adapter)
404 unsigned int v, status;
407 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
409 if (status & F_RSPQCREDITOVERFOW)
410 CH_ALERT(adapter, "SGE response queue credit overflow\n");
412 if (status & F_RSPQDISABLED) {
413 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
415 CH_ALERT(adapter,
416 "packet delivered to disabled response queue (0x%x)\n",
417 (v >> S_RSPQ0DISABLED) & 0xff);
420 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
421 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
422 t3_fatal_err(adapter);
425 void
426 t3_sge_prep(adapter_t *adap, struct sge_params *p)
428 int i;
430 /* XXX Does ETHER_ALIGN need to be accounted for here? */
431 p->max_pkt_size = MJUM16BYTES - sizeof(struct cpl_rx_data);
433 for (i = 0; i < SGE_QSETS; ++i) {
434 struct qset_params *q = p->qset + i;
436 q->polling = adap->params.rev > 0;
438 if (adap->params.nports > 2)
439 q->coalesce_nsecs = 50000;
440 else
441 q->coalesce_nsecs = 5000;
443 q->rspq_size = RSPQ_Q_SIZE;
444 q->fl_size = FL_Q_SIZE;
445 q->jumbo_size = JUMBO_Q_SIZE;
446 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
447 q->txq_size[TXQ_OFLD] = 1024;
448 q->txq_size[TXQ_CTRL] = 256;
449 q->cong_thres = 0;
454 t3_sge_alloc(adapter_t *sc)
456 #ifdef __FreeBSD__
457 /* The parent tag. */
458 if (bus_dma_tag_create( NULL, /* parent */
459 1, 0, /* algnmnt, boundary */
460 BUS_SPACE_MAXADDR, /* lowaddr */
461 BUS_SPACE_MAXADDR, /* highaddr */
462 NULL, NULL, /* filter, filterarg */
463 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
464 BUS_SPACE_UNRESTRICTED, /* nsegments */
465 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
466 0, /* flags */
467 NULL, NULL, /* lock, lockarg */
468 &sc->parent_dmat)) {
469 device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
470 return (ENOMEM);
474 * DMA tag for normal sized RX frames
476 if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
477 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
478 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
479 device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
480 return (ENOMEM);
484 * DMA tag for jumbo sized RX frames.
486 if (bus_dma_tag_create(sc->parent_dmat, MJUMPAGESIZE, 0, BUS_SPACE_MAXADDR,
487 BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, MJUMPAGESIZE,
488 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
489 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
490 return (ENOMEM);
494 * DMA tag for TX frames.
496 if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
497 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
498 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
499 NULL, NULL, &sc->tx_dmat)) {
500 device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
501 return (ENOMEM);
503 #endif
505 #ifdef __NetBSD__
506 /* The parent tag. */
507 sc->parent_dmat = sc->pa.pa_dmat;
510 * DMA tag for normal sized RX frames
512 sc->rx_dmat = sc->pa.pa_dmat;
515 * DMA tag for jumbo sized RX frames.
517 sc->rx_jumbo_dmat = sc->pa.pa_dmat;
520 * DMA tag for TX frames.
522 sc->tx_dmat = sc->pa.pa_dmat;
523 #endif
525 return (0);
529 t3_sge_free(struct adapter * sc)
531 #ifdef __FreeBSD__
532 if (sc->tx_dmat != NULL)
533 bus_dma_tag_destroy(sc->tx_dmat);
535 if (sc->rx_jumbo_dmat != NULL)
536 bus_dma_tag_destroy(sc->rx_jumbo_dmat);
538 if (sc->rx_dmat != NULL)
539 bus_dma_tag_destroy(sc->rx_dmat);
541 if (sc->parent_dmat != NULL)
542 bus_dma_tag_destroy(sc->parent_dmat);
543 #endif
545 return (0);
548 void
549 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
552 qs->rspq.holdoff_tmr = max(p->coalesce_nsecs/100, 1U);
553 qs->rspq.polling = 0 /* p->polling */;
556 #ifdef __FreeBSD__
557 static void
558 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
560 struct refill_fl_cb_arg *cb_arg = arg;
562 cb_arg->error = error;
563 cb_arg->seg = segs[0];
564 cb_arg->nseg = nseg;
567 #endif
570 * refill_fl - refill an SGE free-buffer list
571 * @sc: the controller softc
572 * @q: the free-list to refill
573 * @n: the number of new buffers to allocate
575 * (Re)populate an SGE free-buffer list with up to @n new packet buffers.
576 * The caller must assure that @n does not exceed the queue's capacity.
578 static void
579 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
581 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
582 struct rx_desc *d = &q->desc[q->pidx];
583 #ifdef __FreeBSD__
584 struct refill_fl_cb_arg cb_arg;
585 #endif
586 void *cl;
587 int err;
589 #ifdef __FreeBSD__
590 cb_arg.error = 0;
591 #endif
592 while (n--) {
594 * We only allocate a cluster, mbuf allocation happens after rx
596 #ifdef __FreeBSD__
597 if ((cl = m_cljget(NULL, M_DONTWAIT, q->buf_size)) == NULL) {
598 log(LOG_WARNING, "Failed to allocate cluster\n");
599 goto done;
601 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
602 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
603 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
604 uma_zfree(q->zone, cl);
605 goto done;
607 sd->flags |= RX_SW_DESC_MAP_CREATED;
609 err = bus_dmamap_load(q->entry_tag, sd->map, cl, q->buf_size,
610 refill_fl_cb, &cb_arg, 0);
612 if (err != 0 || cb_arg.error) {
613 log(LOG_WARNING, "failure in refill_fl %d\n", cb_arg.error);
615 * XXX free cluster
617 return;
619 #endif
620 #ifdef __NetBSD__
621 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0)
623 err = bus_dmamap_create(sc->pa.pa_dmat,
624 q->buf_size, 1, q->buf_size, 0,
625 BUS_DMA_ALLOCNOW, &sd->map);
626 if (err != 0)
628 log(LOG_WARNING, "failure in refill_fl\n");
629 return;
631 sd->flags |= RX_SW_DESC_MAP_CREATED;
633 cl = malloc(q->buf_size, M_DEVBUF, M_NOWAIT);
634 if (cl == NULL)
636 log(LOG_WARNING, "Failed to allocate cluster\n");
637 break;
639 err = bus_dmamap_load(sc->pa.pa_dmat, sd->map, cl, q->buf_size, NULL, BUS_DMA_NOWAIT);
640 if (err)
642 log(LOG_WARNING, "failure in refill_fl\n");
643 free(cl, M_DEVBUF);
644 return;
646 #endif
648 sd->flags |= RX_SW_DESC_INUSE;
649 sd->cl = cl;
650 #ifdef __FreeBSD__
651 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
652 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
653 #endif
654 #ifdef __NetBSD__
655 d->addr_lo = htobe32(sd->map->dm_segs[0].ds_addr & 0xffffffff);
656 d->addr_hi = htobe32(((uint64_t)sd->map->dm_segs[0].ds_addr>>32) & 0xffffffff);
657 #endif
658 d->len_gen = htobe32(V_FLD_GEN1(q->gen));
659 d->gen2 = htobe32(V_FLD_GEN2(q->gen));
661 d++;
662 sd++;
664 if (++q->pidx == q->size) {
665 q->pidx = 0;
666 q->gen ^= 1;
667 sd = q->sdesc;
668 d = q->desc;
670 q->credits++;
673 #ifdef __FreeBSD__
674 done:
675 #endif
676 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
681 * free_rx_bufs - free the Rx buffers on an SGE free list
 *	@sc: the controller softc
683 * @q: the SGE free list to clean up
685 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
686 * this queue should be stopped before calling this function.
688 static void
689 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
691 u_int cidx = q->cidx;
693 while (q->credits--) {
694 struct rx_sw_desc *d = &q->sdesc[cidx];
696 if (d->flags & RX_SW_DESC_INUSE) {
697 #ifdef __FreeBSD__
698 bus_dmamap_unload(q->entry_tag, d->map);
699 bus_dmamap_destroy(q->entry_tag, d->map);
700 uma_zfree(q->zone, d->cl);
701 #endif
702 #ifdef __NetBSD__
703 bus_dmamap_unload(q->entry_tag, d->map);
704 bus_dmamap_destroy(q->entry_tag, d->map);
705 d->map = NULL;
706 free(d->cl, M_DEVBUF);
707 d->cl = NULL;
708 #endif
710 d->cl = NULL;
711 if (++cidx == q->size)
712 cidx = 0;
716 static __inline void
717 __refill_fl(adapter_t *adap, struct sge_fl *fl)
719 refill_fl(adap, fl, min(16U, fl->size - fl->credits));
722 #ifndef DISABLE_MBUF_IOVEC
724 * recycle_rx_buf - recycle a receive buffer
725 * @adapter: the adapter
726 * @q: the SGE free list
727 * @idx: index of buffer to recycle
729 * Recycles the specified buffer on the given free list by adding it at
730 * the next available slot on the list.
732 static void
733 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
735 struct rx_desc *from = &q->desc[idx];
736 struct rx_desc *to = &q->desc[q->pidx];
738 q->sdesc[q->pidx] = q->sdesc[idx];
739 to->addr_lo = from->addr_lo; // already big endian
740 to->addr_hi = from->addr_hi; // likewise
741 wmb();
742 to->len_gen = htobe32(V_FLD_GEN1(q->gen));
743 to->gen2 = htobe32(V_FLD_GEN2(q->gen));
744 q->credits++;
746 if (++q->pidx == q->size) {
747 q->pidx = 0;
748 q->gen ^= 1;
750 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
752 #endif
754 #ifdef __FreeBSD__
755 static void
756 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
758 uint32_t *addr;
760 addr = arg;
761 *addr = segs[0].ds_addr;
763 #endif
765 static int
766 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
767 bus_addr_t *phys,
768 void *desc, void *sdesc, bus_dma_tag_t *tag,
769 bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
771 size_t len = nelem * elem_size;
772 void *s = NULL;
773 void *p = NULL;
774 int err;
775 #ifdef __NetBSD__
776 bus_dma_segment_t phys_seg;
777 #endif
779 #ifdef __FreeBSD__
780 if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
781 BUS_SPACE_MAXADDR_32BIT,
782 BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
783 len, 0, NULL, NULL, tag)) != 0) {
784 device_printf(sc->dev, "Cannot allocate descriptor tag\n");
785 return (ENOMEM);
788 if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
789 map)) != 0) {
790 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
791 return (ENOMEM);
794 bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
795 memset(p, 0, len);
796 *(void **)desc = p;
798 if (sw_size) {
799 len = nelem * sw_size;
800 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
801 *(void **)sdesc = s;
803 if (parent_entry_tag == NULL)
804 return (0);
806 if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
807 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
808 NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
809 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
810 NULL, NULL, entry_tag)) != 0) {
811 device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
812 return (ENOMEM);
814 #endif
816 #ifdef __NetBSD__
817 int nsegs;
819 *tag = sc->pa.pa_dmat;
821 /* allocate wired physical memory for DMA descriptor array */
822 err = bus_dmamem_alloc(*tag, len, PAGE_SIZE, 0, &phys_seg, 1,
823 &nsegs, BUS_DMA_NOWAIT);
824 if (err != 0)
826 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
827 return (ENOMEM);
829 *phys = phys_seg.ds_addr;
831 /* map physical address to kernel virtual address */
832 err = bus_dmamem_map(*tag, &phys_seg, 1, len, &p,
833 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
834 if (err != 0)
836 device_printf(sc->dev, "Cannot map descriptor memory\n");
837 return (ENOMEM);
840 memset(p, 0, len);
841 *(void **)desc = p;
843 if (sw_size)
845 len = nelem * sw_size;
846 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
847 *(void **)sdesc = s;
850 if (parent_entry_tag == NULL)
851 return (0);
852 *entry_tag = sc->pa.pa_dmat;
853 #endif
855 return (0);
858 static void
859 #ifdef __FreeBSD__
860 sge_slow_intr_handler(void *arg, int ncount)
861 #endif
862 #ifdef __NetBSD__
863 sge_slow_intr_handler(struct work *wk, void *arg)
864 #endif
866 adapter_t *sc = arg;
868 t3_slow_intr_handler(sc);
872 * sge_timer_cb - perform periodic maintenance of an SGE qset
873 * @data: the SGE queue set to maintain
875 * Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It performs the following tasks:
878 * a) Cleans up any completed Tx descriptors that may still be pending.
879 * Normal descriptor cleanup happens when new packets are added to a Tx
880 * queue so this timer is relatively infrequent and does any cleanup only
881 * if the Tx queue has not seen any new packets in a while. We make a
882 * best effort attempt to reclaim descriptors, in that we don't wait
883 * around if we cannot get a queue's lock (which most likely is because
884 * someone else is queueing new packets and so will also handle the clean
885 * up). Since control queues use immediate data exclusively we don't
886 * bother cleaning them up here.
888 * b) Replenishes Rx queues that have run out due to memory shortage.
889 * Normally new Rx buffers are added when existing ones are consumed but
890 * when out of memory a queue can become empty. We try to add only a few
891 * buffers here, the queue will be replenished fully as these new buffers
892 * are used up if memory shortage has subsided.
894 * c) Return coalesced response queue credits in case a response queue is
895 * starved.
897 * d) Ring doorbells for T304 tunnel queues since we have seen doorbell
898 * fifo overflows and the FW doesn't implement any recovery scheme yet.
901 static void
902 sge_timer_cb(void *arg)
904 adapter_t *sc = arg;
905 struct port_info *p;
906 struct sge_qset *qs;
907 struct sge_txq *txq;
908 int i, j;
909 int reclaim_eth, reclaim_ofl, refill_rx;
911 for (i = 0; i < sc->params.nports; i++)
912 for (j = 0; j < sc->port[i].nqsets; j++) {
913 qs = &sc->sge.qs[i + j];
914 txq = &qs->txq[0];
915 reclaim_eth = txq[TXQ_ETH].processed - txq[TXQ_ETH].cleaned;
916 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
917 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
918 (qs->fl[1].credits < qs->fl[1].size));
919 if (reclaim_eth || reclaim_ofl || refill_rx) {
920 p = &sc->port[i];
921 #ifdef __FreeBSD__
922 taskqueue_enqueue(p->tq, &p->timer_reclaim_task);
923 #endif
925 #ifdef __NetBSD__
926 workqueue_enqueue(p->timer_reclaim_task.wq, &p->timer_reclaim_task.w, NULL);
927 #endif
928 break;
931 if (sc->params.nports > 2) {
932 int k;
934 for_each_port(sc, k) {
935 struct port_info *pi = &sc->port[k];
937 t3_write_reg(sc, A_SG_KDOORBELL,
938 F_SELEGRCNTX |
939 (FW_TUNNEL_SGEEC_START + pi->first_qset));
942 if (sc->open_device_map != 0)
943 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
947 * This is meant to be a catch-all function to keep sge state private
948 * to sge.c
952 t3_sge_init_adapter(adapter_t *sc)
954 #ifdef __FreeBSD__
955 callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
956 #endif
957 #ifdef __NetBSD__
958 callout_init(&sc->sge_timer_ch, 0);
959 #endif
960 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
961 #ifdef __FreeBSD__
962 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
963 #endif
964 #ifdef __NetBSD__
965 sc->slow_intr_task.name = "sge_slow_intr";
966 sc->slow_intr_task.func = sge_slow_intr_handler;
967 sc->slow_intr_task.context = sc;
968 kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &sc->slow_intr_task, NULL, "cxgb_make_task");
969 #endif
970 return (0);
974 t3_sge_init_port(struct port_info *p)
976 #ifdef __FreeBSD__
977 TASK_INIT(&p->timer_reclaim_task, 0, sge_timer_reclaim, p);
978 #endif
979 #ifdef __NetBSD__
980 p->timer_reclaim_task.name = "sge_timer_reclaim";
981 p->timer_reclaim_task.func = sge_timer_reclaim;
982 p->timer_reclaim_task.context = p;
983 kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &p->timer_reclaim_task, NULL, "cxgb_make_task");
984 #endif
986 return (0);
989 void
990 t3_sge_deinit_sw(adapter_t *sc)
992 #ifdef __FreeBSD__
993 int i;
994 #endif
996 callout_drain(&sc->sge_timer_ch);
997 #ifdef __FreeBSD__
998 if (sc->tq)
999 taskqueue_drain(sc->tq, &sc->slow_intr_task);
1000 for (i = 0; i < sc->params.nports; i++)
1001 if (sc->port[i].tq != NULL)
1002 taskqueue_drain(sc->port[i].tq, &sc->port[i].timer_reclaim_task);
1003 #endif
1007 * refill_rspq - replenish an SGE response queue
1008 * @adapter: the adapter
1009 * @q: the response queue to replenish
1010 * @credits: how many new responses to make available
1012 * Replenishes a response queue by making the supplied number of responses
1013 * available to HW.
1015 static __inline void
1016 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
1019 /* mbufs are allocated on demand when a rspq entry is processed. */
1020 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
1021 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
1024 static __inline void
1025 sge_txq_reclaim_(struct sge_txq *txq)
1027 int reclaimable, i, n;
1028 struct mbuf *m_vec[TX_CLEAN_MAX_DESC];
1029 struct port_info *p;
1031 p = txq->port;
1032 reclaim_more:
1033 n = 0;
1034 reclaimable = desc_reclaimable(txq);
1035 if (reclaimable > 0 && mtx_trylock(&txq->lock)) {
1036 n = reclaim_completed_tx(txq, TX_CLEAN_MAX_DESC, m_vec);
1037 mtx_unlock(&txq->lock);
1039 if (n == 0)
1040 return;
1042 for (i = 0; i < n; i++) {
1043 m_freem_vec(m_vec[i]);
1045 if (p && p->ifp->if_drv_flags & IFF_DRV_OACTIVE &&
1046 txq->size - txq->in_use >= TX_START_MAX_DESC) {
1047 txq_fills++;
1048 p->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1049 #ifdef __FreeBSD__
1050 taskqueue_enqueue(p->tq, &p->start_task);
1051 #endif
1052 #ifdef __NetBSD__
1053 workqueue_enqueue(p->start_task.wq, &p->start_task.w, NULL);
1054 #endif
1057 if (n)
1058 goto reclaim_more;
1061 static void
1062 #ifdef __FreeBSD__
1063 sge_txq_reclaim_handler(void *arg, int ncount)
1064 #endif
1065 #ifdef __NetBSD__
1066 sge_txq_reclaim_handler(struct work *wk, void *arg)
1067 #endif
1069 struct sge_txq *q = arg;
1071 sge_txq_reclaim_(q);
1074 static void
1075 #ifdef __FreeBSD__
1076 sge_timer_reclaim(void *arg, int ncount)
1077 #endif
1078 #ifdef __NetBSD__
1079 sge_timer_reclaim(struct work *wk, void *arg)
1080 #endif
1082 struct port_info *p = arg;
1083 int i, nqsets = p->nqsets;
1084 adapter_t *sc = p->adapter;
1085 struct sge_qset *qs;
1086 struct sge_txq *txq;
1087 struct mtx *lock;
1089 for (i = 0; i < nqsets; i++) {
1090 qs = &sc->sge.qs[i];
1091 txq = &qs->txq[TXQ_ETH];
1092 sge_txq_reclaim_(txq);
1094 txq = &qs->txq[TXQ_OFLD];
1095 sge_txq_reclaim_(txq);
1097 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1098 &sc->sge.qs[0].rspq.lock;
1100 if (mtx_trylock(lock)) {
1101 /* XXX currently assume that we are *NOT* polling */
1102 uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
1104 if (qs->fl[0].credits < qs->fl[0].size - 16)
1105 __refill_fl(sc, &qs->fl[0]);
1106 if (qs->fl[1].credits < qs->fl[1].size - 16)
1107 __refill_fl(sc, &qs->fl[1]);
1109 if (status & (1 << qs->rspq.cntxt_id)) {
1110 if (qs->rspq.credits) {
1111 refill_rspq(sc, &qs->rspq, 1);
1112 qs->rspq.credits--;
1113 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
1114 1 << qs->rspq.cntxt_id);
1117 mtx_unlock(lock);
1123 * init_qset_cntxt - initialize an SGE queue set context info
1124 * @qs: the queue set
1125 * @id: the queue set id
1127 * Initializes the TIDs and context ids for the queues of a queue set.
1129 static void
1130 init_qset_cntxt(struct sge_qset *qs, u_int id)
1133 qs->rspq.cntxt_id = id;
1134 qs->fl[0].cntxt_id = 2 * id;
1135 qs->fl[1].cntxt_id = 2 * id + 1;
1136 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1137 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1138 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1139 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1140 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
static void
txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
{
	txq->in_use += ndesc;
	/*
	 * XXX we don't handle stopping of queue
	 * presumably start handles this when we bump against the end
	 */
	txqs->gen = txq->gen;
	txq->unacked += ndesc;
	txqs->compl = (txq->unacked & 8) << (S_WR_COMPL - 3);
	txq->unacked &= 7;
	txqs->pidx = txq->pidx;
	txq->pidx += ndesc;

	if (txq->pidx >= txq->size) {
		txq->pidx -= txq->size;
		txq->gen ^= 1;
	}
}
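/*
 * The unacked counter tracks how many descriptors have been queued since a
 * completion was last requested; once it reaches 8, that bit is folded into
 * the work request's COMPL flag (txqs->compl) so the SGE reports progress
 * back, and the counter is reduced modulo 8.
 */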
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@m: the packet mbufs
 *	@nsegs: the number of segments
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static __inline unsigned int
calc_tx_descs(const struct mbuf *m, int nsegs)
{
	unsigned int flits;

	if (m->m_pkthdr.len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(nsegs) + 2;
#ifdef TSO_SUPPORTED
	if (m->m_pkthdr.csum_flags & (CSUM_TSO))
		flits++;
#endif
	return flits_to_desc(flits);
}
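/*
 * The two extra flits cover the CPL_TX_PKT header (which embeds the work
 * request header) placed ahead of the SGL; TSO packets use the larger
 * CPL_TX_PKT_LSO header and need one more flit.  Packets small enough to
 * be sent as immediate data always fit in a single descriptor.
 */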
1190 static unsigned int
1191 busdma_map_mbufs(struct mbuf **m, struct sge_txq *txq,
1192 struct tx_sw_desc *stx, bus_dma_segment_t *segs, int *nsegs)
1194 struct mbuf *m0;
1195 int err, pktlen;
1196 #ifdef __NetBSD__
1197 int i, total_len;
1198 #endif
1200 m0 = *m;
1201 pktlen = m0->m_pkthdr.len;
1203 #ifdef __FreeBSD__
1204 err = bus_dmamap_load_mvec_sg(txq->entry_tag, stx->map, m0, segs, nsegs, 0);
1205 #endif
1206 #ifdef __NetBSD__
1207 m0 = *m;
1208 i = 0;
1209 total_len = 0;
1210 while (m0)
1212 i++;
1213 total_len += m0->m_len;
1214 m0 = m0->m_next;
1216 err = bus_dmamap_create(txq->entry_tag, total_len, TX_MAX_SEGS, total_len, 0, BUS_DMA_NOWAIT, &stx->map);
1217 if (err)
1218 return (err);
1219 err = bus_dmamap_load_mbuf(txq->entry_tag, stx->map, *m, 0);
1220 if (err)
1221 return (err);
1222 // feed out the physical mappings
1223 *nsegs = stx->map->dm_nsegs;
1224 for (i=0; i<*nsegs; i++)
1226 segs[i] = stx->map->dm_segs[i];
1228 #endif
1229 #ifdef DEBUG
1230 if (err) {
1231 int n = 0;
1232 struct mbuf *mtmp = m0;
1233 while(mtmp) {
1234 n++;
1235 mtmp = mtmp->m_next;
1237 printf("map_mbufs: bus_dmamap_load_mbuf_sg failed with %d - pkthdr.len==%d nmbufs=%d\n",
1238 err, m0->m_pkthdr.len, n);
1240 #endif
1241 if (err == EFBIG) {
1242 /* Too many segments, try to defrag */
1243 m0 = m_defrag(m0, M_DONTWAIT);
1244 if (m0 == NULL) {
1245 m_freem(*m);
1246 *m = NULL;
1247 return (ENOBUFS);
1249 *m = m0;
1250 #ifdef __FreeBSD__
1251 err = bus_dmamap_load_mbuf_sg(txq->entry_tag, stx->map, m0, segs, nsegs, 0);
1252 #endif
1253 #ifdef __NetBSD__
1254 INT3; // XXXXXXXXXXXXXXXXXX like above!
1255 #endif
1258 if (err == ENOMEM) {
1259 return (err);
1262 if (err) {
1263 if (cxgb_debug)
1264 printf("map failure err=%d pktlen=%d\n", err, pktlen);
1265 m_freem_vec(m0);
1266 *m = NULL;
1267 return (err);
1270 #ifdef __FreeBSD__
1271 bus_dmamap_sync(txq->entry_tag, stx->map, BUS_DMASYNC_PREWRITE);
1272 #endif
1273 #ifdef __NetBSD__
1274 bus_dmamap_sync(txq->entry_tag, stx->map, 0, pktlen, BUS_DMASYNC_PREWRITE);
1275 #endif
1276 stx->flags |= TX_SW_DESC_MAPPED;
1278 return (0);
1282 * make_sgl - populate a scatter/gather list for a packet
1283 * @sgp: the SGL to populate
1284 * @segs: the packet dma segments
1285 * @nsegs: the number of segments
1287 * Generates a scatter/gather list for the buffers that make up a packet
1288 * and returns the SGL size in 8-byte words. The caller must size the SGL
1289 * appropriately.
1291 static __inline void
1292 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1294 int i, idx;
1296 for (idx = 0, i = 0; i < nsegs; i++, idx ^= 1) {
1297 if (i && idx == 0)
1298 ++sgp;
1300 sgp->len[idx] = htobe32(segs[i].ds_len);
1301 sgp->addr[idx] = htobe64(segs[i].ds_addr);
1304 if (idx)
1305 sgp->len[idx] = 0;
1309 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1310 * @adap: the adapter
1311 * @q: the Tx queue
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race,
1314 * where the HW is going to sleep just after we checked, however,
1315 * then the interrupt handler will detect the outstanding TX packet
1316 * and ring the doorbell for us.
1318 * When GTS is disabled we unconditionally ring the doorbell.
1320 static __inline void
1321 check_ring_tx_db(adapter_t *adap, struct sge_txq *q)
1323 #if USE_GTS
1324 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1325 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1326 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1327 #ifdef T3_TRACE
1328 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1329 q->cntxt_id);
1330 #endif
1331 t3_write_reg(adap, A_SG_KDOORBELL,
1332 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1334 #else
1335 wmb(); /* write descriptors before telling HW */
1336 t3_write_reg(adap, A_SG_KDOORBELL,
1337 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1338 #endif
1341 static __inline void
1342 wr_gen2(struct tx_desc *d, unsigned int gen)
1344 #if SGE_NUM_GENBITS == 2
1345 d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1346 #endif
1352 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1353 * @ndesc: number of Tx descriptors spanned by the SGL
1354 * @txd: first Tx descriptor to be written
1355 * @txqs: txq state (generation and producer index)
1356 * @txq: the SGE Tx queue
1357 * @sgl: the SGL
1358 * @flits: number of flits to the start of the SGL in the first descriptor
1359 * @sgl_flits: the SGL size in flits
1360 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1361 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1363 * Write a work request header and an associated SGL. If the SGL is
1364 * small enough to fit into one Tx descriptor it has already been written
1365 * and we just need to write the WR header. Otherwise we distribute the
1366 * SGL across the number of descriptors it spans.
1369 static void
1370 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1371 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1372 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1375 struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1376 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1378 if (__predict_true(ndesc == 1)) {
1379 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1380 V_WR_SGLSFLT(flits)) | wr_hi;
1381 wmb();
1382 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1383 V_WR_GEN(txqs->gen)) | wr_lo;
1384 /* XXX gen? */
1385 wr_gen2(txd, txqs->gen);
1386 } else {
1387 unsigned int ogen = txqs->gen;
1388 const uint64_t *fp = (const uint64_t *)sgl;
1389 struct work_request_hdr *wp = wrp;
1391 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1392 V_WR_SGLSFLT(flits)) | wr_hi;
1394 while (sgl_flits) {
1395 unsigned int avail = WR_FLITS - flits;
1397 if (avail > sgl_flits)
1398 avail = sgl_flits;
1399 memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1400 sgl_flits -= avail;
1401 ndesc--;
1402 if (!sgl_flits)
1403 break;
1405 fp += avail;
1406 txd++;
1407 txsd++;
1408 if (++txqs->pidx == txq->size) {
1409 txqs->pidx = 0;
1410 txqs->gen ^= 1;
1411 txd = txq->desc;
1412 txsd = txq->sdesc;
1416 * when the head of the mbuf chain
1417 * is freed all clusters will be freed
1418 * with it
1420 txsd->m = NULL;
1421 wrp = (struct work_request_hdr *)txd;
1422 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1423 V_WR_SGLSFLT(1)) | wr_hi;
1424 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1425 sgl_flits + 1)) |
1426 V_WR_GEN(txqs->gen)) | wr_lo;
1427 wr_gen2(txd, txqs->gen);
1428 flits = 1;
1430 wrp->wr_hi |= htonl(F_WR_EOP);
1431 wmb();
1432 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1433 wr_gen2((struct tx_desc *)wp, ogen);
1438 /* sizeof(*eh) + sizeof(*vhdr) + sizeof(*ip) + sizeof(*tcp) */
1439 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 20 + 20)
1442 t3_encap(struct port_info *p, struct mbuf **m, int *free_it)
1444 adapter_t *sc;
1445 struct mbuf *m0;
1446 struct sge_qset *qs;
1447 struct sge_txq *txq;
1448 struct tx_sw_desc *stx;
1449 struct txq_state txqs;
1450 unsigned int ndesc, flits, cntrl, mlen;
1451 int err, nsegs, tso_info = 0;
1453 struct work_request_hdr *wrp;
1454 struct tx_sw_desc *txsd;
1455 struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1];
1456 bus_dma_segment_t segs[TX_MAX_SEGS];
1457 uint32_t wr_hi, wr_lo, sgl_flits;
1459 struct tx_desc *txd;
1460 struct cpl_tx_pkt *cpl;
1462 m0 = *m;
1463 sc = p->adapter;
1465 DPRINTF("t3_encap port_id=%d qsidx=%d ", p->port_id, p->first_qset);
1467 /* port_id=1 qsid=1 txpkt_intf=2 tx_chan=0 */
1469 qs = &sc->sge.qs[p->first_qset];
1471 txq = &qs->txq[TXQ_ETH];
1472 stx = &txq->sdesc[txq->pidx];
1473 txd = &txq->desc[txq->pidx];
1474 cpl = (struct cpl_tx_pkt *)txd;
1475 mlen = m0->m_pkthdr.len;
1476 cpl->len = htonl(mlen | 0x80000000);
1478 DPRINTF("mlen=%d txpkt_intf=%d tx_chan=%d\n", mlen, p->txpkt_intf, p->tx_chan);
1480 * XXX handle checksum, TSO, and VLAN here
1483 cntrl = V_TXPKT_INTF(p->txpkt_intf);
1486 * XXX need to add VLAN support for 6.x
1488 #ifdef VLAN_SUPPORTED
1489 if (m0->m_flags & M_VLANTAG)
1490 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
1491 if (m0->m_pkthdr.csum_flags & (CSUM_TSO))
1492 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1493 #endif
1494 if (tso_info) {
1495 int eth_type;
1496 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *) cpl;
1497 struct ip *ip;
1498 struct tcphdr *tcp;
1499 char *pkthdr, tmp[TCPPKTHDRSIZE]; /* is this too large for the stack? */
1501 txd->flit[2] = 0;
1502 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1503 hdr->cntrl = htonl(cntrl);
1505 if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1506 pkthdr = &tmp[0];
1507 m_copydata(m0, 0, TCPPKTHDRSIZE, pkthdr);
1508 } else {
1509 pkthdr = mtod(m0, char *);
1512 #ifdef VLAN_SUPPORTED
1513 if (__predict_false(m0->m_flags & M_VLANTAG)) {
1514 eth_type = CPL_ETH_II_VLAN;
1515 ip = (struct ip *)(pkthdr + ETHER_HDR_LEN +
1516 ETHER_VLAN_ENCAP_LEN);
1517 } else {
1518 eth_type = CPL_ETH_II;
1519 ip = (struct ip *)(pkthdr + ETHER_HDR_LEN);
1521 #else
1522 eth_type = CPL_ETH_II;
1523 ip = (struct ip *)(pkthdr + ETHER_HDR_LEN);
1524 #endif
1525 tcp = (struct tcphdr *)((uint8_t *)ip +
1526 sizeof(*ip));
1528 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1529 V_LSO_IPHDR_WORDS(ip->ip_hl) |
1530 V_LSO_TCPHDR_WORDS(tcp->th_off);
1531 hdr->lso_info = htonl(tso_info);
1532 flits = 3;
1533 } else {
1534 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1535 cpl->cntrl = htonl(cntrl);
1537 if (mlen <= WR_LEN - sizeof(*cpl)) {
1538 txq_prod(txq, 1, &txqs);
1539 txq->sdesc[txqs.pidx].m = NULL;
1541 if (m0->m_len == m0->m_pkthdr.len)
1542 memcpy(&txd->flit[2], mtod(m0, uint8_t *), mlen);
1543 else
1544 m_copydata(m0, 0, mlen, (void *)&txd->flit[2]);
1546 *free_it = 1;
1547 flits = (mlen + 7) / 8 + 2;
1548 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1549 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1550 F_WR_SOP | F_WR_EOP | txqs.compl);
1551 wmb();
1552 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) |
1553 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1555 wr_gen2(txd, txqs.gen);
1556 check_ring_tx_db(sc, txq);
1557 return (0);
1559 flits = 2;
1562 wrp = (struct work_request_hdr *)txd;
1564 if ((err = busdma_map_mbufs(m, txq, stx, segs, &nsegs)) != 0) {
1565 return (err);
1567 m0 = *m;
1568 ndesc = calc_tx_descs(m0, nsegs);
1570 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1571 make_sgl(sgp, segs, nsegs);
1573 sgl_flits = sgl_len(nsegs);
1575 DPRINTF("make_sgl success nsegs==%d ndesc==%d\n", nsegs, ndesc);
1576 txq_prod(txq, ndesc, &txqs);
1577 txsd = &txq->sdesc[txqs.pidx];
1578 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1579 wr_lo = htonl(V_WR_TID(txq->token));
1580 txsd->m = m0;
1581 m_set_priority(m0, txqs.pidx);
1583 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits, sgl_flits, wr_hi, wr_lo);
1584 check_ring_tx_db(p->adapter, txq);
1586 return (0);
1591 * write_imm - write a packet into a Tx descriptor as immediate data
1592 * @d: the Tx descriptor to write
1593 * @m: the packet
1594 * @len: the length of packet data to write as immediate data
1595 * @gen: the generation bit value to write
1597 * Writes a packet as immediate data into a Tx descriptor. The packet
1598 * contains a work request at its beginning. We must write the packet
1599 * carefully so the SGE doesn't read accidentally before it's written in
1600 * its entirety.
1602 static __inline void
1603 write_imm(struct tx_desc *d, struct mbuf *m,
1604 unsigned int len, unsigned int gen)
1606 struct work_request_hdr *from = mtod(m, struct work_request_hdr *);
1607 struct work_request_hdr *to = (struct work_request_hdr *)d;
1609 memcpy(&to[1], &from[1], len - sizeof(*from));
1610 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1611 V_WR_BCNTLFLT(len & 7));
1612 wmb();
1613 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1614 V_WR_LEN((len + 7) / 8));
1615 wr_gen2(d, gen);
1616 m_freem(m);
1620 * check_desc_avail - check descriptor availability on a send queue
1621 * @adap: the adapter
1622 * @q: the TX queue
1623 * @m: the packet needing the descriptors
1624 * @ndesc: the number of Tx descriptors needed
1625 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1627 * Checks if the requested number of Tx descriptors is available on an
1628 * SGE send queue. If the queue is already suspended or not enough
1629 * descriptors are available the packet is queued for later transmission.
1630 * Must be called with the Tx queue locked.
1632 * Returns 0 if enough descriptors are available, 1 if there aren't
1633 * enough descriptors and the packet has been queued, and 2 if the caller
1634 * needs to retry because there weren't enough descriptors at the
1635 * beginning of the call but some freed up in the mean time.
1637 static __inline int
1638 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1639 struct mbuf *m, unsigned int ndesc,
1640 unsigned int qid)
1643 * XXX We currently only use this for checking the control queue
1644 * the control queue is only used for binding qsets which happens
1645 * at init time so we are guaranteed enough descriptors
1647 if (__predict_false(!mbufq_empty(&q->sendq))) {
1648 addq_exit: mbufq_tail(&q->sendq, m);
1649 return 1;
1651 if (__predict_false(q->size - q->in_use < ndesc)) {
1653 struct sge_qset *qs = txq_to_qset(q, qid);
1655 setbit(&qs->txq_stopped, qid);
1656 smp_mb();
1658 if (should_restart_tx(q) &&
1659 test_and_clear_bit(qid, &qs->txq_stopped))
1660 return 2;
1662 q->stops++;
1663 goto addq_exit;
1665 return 0;
/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any mbufs.
 */
static __inline void
reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	mtx_assert(&q->lock, MA_OWNED);

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static __inline int
immediate(const struct mbuf *m)
{
	return m->m_len <= WR_LEN && m->m_pkthdr.len <= WR_LEN;
}
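/*
 * A packet is "immediate" when the whole work request, headers included,
 * fits in a single Tx descriptor (WR_LEN bytes); such packets are copied
 * straight into the descriptor by write_imm() and never need an SGL or a
 * DMA mapping.
 */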
1695 * ctrl_xmit - send a packet through an SGE control Tx queue
1696 * @adap: the adapter
1697 * @q: the control queue
1698 * @m: the packet
1700 * Send a packet through an SGE control Tx queue. Packets sent through
1701 * a control queue must fit entirely as immediate data in a single Tx
1702 * descriptor and have no page fragments.
1704 static int
1705 ctrl_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m)
1707 int ret;
1708 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1710 if (__predict_false(!immediate(m))) {
1711 m_freem(m);
1712 return 0;
1715 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1716 wrp->wr_lo = htonl(V_WR_TID(q->token));
1718 mtx_lock(&q->lock);
1719 again: reclaim_completed_tx_imm(q);
1721 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1722 if (__predict_false(ret)) {
1723 if (ret == 1) {
1724 mtx_unlock(&q->lock);
1725 return (-1);
1727 goto again;
1730 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen);
1732 q->in_use++;
1733 if (++q->pidx >= q->size) {
1734 q->pidx = 0;
1735 q->gen ^= 1;
1737 mtx_unlock(&q->lock);
1738 wmb();
1739 t3_write_reg(adap, A_SG_KDOORBELL,
1740 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1741 return (0);
1746 * restart_ctrlq - restart a suspended control queue
 *	@qs: the queue set containing the control queue
1749 * Resumes transmission on a suspended Tx control queue.
1751 static void
1752 #ifdef __FreeBSD__
1753 restart_ctrlq(void *data, int npending)
1754 #endif
1755 #ifdef __NetBSD__
1756 restart_ctrlq(struct work *wk, void *data)
1757 #endif
1759 struct mbuf *m;
1760 struct sge_qset *qs = (struct sge_qset *)data;
1761 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1762 adapter_t *adap = qs->port->adapter;
1764 mtx_lock(&q->lock);
1765 again: reclaim_completed_tx_imm(q);
1767 while (q->in_use < q->size &&
1768 (m = mbufq_dequeue(&q->sendq)) != NULL) {
1770 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen);
1772 if (++q->pidx >= q->size) {
1773 q->pidx = 0;
1774 q->gen ^= 1;
1776 q->in_use++;
1778 if (!mbufq_empty(&q->sendq)) {
1779 setbit(&qs->txq_stopped, TXQ_CTRL);
1780 smp_mb();
1782 if (should_restart_tx(q) &&
1783 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1784 goto again;
1785 q->stops++;
1787 mtx_unlock(&q->lock);
1788 t3_write_reg(adap, A_SG_KDOORBELL,
1789 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1794 * Send a management message through control queue 0
1797 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1799 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], m);
1803 * free_qset - free the resources of an SGE queue set
1804 * @sc: the controller owning the queue set
1805 * @q: the queue set
1807 * Release the HW and SW resources associated with an SGE queue set, such
1808 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
1809 * queue set must be quiesced prior to calling this.
1811 static void
1812 t3_free_qset(adapter_t *sc, struct sge_qset *q)
1814 int i;
1816 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1817 if (q->fl[i].desc) {
1818 mtx_lock(&sc->sge.reg_lock);
1819 t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1820 mtx_unlock(&sc->sge.reg_lock);
1821 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1822 #ifdef __FreeBSD__
1823 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1824 q->fl[i].desc_map);
1825 bus_dma_tag_destroy(q->fl[i].desc_tag);
1826 bus_dma_tag_destroy(q->fl[i].entry_tag);
1827 #endif
1828 #ifdef __NetBSD__
1829 INT3;
1830 // bus_dmamem_free(q->fl[i].desc_tag, &q->fl[i].phys_addr, 1);
1831 // XXXXXXXXXXX destroy DMA tags????
1832 #endif
1834 if (q->fl[i].sdesc) {
1835 free_rx_bufs(sc, &q->fl[i]);
1836 free(q->fl[i].sdesc, M_DEVBUF);
1840 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
1841 if (q->txq[i].desc) {
1842 mtx_lock(&sc->sge.reg_lock);
1843 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
1844 mtx_unlock(&sc->sge.reg_lock);
1845 bus_dmamap_unload(q->txq[i].desc_tag,
1846 q->txq[i].desc_map);
1847 #ifdef __FreeBSD__
1848 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
1849 q->txq[i].desc_map);
1850 bus_dma_tag_destroy(q->txq[i].desc_tag);
1851 bus_dma_tag_destroy(q->txq[i].entry_tag);
1852 MTX_DESTROY(&q->txq[i].lock);
1853 #endif
1854 #ifdef __NetBSD__
1855 INT3;
1856 // bus_dmamem_free(q->txq[i].desc_tag, &q->txq[i].phys_addr, 1);
1857 // XXXXXXXXXXX destroy DMA tags???? And the lock?!??!
1858 #endif
1861 if (q->txq[i].sdesc) {
1862 free(q->txq[i].sdesc, M_DEVBUF);
1866 if (q->rspq.desc) {
1867 mtx_lock(&sc->sge.reg_lock);
1868 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
1869 mtx_unlock(&sc->sge.reg_lock);
1871 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
1872 #ifdef __FreeBSD__
1873 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
1874 q->rspq.desc_map);
1875 bus_dma_tag_destroy(q->rspq.desc_tag);
1876 MTX_DESTROY(&q->rspq.lock);
1877 #endif
1878 #ifdef __NetBSD__
1879 INT3;
1880 // bus_dmamem_free(q->rspq.desc_tag, &q->rspq.phys_addr, 1);
1881 // XXXXXXXXXXX destroy DMA tags???? and the LOCK ?!?!?
1882 #endif
1885 memset(q, 0, sizeof(*q));
1889 * t3_free_sge_resources - free SGE resources
1890 * @sc: the adapter softc
1892 * Frees resources used by the SGE queue sets.
1894 void
1895 t3_free_sge_resources(adapter_t *sc)
1897 int i, nqsets;
1899 for (nqsets = i = 0; i < (sc)->params.nports; i++)
1900 nqsets += sc->port[i].nqsets;
1902 for (i = 0; i < nqsets; ++i)
1903 t3_free_qset(sc, &sc->sge.qs[i]);
1907 * t3_sge_start - enable SGE
1908 * @sc: the controller softc
1910 * Enables the SGE for DMAs. This is the last step in starting packet
1911 * transfers.
1913 void
1914 t3_sge_start(adapter_t *sc)
1916 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
1920 * t3_sge_stop - disable SGE operation
1921 * @sc: the adapter
 *	Disables the DMA engine.  This can be called in emergencies (e.g.,
1924 * from error interrupts) or from normal process context. In the latter
1925 * case it also disables any pending queue restart tasklets. Note that
1926 * if it is called in interrupt context it cannot disable the restart
1927 * tasklets as it cannot wait, however the tasklets will have no effect
1928 * since the doorbells are disabled and the driver will call this again
1929 * later from process context, at which time the tasklets will be stopped
1930 * if they are still running.
1932 void
1933 t3_sge_stop(adapter_t *sc)
1935 int i, nqsets;
1937 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
1939 #ifdef __FreeBSD__
1940 if (sc->tq == NULL)
1941 return;
1942 #endif
1944 for (nqsets = i = 0; i < (sc)->params.nports; i++)
1945 nqsets += sc->port[i].nqsets;
1947 for (i = 0; i < nqsets; ++i) {
1948 #ifdef __FreeBSD__
1949 struct sge_qset *qs = &sc->sge.qs[i];
1951 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
1952 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
1953 #endif
1959 * free_tx_desc - reclaims Tx descriptors and their buffers
1960 * @adapter: the adapter
1961 * @q: the Tx queue to reclaim descriptors from
1962 * @n: the number of descriptors to reclaim
1964 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
1965 * Tx buffers. Called with the Tx queue lock held.
1968 free_tx_desc(struct sge_txq *q, int n, struct mbuf **m_vec)
1970 struct tx_sw_desc *d;
1971 unsigned int cidx = q->cidx;
1972 int nbufs = 0;
1974 #ifdef T3_TRACE
1975 T3_TRACE2(sc->tb[q->cntxt_id & 7],
1976 "reclaiming %u Tx descriptors at cidx %u", n, cidx);
1977 #endif
1978 d = &q->sdesc[cidx];
1980 while (n-- > 0) {
1981 DPRINTF("cidx=%d d=%p\n", cidx, d);
1982 if (d->m) {
1983 if (d->flags & TX_SW_DESC_MAPPED) {
1984 bus_dmamap_unload(q->entry_tag, d->map);
1985 bus_dmamap_destroy(q->entry_tag, d->map);
1986 d->flags &= ~TX_SW_DESC_MAPPED;
1988 if (m_get_priority(d->m) == cidx) {
1989 m_vec[nbufs] = d->m;
1990 d->m = NULL;
1991 nbufs++;
1992 } else {
1993 printf("pri=%d cidx=%d\n", (int)m_get_priority(d->m), cidx);
1996 ++d;
1997 if (++cidx == q->size) {
1998 cidx = 0;
1999 d = q->sdesc;
2002 q->cidx = cidx;
2004 return (nbufs);
/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static __inline int
is_new_response(const struct rsp_desc *r,
    const struct sge_rspq *q)
{
	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
}
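/*
 * Each pass over the response ring flips the queue's expected generation
 * bit, so an entry whose F_RSPD_GEN2 bit matches that expectation was
 * written by the SGE on the current pass and has not been processed yet.
 */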
2022 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2023 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2024 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2025 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2026 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2028 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2029 #define NOMEM_INTR_DELAY 2500
2032 * write_ofld_wr - write an offload work request
2033 * @adap: the adapter
2034 * @m: the packet to send
2035 * @q: the Tx queue
2036 * @pidx: index of the first Tx descriptor to write
2037 * @gen: the generation value to use
2038 * @ndesc: number of descriptors the packet will occupy
2040 * Write an offload work request to send the supplied packet. The packet
2041 * data already carry the work request with most fields populated.
2043 static void
2044 write_ofld_wr(adapter_t *adap, struct mbuf *m,
2045 struct sge_txq *q, unsigned int pidx,
2046 unsigned int gen, unsigned int ndesc,
2047 bus_dma_segment_t *segs, unsigned int nsegs)
2049 unsigned int sgl_flits, flits;
2050 struct work_request_hdr *from;
2051 struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1];
2052 struct tx_desc *d = &q->desc[pidx];
2053 struct txq_state txqs;
2055 if (immediate(m)) {
2056 q->sdesc[pidx].m = NULL;
2057 write_imm(d, m, m->m_len, gen);
2058 return;
2061 /* Only TX_DATA builds SGLs */
2063 from = mtod(m, struct work_request_hdr *);
2064 INT3; /// DEBUG this???
2065 #ifdef __FreeBSD__
2066 memcpy(&d->flit[1], &from[1],
2067 (uint8_t *)m->m_pkthdr.header - mtod(m, uint8_t *) - sizeof(*from));
2068 #endif
2069 #ifdef __NetBSD__
2070 INT3;
2071 flits = 3; // XXXXXXXXXXXXXX
2072 #endif
2074 #ifdef __FreeBSD__
2075 flits = ((uint8_t *)m->m_pkthdr.header - mtod(m, uint8_t *)) / 8;
2076 #endif
2077 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : sgl;
2079 make_sgl(sgp, segs, nsegs);
2080 sgl_flits = sgl_len(nsegs);
2082 txqs.gen = q->gen;
2083 txqs.pidx = q->pidx;
2084 txqs.compl = (q->unacked & 8) << (S_WR_COMPL - 3);
2085 write_wr_hdr_sgl(ndesc, d, &txqs, q, sgl, flits, sgl_flits,
2086 from->wr_hi, from->wr_lo);
2090 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
2091 * @m: the packet
2093 * Returns the number of Tx descriptors needed for the given offload
2094 * packet. These packets are already fully constructed.
2096 static __inline unsigned int
2097 calc_tx_descs_ofld(struct mbuf *m, unsigned int nsegs)
2099 unsigned int flits, cnt = 0;
2102 if (m->m_len <= WR_LEN)
2103 return 1; /* packet fits as immediate data */
2105 if (m->m_flags & M_IOVEC)
2106 cnt = mtomv(m)->mv_count;
2108 INT3; // Debug this????
2109 #ifdef __FreeBSD__
2110 flits = ((uint8_t *)m->m_pkthdr.header - mtod(m, uint8_t *)) / 8; /* headers */
2111 #endif
2112 #ifdef __NetBSD__
2113 INT3;
2114 flits = 3; // XXXXXXXXX
2115 #endif
2117 return flits_to_desc(flits + sgl_len(cnt));
2121 * ofld_xmit - send a packet through an offload queue
2122 * @adap: the adapter
2123 * @q: the Tx offload queue
2124 * @m: the packet
2126 * Send an offload packet through an SGE offload queue.
2128 static int
2129 ofld_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m)
2131 int ret, nsegs;
2132 unsigned int ndesc;
2133 unsigned int pidx, gen;
2134 struct mbuf *m_vec[TX_CLEAN_MAX_DESC];
2135 bus_dma_segment_t segs[TX_MAX_SEGS];
2136 int i, cleaned;
2137 struct tx_sw_desc *stx = &q->sdesc[q->pidx];
2139 mtx_lock(&q->lock);
2140 if ((ret = busdma_map_mbufs(&m, q, stx, segs, &nsegs)) != 0) {
2141 mtx_unlock(&q->lock);
2142 return (ret);
2144 ndesc = calc_tx_descs_ofld(m, nsegs);
2145 again: cleaned = reclaim_completed_tx(q, TX_CLEAN_MAX_DESC, m_vec);
2147 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2148 if (__predict_false(ret)) {
2149 if (ret == 1) {
2150 m_set_priority(m, ndesc); /* save for restart */
2151 mtx_unlock(&q->lock);
2152 return EINTR;
2154 goto again;
2157 gen = q->gen;
2158 q->in_use += ndesc;
2159 pidx = q->pidx;
2160 q->pidx += ndesc;
2161 if (q->pidx >= q->size) {
2162 q->pidx -= q->size;
2163 q->gen ^= 1;
2165 #ifdef T3_TRACE
2166 T3_TRACE5(adap->tb[q->cntxt_id & 7],
2167 "ofld_xmit: ndesc %u, pidx %u, len %u, main %u, frags %u",
2168 ndesc, pidx, skb->len, skb->len - skb->data_len,
2169 skb_shinfo(skb)->nr_frags); /* XXX leftover Linux skb fields; only compiled when T3_TRACE is defined */
2170 #endif
2171 mtx_unlock(&q->lock);
2173 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs);
2174 check_ring_tx_db(adap, q);
2176 for (i = 0; i < cleaned; i++) {
2177 m_freem_vec(m_vec[i]);
2179 return (0);
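/*
 * When check_desc_avail() reports the queue full (ret == 1 above), the
 * mbuf has been left on q->sendq with its descriptor count stashed in the
 * mbuf priority; restart_offloadq() below resumes it once completion
 * credits return, so the EINTR return means "deferred", not "failed".
 */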
2183 * restart_offloadq - restart a suspended offload queue
2184 * @qs: the queue set containing the offload queue
2186 * Resumes transmission on a suspended Tx offload queue.
2188 static void
2189 #ifdef __FreeBSD__
2190 restart_offloadq(void *data, int npending)
2191 #endif
2192 #ifdef __NetBSD__
2193 restart_offloadq(struct work *wk, void *data)
2194 #endif
2197 struct mbuf *m;
2198 struct sge_qset *qs = data;
2199 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2200 adapter_t *adap = qs->port->adapter;
2201 struct mbuf *m_vec[TX_CLEAN_MAX_DESC];
2202 bus_dma_segment_t segs[TX_MAX_SEGS];
2203 int nsegs, i, cleaned;
2204 struct tx_sw_desc *stx = &q->sdesc[q->pidx];
2206 mtx_lock(&q->lock);
2207 again: cleaned = reclaim_completed_tx(q, TX_CLEAN_MAX_DESC, m_vec);
2209 while ((m = mbufq_peek(&q->sendq)) != NULL) {
2210 unsigned int gen, pidx;
2211 unsigned int ndesc = m_get_priority(m);
2213 if (__predict_false(q->size - q->in_use < ndesc)) {
2214 setbit(&qs->txq_stopped, TXQ_OFLD);
2215 smp_mb();
2217 if (should_restart_tx(q) &&
2218 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2219 goto again;
2220 q->stops++;
2221 break;
2224 gen = q->gen;
2225 q->in_use += ndesc;
2226 pidx = q->pidx;
2227 q->pidx += ndesc;
2228 if (q->pidx >= q->size) {
2229 q->pidx -= q->size;
2230 q->gen ^= 1;
2233 (void)mbufq_dequeue(&q->sendq);
2234 busdma_map_mbufs(&m, q, stx, segs, &nsegs);
2235 mtx_unlock(&q->lock);
2236 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs);
2237 mtx_lock(&q->lock);
2239 mtx_unlock(&q->lock);
2241 #if USE_GTS
2242 set_bit(TXQ_RUNNING, &q->flags);
2243 set_bit(TXQ_LAST_PKT_DB, &q->flags);
2244 #endif
2245 t3_write_reg(adap, A_SG_KDOORBELL,
2246 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2248 for (i = 0; i < cleaned; i++) {
2249 m_freem_vec(m_vec[i]);
2254 * queue_set - return the queue set a packet should use
2255 * @m: the packet
2257 * Maps a packet to the SGE queue set it should use. The desired queue
2258 * set is carried in bits 1-3 in the packet's priority.
2260 static __inline int
2261 queue_set(const struct mbuf *m)
2263 return m_get_priority(m) >> 1;
2267 * is_ctrl_pkt - return whether an offload packet is a control packet
2268 * @m: the packet
2270 * Determines whether an offload packet should use an OFLD or a CTRL
2271 * Tx queue. This is indicated by bit 0 in the packet's priority.
2273 static __inline int
2274 is_ctrl_pkt(const struct mbuf *m)
2276 return m_get_priority(m) & 1;
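/*
 * Illustration of the priority encoding these two helpers decode.  The
 * code that builds offload packets lives outside this file, so the line
 * below is only a sketch with made-up variable names:
 *
 *	m_set_priority(m, (qset_idx << 1) | (is_ctrl ? 1 : 0));
 *
 * Bit 0 selects the CTRL vs. OFLD Tx queue and bits 1-3 select the queue
 * set, matching queue_set() and is_ctrl_pkt() above.
 */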
2280 * t3_offload_tx - send an offload packet
2281 * @tdev: the offload device to send to
2282 * @m: the packet
2284 * Sends an offload packet. We use the packet priority to select the
2285 * appropriate Tx queue as follows: bit 0 indicates whether the packet
2286 * should be sent as regular or control, bits 1-3 select the queue set.
2288 int
2289 t3_offload_tx(struct toedev *tdev, struct mbuf *m)
2291 adapter_t *adap = tdev2adap(tdev);
2292 struct sge_qset *qs = &adap->sge.qs[queue_set(m)];
2294 if (__predict_false(is_ctrl_pkt(m)))
2295 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], m);
2297 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], m);
2300 #ifdef __FreeBSD__
2302 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
2303 * @tdev: the offload device that will be receiving the packets
2304 * @q: the SGE response queue that assembled the bundle
2305 * @m: the partial bundle
2306 * @n: the number of packets in the bundle
2308 * Delivers a (partial) bundle of Rx offload packets to an offload device.
2310 static __inline void
2311 deliver_partial_bundle(struct toedev *tdev,
2312 struct sge_rspq *q,
2313 struct mbuf *mbufs[], int n)
2315 if (n) {
2316 q->offload_bundles++;
2317 cxgb_ofld_recv(tdev, mbufs, n);
2321 static __inline int
2322 rx_offload(struct toedev *tdev, struct sge_rspq *rq,
2323 struct mbuf *m, struct mbuf *rx_gather[],
2324 unsigned int gather_idx)
2326 rq->offload_pkts++;
2327 m->m_pkthdr.header = mtod(m, void *);
2329 rx_gather[gather_idx++] = m;
2330 if (gather_idx == RX_BUNDLE_SIZE) {
2331 cxgb_ofld_recv(tdev, rx_gather, RX_BUNDLE_SIZE);
2332 gather_idx = 0;
2333 rq->offload_bundles++;
2335 return (gather_idx);
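/*
 * rx_offload() batches incoming offload packets: mbufs accumulate in
 * rx_gather[] and cxgb_ofld_recv() is called only once RX_BUNDLE_SIZE of
 * them are queued; deliver_partial_bundle() above flushes whatever is
 * left over at the end of a processing run.
 */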
2337 #endif
2339 static void
2340 restart_tx(struct sge_qset *qs)
2342 #ifdef __FreeBSD__
2343 struct adapter *sc = qs->port->adapter;
2344 #endif
2346 if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2347 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2348 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2349 qs->txq[TXQ_OFLD].restarts++;
2350 #ifdef __FreeBSD__
2351 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2352 #endif
2353 #ifdef __NetBSD__
2354 workqueue_enqueue(qs->txq[TXQ_OFLD].qresume_task.wq, &qs->txq[TXQ_OFLD].qresume_task.w, NULL);
2355 #endif
2357 if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2358 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2359 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2360 qs->txq[TXQ_CTRL].restarts++;
2361 #ifdef __FreeBSD__
2362 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2363 #endif
2364 #ifdef __NetBSD__
2365 workqueue_enqueue(qs->txq[TXQ_CTRL].qresume_task.wq, &qs->txq[TXQ_CTRL].qresume_task.w, NULL);
2366 #endif
2371 * t3_sge_alloc_qset - initialize an SGE queue set
2372 * @sc: the controller softc
2373 * @id: the queue set id
2374 * @nports: how many Ethernet ports will be using this queue set
2375 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2376 * @p: configuration parameters for this queue set
2377 * @ntxq: number of Tx queues for the queue set
2378 * @pi: port info for queue set
2380 * Allocate resources and initialize an SGE queue set. A queue set
2381 * comprises a response queue, two Rx free-buffer queues, and up to 3
2382 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2383 * queue, offload queue, and control queue.
2385 int
2386 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2387 const struct qset_params *p, int ntxq, struct port_info *pi)
2389 struct sge_qset *q = &sc->sge.qs[id];
2390 int i, ret = 0;
2392 init_qset_cntxt(q, id);
2394 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2395 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2396 &q->fl[0].desc, &q->fl[0].sdesc,
2397 &q->fl[0].desc_tag, &q->fl[0].desc_map,
2398 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2399 goto err;
2402 if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2403 sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2404 &q->fl[1].desc, &q->fl[1].sdesc,
2405 &q->fl[1].desc_tag, &q->fl[1].desc_map,
2406 sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2407 goto err;
2410 if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2411 &q->rspq.phys_addr, &q->rspq.desc, NULL,
2412 &q->rspq.desc_tag, &q->rspq.desc_map,
2413 NULL, NULL)) != 0) {
2414 goto err;
2417 for (i = 0; i < ntxq; ++i) {
2419 * The control queue always uses immediate data so does not
2420 * need to keep track of any mbufs.
2421 * XXX Placeholder for future TOE support.
2423 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2425 if ((ret = alloc_ring(sc, p->txq_size[i],
2426 sizeof(struct tx_desc), sz,
2427 &q->txq[i].phys_addr, &q->txq[i].desc,
2428 &q->txq[i].sdesc, &q->txq[i].desc_tag,
2429 &q->txq[i].desc_map,
2430 sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2431 goto err;
2433 mbufq_init(&q->txq[i].sendq);
2434 q->txq[i].gen = 1;
2435 q->txq[i].size = p->txq_size[i];
2436 #ifdef __FreeBSD__
2437 snprintf(q->txq[i].lockbuf, TXQ_NAME_LEN, "t3 txq lock %d:%d:%d",
2438 device_get_unit(sc->dev), irq_vec_idx, i);
2439 #endif
2440 #ifdef __NetBSD__
2441 snprintf(q->txq[i].lockbuf, TXQ_NAME_LEN, "t3 txq lock %d:%d:%d",
2442 0, irq_vec_idx, i);
2443 #endif
2444 MTX_INIT(&q->txq[i].lock, q->txq[i].lockbuf, NULL, MTX_DEF);
2447 q->txq[TXQ_ETH].port = pi;
2449 #ifdef __FreeBSD__
2450 TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2451 TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2452 TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, &q->txq[TXQ_ETH]);
2453 TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, &q->txq[TXQ_OFLD]);
2454 #endif
2455 #ifdef __NetBSD__
2456 q->txq[TXQ_OFLD].qresume_task.name = "restart_offloadq";
2457 q->txq[TXQ_OFLD].qresume_task.func = restart_offloadq;
2458 q->txq[TXQ_OFLD].qresume_task.context = q;
2459 kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &q->txq[TXQ_OFLD].qresume_task, NULL, "cxgb_make_task");
2461 q->txq[TXQ_CTRL].qresume_task.name = "restart_ctrlq";
2462 q->txq[TXQ_CTRL].qresume_task.func = restart_ctrlq;
2463 q->txq[TXQ_CTRL].qresume_task.context = q;
2464 kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &q->txq[TXQ_CTRL].qresume_task, NULL, "cxgb_make_task");
2466 q->txq[TXQ_ETH].qreclaim_task.name = "sge_txq_reclaim_handler";
2467 q->txq[TXQ_ETH].qreclaim_task.func = sge_txq_reclaim_handler;
2468 q->txq[TXQ_ETH].qreclaim_task.context = &q->txq[TXQ_ETH];
2469 kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &q->txq[TXQ_ETH].qreclaim_task, NULL, "cxgb_make_task");
2471 q->txq[TXQ_OFLD].qreclaim_task.name = "sge_txq_reclaim_handler";
2472 q->txq[TXQ_OFLD].qreclaim_task.func = sge_txq_reclaim_handler;
2473 q->txq[TXQ_OFLD].qreclaim_task.context = &q->txq[TXQ_OFLD];
2474 kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &q->txq[TXQ_OFLD].qreclaim_task, NULL, "cxgb_make_task");
2475 #endif
2477 q->fl[0].gen = q->fl[1].gen = 1;
2478 q->fl[0].size = p->fl_size;
2479 q->fl[1].size = p->jumbo_size;
2481 q->rspq.gen = 1;
2482 q->rspq.cidx = 0;
2483 q->rspq.size = p->rspq_size;
2485 q->txq[TXQ_ETH].stop_thres = nports *
2486 flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
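/*
 * stop_thres reserves enough Ethernet Tx descriptors for one maximally
 * fragmented packet (an SGL of TX_MAX_SEGS + 1 entries plus header flits)
 * per port before the queue is treated as full.
 */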
2488 q->fl[0].buf_size = MCLBYTES;
2489 #ifdef __FreeBSD__
2490 q->fl[0].zone = zone_clust;
2491 q->fl[0].type = EXT_CLUSTER;
2492 #endif
2493 q->fl[1].buf_size = MJUMPAGESIZE;
2494 #ifdef __FreeBSD__
2495 q->fl[1].zone = zone_jumbop;
2496 q->fl[1].type = EXT_JUMBOP;
2497 #endif
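/*
 * Free list 0 holds regular MCLBYTES clusters for small frames and free
 * list 1 holds MJUMPAGESIZE buffers for jumbo frames; the F_RSPD_FLQ flag
 * in each response descriptor tells get_packet() which list the buffer
 * came from.
 */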
2499 q->lro.enabled = lro_default;
2501 mtx_lock(&sc->sge.reg_lock);
2502 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2503 q->rspq.phys_addr, q->rspq.size,
2504 q->fl[0].buf_size, 1, 0);
2505 if (ret) {
2506 printf("error %d from t3_sge_init_rspcntxt\n", ret);
2507 goto err_unlock;
2510 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2511 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2512 q->fl[i].phys_addr, q->fl[i].size,
2513 q->fl[i].buf_size, p->cong_thres, 1,
2514 0);
2515 if (ret) {
2516 printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2517 goto err_unlock;
2521 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2522 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2523 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2524 1, 0);
2525 if (ret) {
2526 printf("error %d from t3_sge_init_ecntxt\n", ret);
2527 goto err_unlock;
2530 if (ntxq > 1) {
2531 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2532 USE_GTS, SGE_CNTXT_OFLD, id,
2533 q->txq[TXQ_OFLD].phys_addr,
2534 q->txq[TXQ_OFLD].size, 0, 1, 0);
2535 if (ret) {
2536 printf("error %d from t3_sge_init_ecntxt\n", ret);
2537 goto err_unlock;
2541 if (ntxq > 2) {
2542 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2543 SGE_CNTXT_CTRL, id,
2544 q->txq[TXQ_CTRL].phys_addr,
2545 q->txq[TXQ_CTRL].size,
2546 q->txq[TXQ_CTRL].token, 1, 0);
2547 if (ret) {
2548 printf("error %d from t3_sge_init_ecntxt\n", ret);
2549 goto err_unlock;
2553 #ifdef __FreeBSD__
2554 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2555 device_get_unit(sc->dev), irq_vec_idx);
2556 #endif
2557 #ifdef __NetBSD__
2558 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2559 0, irq_vec_idx);
2560 #endif
2561 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2563 mtx_unlock(&sc->sge.reg_lock);
2564 t3_update_qset_coalesce(q, p);
2565 q->port = pi;
2567 refill_fl(sc, &q->fl[0], q->fl[0].size);
2568 refill_fl(sc, &q->fl[1], q->fl[1].size);
2569 refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2571 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2572 V_NEWTIMER(q->rspq.holdoff_tmr));
2574 return (0);
2576 err_unlock:
2577 mtx_unlock(&sc->sge.reg_lock);
2578 err:
2579 t3_free_qset(sc, q);
2581 return (ret);
2584 void
2585 t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad)
2587 struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2588 struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2589 struct ifnet *ifp = pi->ifp;
2591 DPRINTF("rx_eth m=%p m->m_data=%p p->iff=%d\n", m, mtod(m, uint8_t *), cpl->iff);
2593 #ifdef __FreeBSD__
2594 if ((ifp->if_capenable & IFCAP_RXCSUM) && !cpl->fragment &&
2595 cpl->csum_valid && cpl->csum == 0xffff) {
2596 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID);
2597 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2598 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID|CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
2599 m->m_pkthdr.csum_data = 0xffff;
2601 #endif
2603 * XXX need to add VLAN support for 6.x
2605 #ifdef VLAN_SUPPORTED
2606 if (__predict_false(cpl->vlan_valid)) {
2607 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2608 m->m_flags |= M_VLANTAG;
2610 #endif
2612 m->m_pkthdr.rcvif = ifp;
2613 #ifdef __FreeBSD__
2614 m->m_pkthdr.header = mtod(m, uint8_t *) + sizeof(*cpl) + ethpad;
2615 #endif
2616 m_explode(m);
2618 * adjust after conversion to mbuf chain
2620 m_adj(m, sizeof(*cpl) + ethpad);
2622 (*ifp->if_input)(ifp, m);
2626 * get_packet - return the next ingress packet buffer from a free list
2627 * @adap: the adapter that received the packet
2628 * @drop_thres: # of remaining buffers before we start dropping packets
2629 * @qs: the qset that the SGE free list holding the packet belongs to
2630 * @mh: the mbuf header, holding pointers to the head and tail of the mbuf chain
2631 * @r: response descriptor
2633 * Get the next packet from a free list and complete setup of the
2634 * mbuf chain. If the packet is small we make a copy and recycle the
2635 * original buffer, otherwise we use the original buffer itself. If a
2636 * positive drop threshold is supplied packets are dropped and their
2637 * buffers recycled if (a) the number of remaining buffers is under the
2638 * threshold and the packet is too big to copy, or (b) the packet should
2639 * be copied but there is no memory for the copy.
2641 #ifdef DISABLE_MBUF_IOVEC
2643 static int
2644 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2645 struct t3_mbuf_hdr *mh, struct rsp_desc *r, struct mbuf *m)
2648 unsigned int len_cq = ntohl(r->len_cq);
2649 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2650 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2651 uint32_t len = G_RSPD_LEN(len_cq);
2652 uint32_t flags = ntohl(r->flags);
2653 uint8_t sopeop = G_RSPD_SOP_EOP(flags);
2654 int ret = 0;
2656 prefetch(sd->cl);
2658 fl->credits--;
2659 #ifdef __FreeBSD__
2660 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2661 #endif
2662 #ifdef __NetBSD__
2663 bus_dmamap_sync(fl->entry_tag, sd->map, 0, len, BUS_DMASYNC_POSTREAD);
2664 #endif
2665 bus_dmamap_unload(fl->entry_tag, sd->map);
2667 m->m_len = len;
2668 m_cljset(m, sd->cl, fl->type);
2670 switch(sopeop) {
2671 case RSPQ_SOP_EOP:
2672 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m));
2673 mh->mh_head = mh->mh_tail = m;
2674 m->m_pkthdr.len = len;
2675 m->m_flags |= M_PKTHDR;
2676 ret = 1;
2677 break;
2678 case RSPQ_NSOP_NEOP:
2679 DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m));
2680 m->m_flags &= ~M_PKTHDR;
2681 if (mh->mh_tail == NULL) {
2682 if (cxgb_debug)
2683 printf("discarding intermediate descriptor entry\n");
2684 m_freem(m);
2685 break;
2687 mh->mh_tail->m_next = m;
2688 mh->mh_tail = m;
2689 mh->mh_head->m_pkthdr.len += len;
2690 ret = 0;
2691 break;
2692 case RSPQ_SOP:
2693 DBG(DBG_RX, ("get_packet: SOP m %p\n", m));
2694 m->m_pkthdr.len = len;
2695 mh->mh_head = mh->mh_tail = m;
2696 m->m_flags |= M_PKTHDR;
2697 ret = 0;
2698 break;
2699 case RSPQ_EOP:
2700 DBG(DBG_RX, ("get_packet: EOP m %p\n", m));
2701 m->m_flags &= ~M_PKTHDR;
2702 mh->mh_head->m_pkthdr.len += len;
2703 mh->mh_tail->m_next = m;
2704 mh->mh_tail = m;
2705 ret = 1;
2706 break;
2708 if (++fl->cidx == fl->size)
2709 fl->cidx = 0;
2711 return (ret);
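/*
 * The SOP/EOP handling above stitches multi-descriptor packets together
 * through the caller's t3_mbuf_hdr: SOP starts a new chain at mh_head,
 * intermediate and EOP buffers are appended at mh_tail, and only EOP (or
 * SOP_EOP) returns 1 to tell the caller the packet is complete.
 */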
2714 #else
2715 static int
2716 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2717 struct mbuf *m, struct rsp_desc *r)
2720 unsigned int len_cq = ntohl(r->len_cq);
2721 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2722 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2723 uint32_t len = G_RSPD_LEN(len_cq);
2724 uint32_t flags = ntohl(r->flags);
2725 uint8_t sopeop = G_RSPD_SOP_EOP(flags);
2726 void *cl;
2727 int ret = 0;
2729 prefetch(sd->cl);
2731 fl->credits--;
2732 #ifdef __FreeBSD__
2733 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2734 #endif
2735 #ifdef __NetBSD__
2736 bus_dmamap_sync(fl->entry_tag, sd->map, 0, len, BUS_DMASYNC_POSTREAD);
2737 #endif
2739 if (recycle_enable && len <= SGE_RX_COPY_THRES && sopeop == RSPQ_SOP_EOP) {
2740 cl = mtod(m, void *);
2741 memcpy(cl, sd->cl, len);
2742 recycle_rx_buf(adap, fl, fl->cidx);
2743 } else {
2744 cl = sd->cl;
2745 bus_dmamap_unload(fl->entry_tag, sd->map);
2747 switch(sopeop) {
2748 case RSPQ_SOP_EOP:
2749 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m));
2750 m->m_len = m->m_pkthdr.len = len;
2751 if (cl == sd->cl)
2752 m_cljset(m, cl, fl->type);
2753 ret = 1;
2754 goto done;
2755 break;
2756 case RSPQ_NSOP_NEOP:
2757 DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m));
2758 ret = 0;
2759 break;
2760 case RSPQ_SOP:
2761 DBG(DBG_RX, ("get_packet: SOP m %p\n", m));
2762 m_iovinit(m);
2763 ret = 0;
2764 break;
2765 case RSPQ_EOP:
2766 DBG(DBG_RX, ("get_packet: EOP m %p\n", m));
2767 ret = 1;
2768 break;
2770 m_iovappend(m, cl, fl->buf_size, len, 0);
2772 done:
2773 if (++fl->cidx == fl->size)
2774 fl->cidx = 0;
2776 return (ret);
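/*
 * Small single-buffer packets (len <= SGE_RX_COPY_THRES with SOP_EOP set)
 * are copied into the caller's mbuf so the DMA cluster can be recycled in
 * place; larger packets take the cluster itself, either attached with
 * m_cljset() or appended to the iovec chain with m_iovappend().
 */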
2778 #endif
2780 * handle_rsp_cntrl_info - handles control information in a response
2781 * @qs: the queue set corresponding to the response
2782 * @flags: the response control flags
2784 * Handles the control information of an SGE response, such as GTS
2785 * indications and completion credits for the queue set's Tx queues.
2786 * HW coalesces credits; we don't do any extra SW coalescing.
2788 static __inline void
2789 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2791 unsigned int credits;
2793 #if USE_GTS
2794 if (flags & F_RSPD_TXQ0_GTS)
2795 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2796 #endif
2797 credits = G_RSPD_TXQ0_CR(flags);
2798 if (credits) {
2799 qs->txq[TXQ_ETH].processed += credits;
2800 if (desc_reclaimable(&qs->txq[TXQ_ETH]) > TX_START_MAX_DESC)
2801 #ifdef __FreeBSD__
2802 taskqueue_enqueue(qs->port->adapter->tq,
2803 &qs->port->timer_reclaim_task);
2804 #endif
2805 #ifdef __NetBSD__
2806 workqueue_enqueue(qs->port->timer_reclaim_task.wq,
2807 &qs->port->timer_reclaim_task.w, NULL);
2808 #endif
2811 credits = G_RSPD_TXQ2_CR(flags);
2812 if (credits)
2813 qs->txq[TXQ_CTRL].processed += credits;
2815 # if USE_GTS
2816 if (flags & F_RSPD_TXQ1_GTS)
2817 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2818 # endif
2819 credits = G_RSPD_TXQ1_CR(flags);
2820 if (credits)
2821 qs->txq[TXQ_OFLD].processed += credits;
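/*
 * Credit mapping used above: TXQ0 credits belong to the Ethernet queue,
 * TXQ1 to the offload queue and TXQ2 to the control queue.  Ethernet
 * credits additionally schedule the reclaim task once enough descriptors
 * become reclaimable.
 */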
2824 static void
2825 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2826 unsigned int sleeping)
2832 * process_responses - process responses from an SGE response queue
2833 * @adap: the adapter
2834 * @qs: the queue set to which the response queue belongs
2835 * @budget: how many responses can be processed in this round
2837 * Process responses from an SGE response queue up to the supplied budget.
2838 * Responses include received packets as well as credits and other events
2839 * for the queues that belong to the response queue's queue set.
2840 * A negative budget is effectively unlimited.
2842 * Additionally choose the interrupt holdoff time for the next interrupt
2843 * on this queue. If the system is under memory shortage use a fairly
2844 * long delay to help recovery.
2846 static int
2847 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2849 struct sge_rspq *rspq = &qs->rspq;
2850 struct rsp_desc *r = &rspq->desc[rspq->cidx];
2851 int budget_left = budget;
2852 unsigned int sleeping = 0;
2853 int lro = qs->lro.enabled;
2854 #ifdef __FreeBSD__
2855 struct mbuf *offload_mbufs[RX_BUNDLE_SIZE];
2856 int ngathered = 0;
2857 #endif
2858 #ifdef DEBUG
2859 static int last_holdoff = 0;
2860 if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2861 printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2862 last_holdoff = rspq->holdoff_tmr;
2864 #endif
2865 rspq->next_holdoff = rspq->holdoff_tmr;
2867 while (__predict_true(budget_left && is_new_response(r, rspq))) {
2868 int eth, eop = 0, ethpad = 0;
2869 uint32_t flags = ntohl(r->flags);
2870 uint32_t rss_csum = *(const uint32_t *)r;
2871 uint32_t rss_hash = r->rss_hdr.rss_hash_val;
2873 eth = (r->rss_hdr.opcode == CPL_RX_PKT);
2875 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2876 /* XXX */
2877 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2878 #ifdef DISABLE_MBUF_IOVEC
2879 if (cxgb_debug)
2880 printf("IMM DATA VALID opcode=0x%x rspq->cidx=%d\n", r->rss_hdr.opcode, rspq->cidx);
2882 if(get_imm_packet(adap, r, &rspq->rspq_mh) == 0) {
2883 rspq->next_holdoff = NOMEM_INTR_DELAY;
2884 budget_left--;
2885 break;
2886 } else {
2887 eop = 1;
2889 #else
2890 struct mbuf *m = NULL;
2892 if (rspq->rspq_mbuf == NULL)
2893 rspq->rspq_mbuf = m_gethdr(M_DONTWAIT, MT_DATA);
2894 else
2895 m = m_gethdr(M_DONTWAIT, MT_DATA);
2898 * XXX revisit me
2900 if (rspq->rspq_mbuf == NULL && m == NULL) {
2901 rspq->next_holdoff = NOMEM_INTR_DELAY;
2902 budget_left--;
2903 break;
2905 if (get_imm_packet(adap, r, rspq->rspq_mbuf, m, flags))
2906 goto skip;
2907 eop = 1;
2908 #endif
2909 rspq->imm_data++;
2910 } else if (r->len_cq) {
2911 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2913 #ifdef DISABLE_MBUF_IOVEC
2914 struct mbuf *m;
2915 m = m_gethdr(M_NOWAIT, MT_DATA);
2917 if (m == NULL) {
2918 log(LOG_WARNING, "failed to get mbuf for packet\n");
2919 break;
2922 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r, m);
2923 #else
2924 if (rspq->rspq_mbuf == NULL)
2925 rspq->rspq_mbuf = m_gethdr(M_DONTWAIT, MT_DATA);
2926 if (rspq->rspq_mbuf == NULL) {
2927 log(LOG_WARNING, "failed to get mbuf for packet\n");
2928 break;
2930 eop = get_packet(adap, drop_thresh, qs, rspq->rspq_mbuf, r);
2931 #endif
2932 ethpad = 2;
2933 } else {
2934 DPRINTF("pure response\n");
2935 rspq->pure_rsps++;
2938 if (flags & RSPD_CTRL_MASK) {
2939 sleeping |= flags & RSPD_GTS_MASK;
2940 handle_rsp_cntrl_info(qs, flags);
2942 #ifndef DISABLE_MBUF_IOVEC
2943 skip:
2944 #endif
2945 r++;
2946 if (__predict_false(++rspq->cidx == rspq->size)) {
2947 rspq->cidx = 0;
2948 rspq->gen ^= 1;
2949 r = rspq->desc;
2952 prefetch(r);
2953 if (++rspq->credits >= (rspq->size / 4)) {
2954 refill_rspq(adap, rspq, rspq->credits);
2955 rspq->credits = 0;
2958 if (eop) {
2959 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *));
2960 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *) + L1_CACHE_BYTES);
2962 if (eth) {
2963 t3_rx_eth_lro(adap, rspq, rspq->rspq_mh.mh_head, ethpad,
2964 rss_hash, rss_csum, lro);
2966 rspq->rspq_mh.mh_head = NULL;
2967 } else {
2968 rspq->rspq_mh.mh_head->m_pkthdr.csum_data = rss_csum;
2970 * XXX size mismatch
2972 m_set_priority(rspq->rspq_mh.mh_head, rss_hash);
2974 #ifdef __FreeBSD__
2975 ngathered = rx_offload(&adap->tdev, rspq,
2976 rspq->rspq_mh.mh_head, offload_mbufs, ngathered);
2977 #endif
2979 __refill_fl(adap, &qs->fl[0]);
2980 __refill_fl(adap, &qs->fl[1]);
2983 --budget_left;
2986 #ifdef __FreeBSD__
2987 deliver_partial_bundle(&adap->tdev, rspq, offload_mbufs, ngathered);
2988 #endif
2989 t3_lro_flush(adap, qs, &qs->lro);
2991 if (sleeping)
2992 check_ring_db(adap, qs, sleeping);
2994 smp_mb(); /* commit Tx queue processed updates */
2995 if (__predict_false(qs->txq_stopped != 0))
2996 restart_tx(qs);
2998 budget -= budget_left;
2999 return (budget);
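/*
 * The value returned is the number of responses actually consumed (the
 * original budget minus what was left over); process_responses_gts()
 * below uses it to decide whether the queue needs to be rearmed via GTS.
 */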
3003 * A helper function that processes responses and issues GTS.
3005 static __inline int
3006 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
3008 int work;
3009 static int last_holdoff = 0;
3011 work = process_responses(adap, rspq_to_qset(rq), -1);
3013 if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
3014 printf("next_holdoff=%d\n", rq->next_holdoff);
3015 last_holdoff = rq->next_holdoff;
3017 if (work)
3018 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3019 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
3020 return work;
3025 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3026 * Handles data events from SGE response queues as well as error and other
3027 * async events as they all use the same interrupt pin. We use one SGE
3028 * response queue per port in this mode and protect all response queues with
3029 * queue 0's lock.
3031 #ifdef __FreeBSD__
3032 void
3033 #endif
3034 #ifdef __NetBSD__
3035 int
3036 #endif
3037 t3b_intr(void *data)
3039 uint32_t i, map;
3040 adapter_t *adap = data;
3041 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3043 t3_write_reg(adap, A_PL_CLI, 0);
3044 map = t3_read_reg(adap, A_SG_DATA_INTR);
3046 if (!map)
3047 #ifdef __FreeBSD__
3048 return;
3049 #endif
3050 #ifdef __NetBSD__
3051 return (FALSE);
3052 #endif
3054 if (__predict_false(map & F_ERRINTR))
3055 #ifdef __FreeBSD__
3056 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3057 #endif
3058 #ifdef __NetBSD__
3059 workqueue_enqueue(adap->slow_intr_task.wq, &adap->slow_intr_task.w, NULL);
3060 #endif
3062 mtx_lock(&q0->lock);
3063 for_each_port(adap, i)
3064 if (map & (1 << i))
3065 process_responses_gts(adap, &adap->sge.qs[i].rspq);
3066 mtx_unlock(&q0->lock);
3068 #ifdef __NetBSD__
3069 return (TRUE);
3070 #endif
3074 * The MSI interrupt handler. This needs to handle data events from SGE
3075 * response queues as well as error and other async events as they all use
3076 * the same MSI vector. We use one SGE response queue per port in this mode
3077 * and protect all response queues with queue 0's lock.
3079 #ifdef __FreeBSD__
3080 void
3081 #endif
3082 #ifdef __NetBSD__
3083 int
3084 #endif
3085 t3_intr_msi(void *data)
3087 adapter_t *adap = data;
3088 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3089 int i, new_packets = 0;
3091 mtx_lock(&q0->lock);
3093 for_each_port(adap, i)
3094 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3095 new_packets = 1;
3096 mtx_unlock(&q0->lock);
3097 if (new_packets == 0)
3098 #ifdef __FreeBSD__
3099 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3100 #endif
3101 #ifdef __NetBSD__
3102 workqueue_enqueue(adap->slow_intr_task.wq, &adap->slow_intr_task.w, NULL);
3104 return (TRUE);
3105 #endif
3108 #ifdef __FreeBSD__
3109 void
3110 #endif
3111 #ifdef __NetBSD__
3112 int
3113 #endif
3114 t3_intr_msix(void *data)
3116 struct sge_qset *qs = data;
3117 adapter_t *adap = qs->port->adapter;
3118 struct sge_rspq *rspq = &qs->rspq;
3120 mtx_lock(&rspq->lock);
3121 if (process_responses_gts(adap, rspq) == 0)
3122 rspq->unhandled_irqs++;
3123 mtx_unlock(&rspq->lock);
3125 #ifdef __NetBSD__
3126 return (TRUE);
3127 #endif
3130 #ifdef __FreeBSD__
3132 * broken by recent mbuf changes
3134 static int
3135 t3_lro_enable(SYSCTL_HANDLER_ARGS)
3137 adapter_t *sc;
3138 int i, j, enabled, err, nqsets = 0;
3140 #ifndef LRO_WORKING
3141 return (0);
3142 #endif
3144 sc = arg1;
3145 enabled = sc->sge.qs[0].lro.enabled;
3146 err = sysctl_handle_int(oidp, &enabled, arg2, req);
3148 if (err != 0)
3149 return (err);
3150 if (enabled == sc->sge.qs[0].lro.enabled)
3151 return (0);
3153 for (i = 0; i < sc->params.nports; i++)
3154 for (j = 0; j < sc->port[i].nqsets; j++)
3155 nqsets++;
3157 for (i = 0; i < nqsets; i++)
3158 sc->sge.qs[i].lro.enabled = enabled;
3160 return (0);
3163 static int
3164 t3_set_coalesce_nsecs(SYSCTL_HANDLER_ARGS)
3166 adapter_t *sc = arg1;
3167 struct qset_params *qsp = &sc->params.sge.qset[0];
3168 int coalesce_nsecs;
3169 struct sge_qset *qs;
3170 int i, j, err, nqsets = 0;
3171 struct mtx *lock;
3173 coalesce_nsecs = qsp->coalesce_nsecs;
3174 err = sysctl_handle_int(oidp, &coalesce_nsecs, arg2, req);
3176 if (err != 0) {
3177 return (err);
3179 if (coalesce_nsecs == qsp->coalesce_nsecs)
3180 return (0);
3182 for (i = 0; i < sc->params.nports; i++)
3183 for (j = 0; j < sc->port[i].nqsets; j++)
3184 nqsets++;
3186 coalesce_nsecs = max(100, coalesce_nsecs);
3188 for (i = 0; i < nqsets; i++) {
3189 qs = &sc->sge.qs[i];
3190 qsp = &sc->params.sge.qset[i];
3191 qsp->coalesce_nsecs = coalesce_nsecs;
3193 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3194 &sc->sge.qs[0].rspq.lock;
3196 mtx_lock(lock);
3197 t3_update_qset_coalesce(qs, qsp);
3198 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3199 V_NEWTIMER(qs->rspq.holdoff_tmr));
3200 mtx_unlock(lock);
3203 return (0);
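/*
 * The handler above clamps the requested coalescing time to at least
 * 100ns and applies it to every queue set, taking the per-queue (MSI-X)
 * or shared (INTx/MSI) response queue lock before reprogramming the
 * holdoff timer through A_SG_GTS.
 */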
3207 void
3208 t3_add_sysctls(adapter_t *sc)
3210 struct sysctl_ctx_list *ctx;
3211 struct sysctl_oid_list *children;
3213 ctx = device_get_sysctl_ctx(sc->dev);
3214 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3216 /* random information */
3217 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3218 "firmware_version",
3219 CTLFLAG_RD, &sc->fw_version,
3220 0, "firmware version");
3222 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3223 "enable_lro",
3224 CTLTYPE_INT|CTLFLAG_RW, sc,
3225 0, t3_lro_enable,
3226 "I", "enable large receive offload");
3228 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3229 "intr_coal",
3230 CTLTYPE_INT|CTLFLAG_RW, sc,
3231 0, t3_set_coalesce_nsecs,
3232 "I", "interrupt coalescing timer (ns)");
3233 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3234 "enable_debug",
3235 CTLFLAG_RW, &cxgb_debug,
3236 0, "enable verbose debugging output");
3238 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3239 "collapse_free",
3240 CTLFLAG_RD, &collapse_free,
3241 0, "frees during collapse");
3242 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3243 "mb_free_vec_free",
3244 CTLFLAG_RD, &mb_free_vec_free,
3245 0, "frees during mb_free_vec");
3246 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3247 "collapse_mbufs",
3248 CTLFLAG_RW, &collapse_mbufs,
3249 0, "collapse mbuf chains into iovecs");
3250 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3251 "txq_overrun",
3252 CTLFLAG_RD, &txq_fills,
3253 0, "#times txq overrun");
3254 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3255 "bogus_imm",
3256 CTLFLAG_RD, &bogus_imm,
3257 0, "#times a bogus immediate response was seen");
3259 #endif
3262 * t3_get_desc - dump an SGE descriptor for debugging purposes
3263 * @qs: the queue set
3264 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
3265 * @idx: the descriptor index in the queue
3266 * @data: where to dump the descriptor contents
3268 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3269 * size of the descriptor.
3271 int
3272 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3273 unsigned char *data)
3275 if (qnum >= 6)
3276 return (EINVAL);
3278 if (qnum < 3) {
3279 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3280 return (EINVAL);
3281 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3282 return sizeof(struct tx_desc);
3285 if (qnum == 3) {
3286 if (!qs->rspq.desc || idx >= qs->rspq.size)
3287 return (EINVAL);
3288 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3289 return sizeof(struct rsp_desc);
3292 qnum -= 4;
3293 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3294 return (EINVAL);
3295 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3296 return sizeof(struct rx_desc);