1 /*****************************************************************************
2 * *
3 * File: sge.c *
4 * $Revision: 1.26 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, see <http://www.gnu.org/licenses/>. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
39 #include "common.h"
41 #include <linux/types.h>
42 #include <linux/errno.h>
43 #include <linux/pci.h>
44 #include <linux/ktime.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/if_vlan.h>
48 #include <linux/skbuff.h>
49 #include <linux/mm.h>
50 #include <linux/tcp.h>
51 #include <linux/ip.h>
52 #include <linux/in.h>
53 #include <linux/if_arp.h>
54 #include <linux/slab.h>
55 #include <linux/prefetch.h>
57 #include "cpl5_cmd.h"
58 #include "sge.h"
59 #include "regs.h"
60 #include "espi.h"
62 /* This belongs in if_ether.h */
63 #define ETH_P_CPL5 0xf
65 #define SGE_CMDQ_N 2
66 #define SGE_FREELQ_N 2
67 #define SGE_CMDQ0_E_N 1024
68 #define SGE_CMDQ1_E_N 128
69 #define SGE_FREEL_SIZE 4096
70 #define SGE_JUMBO_FREEL_SIZE 512
71 #define SGE_FREEL_REFILL_THRESH 16
72 #define SGE_RESPQ_E_N 1024
73 #define SGE_INTRTIMER_NRES 1000
74 #define SGE_RX_SM_BUF_SIZE 1536
75 #define SGE_TX_DESC_MAX_PLEN 16384
77 #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
80 * Period of the TX buffer reclaim timer. This timer does not need to run
81 * frequently as TX buffers are usually reclaimed by new TX packets.
83 #define TX_RECLAIM_PERIOD (HZ / 4)
85 #define M_CMD_LEN 0x7fffffff
86 #define V_CMD_LEN(v) (v)
87 #define G_CMD_LEN(v) ((v) & M_CMD_LEN)
88 #define V_CMD_GEN1(v) ((v) << 31)
89 #define V_CMD_GEN2(v) (v)
90 #define F_CMD_DATAVALID (1 << 1)
91 #define F_CMD_SOP (1 << 2)
92 #define V_CMD_EOP(v) ((v) << 3)
95 * Command queue, receive buffer list, and response queue descriptors.
97 #if defined(__BIG_ENDIAN_BITFIELD)
98 struct cmdQ_e {
99 u32 addr_lo;
100 u32 len_gen;
101 u32 flags;
102 u32 addr_hi;
105 struct freelQ_e {
106 u32 addr_lo;
107 u32 len_gen;
108 u32 gen2;
109 u32 addr_hi;
112 struct respQ_e {
113 u32 Qsleeping : 4;
114 u32 Cmdq1CreditReturn : 5;
115 u32 Cmdq1DmaComplete : 5;
116 u32 Cmdq0CreditReturn : 5;
117 u32 Cmdq0DmaComplete : 5;
118 u32 FreelistQid : 2;
119 u32 CreditValid : 1;
120 u32 DataValid : 1;
121 u32 Offload : 1;
122 u32 Eop : 1;
123 u32 Sop : 1;
124 u32 GenerationBit : 1;
125 u32 BufferLength;
127 #elif defined(__LITTLE_ENDIAN_BITFIELD)
128 struct cmdQ_e {
129 u32 len_gen;
130 u32 addr_lo;
131 u32 addr_hi;
132 u32 flags;
135 struct freelQ_e {
136 u32 len_gen;
137 u32 addr_lo;
138 u32 addr_hi;
139 u32 gen2;
142 struct respQ_e {
143 u32 BufferLength;
144 u32 GenerationBit : 1;
145 u32 Sop : 1;
146 u32 Eop : 1;
147 u32 Offload : 1;
148 u32 DataValid : 1;
149 u32 CreditValid : 1;
150 u32 FreelistQid : 2;
151 u32 Cmdq0DmaComplete : 5;
152 u32 Cmdq0CreditReturn : 5;
153 u32 Cmdq1DmaComplete : 5;
154 u32 Cmdq1CreditReturn : 5;
155 u32 Qsleeping : 4;
157 #endif
160 * SW Context Command and Freelist Queue Descriptors
162 struct cmdQ_ce {
163 struct sk_buff *skb;
164 DEFINE_DMA_UNMAP_ADDR(dma_addr);
165 DEFINE_DMA_UNMAP_LEN(dma_len);
168 struct freelQ_ce {
169 struct sk_buff *skb;
170 DEFINE_DMA_UNMAP_ADDR(dma_addr);
171 DEFINE_DMA_UNMAP_LEN(dma_len);
175 * SW command, freelist and response rings
177 struct cmdQ {
178 unsigned long status; /* HW DMA fetch status */
179 unsigned int in_use; /* # of in-use command descriptors */
180 unsigned int size; /* # of descriptors */
181 unsigned int processed; /* total # of descs HW has processed */
182 unsigned int cleaned; /* total # of descs SW has reclaimed */
183 unsigned int stop_thres; /* SW TX queue suspend threshold */
184 u16 pidx; /* producer index (SW) */
185 u16 cidx; /* consumer index (HW) */
186 u8 genbit; /* current generation (=valid) bit */
187 u8 sop; /* is next entry start of packet? */
188 struct cmdQ_e *entries; /* HW command descriptor Q */
189 struct cmdQ_ce *centries; /* SW command context descriptor Q */
190 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
191 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
194 struct freelQ {
195 unsigned int credits; /* # of available RX buffers */
196 unsigned int size; /* free list capacity */
197 u16 pidx; /* producer index (SW) */
198 u16 cidx; /* consumer index (HW) */
199 u16 rx_buffer_size; /* Buffer size on this free list */
200 u16 dma_offset; /* DMA offset to align IP headers */
201 u16 recycleq_idx; /* skb recycle q to use */
202 u8 genbit; /* current generation (=valid) bit */
203 struct freelQ_e *entries; /* HW freelist descriptor Q */
204 struct freelQ_ce *centries; /* SW freelist context descriptor Q */
205 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
208 struct respQ {
209 unsigned int credits; /* credits to be returned to SGE */
210 unsigned int size; /* # of response Q descriptors */
211 u16 cidx; /* consumer index (SW) */
212 u8 genbit; /* current generation(=valid) bit */
213 struct respQ_e *entries; /* HW response descriptor Q */
214 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
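/*
 * Illustration only (not compiled, per the #if 0 convention used later in
 * this file): how the generation bits above convey descriptor ownership.
 * An entry is valid only while the generation bit the producer wrote into
 * it matches the consumer's current genbit; each wrap of the ring flips
 * genbit, which invalidates everything left over from the previous lap
 * without any explicit clearing. The demo_* helpers are hypothetical;
 * responses_pending() below does exactly this on the real response queue.
 */
#if 0
static int demo_resp_entry_valid(const struct respQ *q)
{
	/* HW produced this entry during the current lap iff the bits match */
	return q->entries[q->cidx].GenerationBit == q->genbit;
}

static void demo_resp_consume_one(struct respQ *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;	/* wrapped around ... */
		q->genbit ^= 1;	/* ... so the next lap expects the other bit */
	}
}
#endif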
217 /* Bit flags for cmdQ.status */
218 enum {
219 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
220 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
223 /* T204 TX SW scheduler */
225 /* Per T204 TX port */
226 struct sched_port {
227 unsigned int avail; /* available bits - quota */
228 unsigned int drain_bits_per_1024ns; /* drain rate */
229 unsigned int speed; /* drain rate, mbps */
230 unsigned int mtu; /* mtu size */
231 struct sk_buff_head skbq; /* pending skbs */
234 /* Per T204 device */
235 struct sched {
236 ktime_t last_updated; /* last time quotas were computed */
237 unsigned int max_avail; /* max bits to be sent to any port */
238 unsigned int port; /* port index (round robin ports) */
239 unsigned int num; /* num skbs in per port queues */
240 struct sched_port p[MAX_NPORTS];
241 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
243 static void restart_sched(unsigned long);
247 * Main SGE data structure
249  * Interrupts are handled by a single CPU and it is likely that on an MP system
250  * the application is migrated to another CPU. In that scenario, we try to
251  * separate the RX (in irq context) and TX state in order to decrease memory
252 * contention.
254 struct sge {
255 struct adapter *adapter; /* adapter backpointer */
256 struct net_device *netdev; /* netdevice backpointer */
257 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
258 struct respQ respQ; /* response Q */
259 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
260 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
261 unsigned int jumbo_fl; /* jumbo freelist Q index */
262 unsigned int intrtimer_nres; /* no-resource interrupt timer */
263 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
264 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
265 struct timer_list espibug_timer;
266 unsigned long espibug_timeout;
267 struct sk_buff *espibug_skb[MAX_NPORTS];
268 u32 sge_control; /* shadow value of sge control reg */
269 struct sge_intr_counts stats;
270 struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
271 struct sched *tx_sched;
272 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
275 static const u8 ch_mac_addr[ETH_ALEN] = {
276 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
280  * Stop the tasklet and free all pending skbs.
282 static void tx_sched_stop(struct sge *sge)
284 struct sched *s = sge->tx_sched;
285 int i;
287 tasklet_kill(&s->sched_tsk);
289 for (i = 0; i < MAX_NPORTS; i++)
290         __skb_queue_purge(&s->p[i].skbq);
294 * t1_sched_update_parms() is called when the MTU or link speed changes. It
295  * re-computes scheduler parameters to cope with the change.
297 unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
298 unsigned int mtu, unsigned int speed)
300 struct sched *s = sge->tx_sched;
301 struct sched_port *p = &s->p[port];
302 unsigned int max_avail_segs;
304     pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
305 if (speed)
306 p->speed = speed;
307 if (mtu)
308 p->mtu = mtu;
310 if (speed || mtu) {
311 unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
312 do_div(drain, (p->mtu + 50) * 1000);
313 p->drain_bits_per_1024ns = (unsigned int) drain;
315 if (p->speed < 1000)
316 p->drain_bits_per_1024ns =
317 90 * p->drain_bits_per_1024ns / 100;
320 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
321 p->drain_bits_per_1024ns -= 16;
322 s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
323 max_avail_segs = max(1U, 4096 / (p->mtu - 40));
324 } else {
325 s->max_avail = 16384;
326 max_avail_segs = max(1U, 9000 / (p->mtu - 40));
329 pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
330 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
331 p->speed, s->max_avail, max_avail_segs,
332 p->drain_bits_per_1024ns);
334 return max_avail_segs * (p->mtu - 40);
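/*
 * Worked example of the drain-rate arithmetic above (illustration only,
 * not compiled; demo_* is a hypothetical helper). A port running at
 * 'speed' Mbps moves speed bits per 1000ns, i.e. speed * 1024 / 1000 bits
 * per 1024ns, and the (mtu - 40) / (mtu + 50) factor derates that for
 * per-packet header and framing overhead. For mtu = 1500 at 1000 Mbps:
 *
 *	1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) = 964
 */
#if 0
static unsigned int demo_drain_bits_per_1024ns(unsigned int mtu,
					       unsigned int speed_mbps)
{
	unsigned long long drain = 1024ULL * speed_mbps * (mtu - 40);

	do_div(drain, (mtu + 50) * 1000);
	return (unsigned int)drain;	/* 964 for mtu=1500, speed=1000 */
}
#endif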
337 #if 0
340 * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
341 * data that can be pushed per port.
343 void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
345 struct sched *s = sge->tx_sched;
346 unsigned int i;
348 s->max_avail = val;
349 for (i = 0; i < MAX_NPORTS; i++)
350 t1_sched_update_parms(sge, i, 0, 0);
354 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
355 * is draining.
357 void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
358 unsigned int val)
360 struct sched *s = sge->tx_sched;
361 struct sched_port *p = &s->p[port];
362 p->drain_bits_per_1024ns = val * 1024 / 1000;
363 t1_sched_update_parms(sge, port, 0, 0);
366 #endif /* 0 */
369 * tx_sched_init() allocates resources and does basic initialization.
371 static int tx_sched_init(struct sge *sge)
373 struct sched *s;
374 int i;
376 s = kzalloc(sizeof (struct sched), GFP_KERNEL);
377 if (!s)
378 return -ENOMEM;
380 pr_debug("tx_sched_init\n");
381 tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
382 sge->tx_sched = s;
384 for (i = 0; i < MAX_NPORTS; i++) {
385 skb_queue_head_init(&s->p[i].skbq);
386 t1_sched_update_parms(sge, i, 1500, 1000);
389 return 0;
393 * sched_update_avail() computes the delta since the last time it was called
394  * and updates the per port quota (number of bits that can be sent to any
395 * port).
397 static inline int sched_update_avail(struct sge *sge)
399 struct sched *s = sge->tx_sched;
400 ktime_t now = ktime_get();
401 unsigned int i;
402 long long delta_time_ns;
404 delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
406 pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
407 if (delta_time_ns < 15000)
408 return 0;
410 for (i = 0; i < MAX_NPORTS; i++) {
411 struct sched_port *p = &s->p[i];
412 unsigned int delta_avail;
414 delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
415 p->avail = min(p->avail + delta_avail, s->max_avail);
418 s->last_updated = now;
420 return 1;
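/*
 * Note on the >> 13 above (a reading, not gospel: the struct comments say
 * the quota is in bits, but sched_skb() compares it against skb->len,
 * which is bytes). The drain rate is in bits per 1024ns, so the bits
 * accrued over delta_time_ns are rate * delta / 1024; dividing by a
 * further 8 converts bits to bytes, and 1024 * 8 = 2^13 folds both steps
 * into the single shift. Hypothetical sketch, not compiled:
 */
#if 0
static unsigned int demo_accrued_quota(unsigned int rate_bits_per_1024ns,
				       long long delta_ns)
{
	/* /1024 for the ns scaling, /8 for bits -> bytes */
	return (rate_bits_per_1024ns * delta_ns) >> 13;
}
#endif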
424 * sched_skb() is called from two different places. In the tx path, any
425 * packet generating load on an output port will call sched_skb()
426 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
427 * context (skb == NULL).
428 * The scheduler only returns a skb (which will then be sent) if the
429 * length of the skb is <= the current quota of the output port.
431 static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
432 unsigned int credits)
434 struct sched *s = sge->tx_sched;
435 struct sk_buff_head *skbq;
436 unsigned int i, len, update = 1;
438 pr_debug("sched_skb %p\n", skb);
439 if (!skb) {
440 if (!s->num)
441 return NULL;
442 } else {
443 skbq = &s->p[skb->dev->if_port].skbq;
444 __skb_queue_tail(skbq, skb);
445 s->num++;
446 skb = NULL;
449 if (credits < MAX_SKB_FRAGS + 1)
450 goto out;
452 again:
453 for (i = 0; i < MAX_NPORTS; i++) {
454 s->port = (s->port + 1) & (MAX_NPORTS - 1);
455 skbq = &s->p[s->port].skbq;
457 skb = skb_peek(skbq);
459 if (!skb)
460 continue;
462 len = skb->len;
463 if (len <= s->p[s->port].avail) {
464 s->p[s->port].avail -= len;
465 s->num--;
466 __skb_unlink(skb, skbq);
467 goto out;
469 skb = NULL;
472 if (update-- && sched_update_avail(sge))
473 goto again;
475 out:
476 /* If there are more pending skbs, we use the hardware to schedule us
477 * again.
479 if (s->num && !skb) {
480 struct cmdQ *q = &sge->cmdQ[0];
481 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
482 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
483 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
484 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
487 pr_debug("sched_skb ret %p\n", skb);
489 return skb;
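/*
 * Note on the round-robin step above: s->port advances with
 * (s->port + 1) & (MAX_NPORTS - 1), which equals modulo MAX_NPORTS only
 * while MAX_NPORTS is a power of two. A hypothetical sketch (not
 * compiled) making that assumption explicit:
 */
#if 0
static unsigned int demo_next_port(unsigned int port)
{
	BUILD_BUG_ON(MAX_NPORTS & (MAX_NPORTS - 1));	/* power of two */
	return (port + 1) & (MAX_NPORTS - 1);
}
#endif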
493 * PIO to indicate that memory mapped Q contains valid descriptor(s).
495 static inline void doorbell_pio(struct adapter *adapter, u32 val)
497 wmb();
498 writel(val, adapter->regs + A_SG_DOORBELL);
502 * Frees all RX buffers on the freelist Q. The caller must make sure that
503 * the SGE is turned off before calling this function.
505 static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
507 unsigned int cidx = q->cidx;
509 while (q->credits--) {
510 struct freelQ_ce *ce = &q->centries[cidx];
512 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
513 dma_unmap_len(ce, dma_len),
514 PCI_DMA_FROMDEVICE);
515 dev_kfree_skb(ce->skb);
516 ce->skb = NULL;
517 if (++cidx == q->size)
518 cidx = 0;
523 * Free RX free list and response queue resources.
525 static void free_rx_resources(struct sge *sge)
527 struct pci_dev *pdev = sge->adapter->pdev;
528 unsigned int size, i;
530 if (sge->respQ.entries) {
531 size = sizeof(struct respQ_e) * sge->respQ.size;
532 pci_free_consistent(pdev, size, sge->respQ.entries,
533 sge->respQ.dma_addr);
536 for (i = 0; i < SGE_FREELQ_N; i++) {
537 struct freelQ *q = &sge->freelQ[i];
539 if (q->centries) {
540 free_freelQ_buffers(pdev, q);
541 kfree(q->centries);
543 if (q->entries) {
544 size = sizeof(struct freelQ_e) * q->size;
545 pci_free_consistent(pdev, size, q->entries,
546 q->dma_addr);
552 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
553 * response queue.
555 static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
557 struct pci_dev *pdev = sge->adapter->pdev;
558 unsigned int size, i;
560 for (i = 0; i < SGE_FREELQ_N; i++) {
561 struct freelQ *q = &sge->freelQ[i];
563 q->genbit = 1;
564 q->size = p->freelQ_size[i];
565 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
566 size = sizeof(struct freelQ_e) * q->size;
567 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
568 if (!q->entries)
569 goto err_no_mem;
571 size = sizeof(struct freelQ_ce) * q->size;
572 q->centries = kzalloc(size, GFP_KERNEL);
573 if (!q->centries)
574 goto err_no_mem;
578 * Calculate the buffer sizes for the two free lists. FL0 accommodates
579 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
580 * including all the sk_buff overhead.
582 * Note: For T2 FL0 and FL1 are reversed.
584 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
585 sizeof(struct cpl_rx_data) +
586 sge->freelQ[!sge->jumbo_fl].dma_offset;
588 size = (16 * 1024) -
589 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
591 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
594 * Setup which skb recycle Q should be used when recycling buffers from
595 * each free list.
597 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
598 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
600 sge->respQ.genbit = 1;
601 sge->respQ.size = SGE_RESPQ_E_N;
602 sge->respQ.credits = 0;
603 size = sizeof(struct respQ_e) * sge->respQ.size;
604 sge->respQ.entries =
605 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
606 if (!sge->respQ.entries)
607 goto err_no_mem;
608 return 0;
610 err_no_mem:
611 free_rx_resources(sge);
612 return -ENOMEM;
616 * Reclaims n TX descriptors and frees the buffers associated with them.
618 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
620 struct cmdQ_ce *ce;
621 struct pci_dev *pdev = sge->adapter->pdev;
622 unsigned int cidx = q->cidx;
624 q->in_use -= n;
625 ce = &q->centries[cidx];
626 while (n--) {
627 if (likely(dma_unmap_len(ce, dma_len))) {
628 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
629 dma_unmap_len(ce, dma_len),
630 PCI_DMA_TODEVICE);
631 if (q->sop)
632 q->sop = 0;
634 if (ce->skb) {
635 dev_kfree_skb_any(ce->skb);
636 q->sop = 1;
638 ce++;
639 if (++cidx == q->size) {
640 cidx = 0;
641 ce = q->centries;
644 q->cidx = cidx;
648 * Free TX resources.
650 * Assumes that SGE is stopped and all interrupts are disabled.
652 static void free_tx_resources(struct sge *sge)
654 struct pci_dev *pdev = sge->adapter->pdev;
655 unsigned int size, i;
657 for (i = 0; i < SGE_CMDQ_N; i++) {
658 struct cmdQ *q = &sge->cmdQ[i];
660 if (q->centries) {
661 if (q->in_use)
662 free_cmdQ_buffers(sge, q, q->in_use);
663 kfree(q->centries);
665 if (q->entries) {
666 size = sizeof(struct cmdQ_e) * q->size;
667 pci_free_consistent(pdev, size, q->entries,
668 q->dma_addr);
674 * Allocates basic TX resources, consisting of memory mapped command Qs.
676 static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
678 struct pci_dev *pdev = sge->adapter->pdev;
679 unsigned int size, i;
681 for (i = 0; i < SGE_CMDQ_N; i++) {
682 struct cmdQ *q = &sge->cmdQ[i];
684 q->genbit = 1;
685 q->sop = 1;
686 q->size = p->cmdQ_size[i];
687 q->in_use = 0;
688 q->status = 0;
689 q->processed = q->cleaned = 0;
690 q->stop_thres = 0;
691 spin_lock_init(&q->lock);
692 size = sizeof(struct cmdQ_e) * q->size;
693 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
694 if (!q->entries)
695 goto err_no_mem;
697 size = sizeof(struct cmdQ_ce) * q->size;
698 q->centries = kzalloc(size, GFP_KERNEL);
699 if (!q->centries)
700 goto err_no_mem;
704 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
705 * only. For queue 0 set the stop threshold so we can handle one more
706 * packet from each port, plus reserve an additional 24 entries for
707 * Ethernet packets only. Queue 1 never suspends nor do we reserve
708 * space for Ethernet packets.
710 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
711 (MAX_SKB_FRAGS + 1);
712 return 0;
714 err_no_mem:
715 free_tx_resources(sge);
716 return -ENOMEM;
719 static inline void setup_ring_params(struct adapter *adapter, u64 addr,
720 u32 size, int base_reg_lo,
721 int base_reg_hi, int size_reg)
723 writel((u32)addr, adapter->regs + base_reg_lo);
724 writel(addr >> 32, adapter->regs + base_reg_hi);
725 writel(size, adapter->regs + size_reg);
729 * Enable/disable VLAN acceleration.
731 void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
733 struct sge *sge = adapter->sge;
735 if (features & NETIF_F_HW_VLAN_CTAG_RX)
736 sge->sge_control |= F_VLAN_XTRACT;
737 else
738 sge->sge_control &= ~F_VLAN_XTRACT;
739 if (adapter->open_device_map) {
740 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
741 readl(adapter->regs + A_SG_CONTROL); /* flush */
746  * Programs the various SGE registers. The engine is not yet enabled,
747  * but sge->sge_control is set up and ready to go.
749 static void configure_sge(struct sge *sge, struct sge_params *p)
751 struct adapter *ap = sge->adapter;
753 writel(0, ap->regs + A_SG_CONTROL);
754 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
755 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
756 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
757 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
758 setup_ring_params(ap, sge->freelQ[0].dma_addr,
759 sge->freelQ[0].size, A_SG_FL0BASELWR,
760 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
761 setup_ring_params(ap, sge->freelQ[1].dma_addr,
762 sge->freelQ[1].size, A_SG_FL1BASELWR,
763 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
765 /* The threshold comparison uses <. */
766 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
768 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
769 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
770 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
772 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
773 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
774 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
775 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
777 #if defined(__BIG_ENDIAN_BITFIELD)
778 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
779 #endif
781 /* Initialize no-resource timer */
782 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
784 t1_sge_set_coalesce_params(sge, p);
788 * Return the payload capacity of the jumbo free-list buffers.
790 static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
792 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
793 sge->freelQ[sge->jumbo_fl].dma_offset -
794 sizeof(struct cpl_rx_data);
798 * Frees all SGE related resources and the sge structure itself
800 void t1_sge_destroy(struct sge *sge)
802 int i;
804 for_each_port(sge->adapter, i)
805 free_percpu(sge->port_stats[i]);
807 kfree(sge->tx_sched);
808 free_tx_resources(sge);
809 free_rx_resources(sge);
810 kfree(sge);
814 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
815 * context Q) until the Q is full or alloc_skb fails.
817 * It is possible that the generation bits already match, indicating that the
818 * buffer is already valid and nothing needs to be done. This happens when we
819 * copied a received buffer into a new sk_buff during the interrupt processing.
821 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
822 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
823 * aligned.
825 static void refill_free_list(struct sge *sge, struct freelQ *q)
827 struct pci_dev *pdev = sge->adapter->pdev;
828 struct freelQ_ce *ce = &q->centries[q->pidx];
829 struct freelQ_e *e = &q->entries[q->pidx];
830 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
832 while (q->credits < q->size) {
833 struct sk_buff *skb;
834 dma_addr_t mapping;
836 skb = dev_alloc_skb(q->rx_buffer_size);
837 if (!skb)
838 break;
840 skb_reserve(skb, q->dma_offset);
841 mapping = pci_map_single(pdev, skb->data, dma_len,
842 PCI_DMA_FROMDEVICE);
843 skb_reserve(skb, sge->rx_pkt_pad);
845 ce->skb = skb;
846 dma_unmap_addr_set(ce, dma_addr, mapping);
847 dma_unmap_len_set(ce, dma_len, dma_len);
848 e->addr_lo = (u32)mapping;
849 e->addr_hi = (u64)mapping >> 32;
850 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
851 wmb();
852 e->gen2 = V_CMD_GEN2(q->genbit);
854 e++;
855 ce++;
856 if (++q->pidx == q->size) {
857 q->pidx = 0;
858 q->genbit ^= 1;
859 ce = q->centries;
860 e = q->entries;
862 q->credits++;
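/*
 * Illustration (not compiled) of the write ordering above: the SGE treats
 * a freelist descriptor as valid only once both generation fields carry
 * the current genbit, so the address words and len_gen are written first,
 * wmb() orders them, and gen2 is flipped last. The hardware can therefore
 * never fetch a half-initialized entry. demo_* is a hypothetical helper
 * restating the loop body above:
 */
#if 0
static void demo_publish_fl_entry(struct freelQ_e *e, dma_addr_t mapping,
				  unsigned int len, unsigned int genbit)
{
	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(genbit);
	wmb();				/* everything above lands first */
	e->gen2 = V_CMD_GEN2(genbit);	/* publish: entry now valid to HW */
}
#endif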
867 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
868 * of both rings, we go into 'few interrupt mode' in order to give the system
869 * time to free up resources.
871 static void freelQs_empty(struct sge *sge)
873 struct adapter *adapter = sge->adapter;
874 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
875 u32 irqholdoff_reg;
877 refill_free_list(sge, &sge->freelQ[0]);
878 refill_free_list(sge, &sge->freelQ[1]);
880 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
881 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
882 irq_reg |= F_FL_EXHAUSTED;
883 irqholdoff_reg = sge->fixed_intrtimer;
884 } else {
885 /* Clear the F_FL_EXHAUSTED interrupts for now */
886 irq_reg &= ~F_FL_EXHAUSTED;
887 irqholdoff_reg = sge->intrtimer_nres;
889 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
890 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
892 /* We reenable the Qs to force a freelist GTS interrupt later */
893 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
896 #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
897 #define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
898 #define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
899 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
902 * Disable SGE Interrupts
904 void t1_sge_intr_disable(struct sge *sge)
906 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
908 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
909 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
913 * Enable SGE interrupts.
915 void t1_sge_intr_enable(struct sge *sge)
917 u32 en = SGE_INT_ENABLE;
918 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
920 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
921 en &= ~F_PACKET_TOO_BIG;
922 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
923 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
927 * Clear SGE interrupts.
929 void t1_sge_intr_clear(struct sge *sge)
931 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
932 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
936 * SGE 'Error' interrupt handler
938 int t1_sge_intr_error_handler(struct sge *sge)
940 struct adapter *adapter = sge->adapter;
941 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
943 if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
944 cause &= ~F_PACKET_TOO_BIG;
945 if (cause & F_RESPQ_EXHAUSTED)
946 sge->stats.respQ_empty++;
947 if (cause & F_RESPQ_OVERFLOW) {
948 sge->stats.respQ_overflow++;
949 pr_alert("%s: SGE response queue overflow\n",
950 adapter->name);
952 if (cause & F_FL_EXHAUSTED) {
953 sge->stats.freelistQ_empty++;
954 freelQs_empty(sge);
956 if (cause & F_PACKET_TOO_BIG) {
957 sge->stats.pkt_too_big++;
958 pr_alert("%s: SGE max packet size exceeded\n",
959 adapter->name);
961 if (cause & F_PACKET_MISMATCH) {
962 sge->stats.pkt_mismatch++;
963 pr_alert("%s: SGE packet mismatch\n", adapter->name);
965 if (cause & SGE_INT_FATAL)
966 t1_fatal_err(adapter);
968 writel(cause, adapter->regs + A_SG_INT_CAUSE);
969 return 0;
972 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
974 return &sge->stats;
977 void t1_sge_get_port_stats(const struct sge *sge, int port,
978 struct sge_port_stats *ss)
980 int cpu;
982 memset(ss, 0, sizeof(*ss));
983 for_each_possible_cpu(cpu) {
984 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
986 ss->rx_cso_good += st->rx_cso_good;
987 ss->tx_cso += st->tx_cso;
988 ss->tx_tso += st->tx_tso;
989 ss->tx_need_hdrroom += st->tx_need_hdrroom;
990 ss->vlan_xtract += st->vlan_xtract;
991 ss->vlan_insert += st->vlan_insert;
996 * recycle_fl_buf - recycle a free list buffer
997 * @fl: the free list
998 * @idx: index of buffer to recycle
1000 * Recycles the specified buffer on the given free list by adding it at
1001 * the next available slot on the list.
1003 static void recycle_fl_buf(struct freelQ *fl, int idx)
1005 struct freelQ_e *from = &fl->entries[idx];
1006 struct freelQ_e *to = &fl->entries[fl->pidx];
1008 fl->centries[fl->pidx] = fl->centries[idx];
1009 to->addr_lo = from->addr_lo;
1010 to->addr_hi = from->addr_hi;
1011 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
1012 wmb();
1013 to->gen2 = V_CMD_GEN2(fl->genbit);
1014 fl->credits++;
1016 if (++fl->pidx == fl->size) {
1017 fl->pidx = 0;
1018 fl->genbit ^= 1;
1022 static int copybreak __read_mostly = 256;
1023 module_param(copybreak, int, 0);
1024 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1027 * get_packet - return the next ingress packet buffer
1028 * @pdev: the PCI device that received the packet
1029 * @fl: the SGE free list holding the packet
1030 * @len: the actual packet length, excluding any SGE padding
1032 * Get the next packet from a free list and complete setup of the
1033 * sk_buff. If the packet is small we make a copy and recycle the
1034 * original buffer, otherwise we use the original buffer itself. If a
1035 * positive drop threshold is supplied packets are dropped and their
1036 * buffers recycled if (a) the number of remaining buffers is under the
1037 * threshold and the packet is too big to copy, or (b) the packet should
1038 * be copied but there is no memory for the copy.
1040 static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1041 struct freelQ *fl, unsigned int len)
1043 struct sk_buff *skb;
1044 const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1046 if (len < copybreak) {
1047 skb = netdev_alloc_skb_ip_align(NULL, len);
1048 if (!skb)
1049 goto use_orig_buf;
1051 skb_put(skb, len);
1052 pci_dma_sync_single_for_cpu(pdev,
1053 dma_unmap_addr(ce, dma_addr),
1054 dma_unmap_len(ce, dma_len),
1055 PCI_DMA_FROMDEVICE);
1056 skb_copy_from_linear_data(ce->skb, skb->data, len);
1057 pci_dma_sync_single_for_device(pdev,
1058 dma_unmap_addr(ce, dma_addr),
1059 dma_unmap_len(ce, dma_len),
1060 PCI_DMA_FROMDEVICE);
1061 recycle_fl_buf(fl, fl->cidx);
1062 return skb;
1065 use_orig_buf:
1066 if (fl->credits < 2) {
1067 recycle_fl_buf(fl, fl->cidx);
1068 return NULL;
1071 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
1072 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1073 skb = ce->skb;
1074 prefetch(skb->data);
1076 skb_put(skb, len);
1077 return skb;
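/*
 * Simplified sketch (not compiled, hypothetical helper) of the copybreak
 * policy implemented above: small packets are copied into a fresh skb so
 * the original DMA buffer can be recycled in place; large packets hand
 * over the buffer itself, and refill_free_list() allocates a replacement
 * later. The real code also falls back to the original buffer when the
 * copy allocation fails.
 */
#if 0
static int demo_copybreak_action(unsigned int len, unsigned int fl_credits)
{
	if (len < copybreak)
		return 1;			/* copy, recycle buffer */
	return fl_credits < 2 ? -1 : 0;		/* -1 drop, 0 hand off */
}
#endif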
1081 * unexpected_offload - handle an unexpected offload packet
1082 * @adapter: the adapter
1083 * @fl: the free list that received the packet
1085 * Called when we receive an unexpected offload packet (e.g., the TOE
1086 * function is disabled or the card is a NIC). Prints a message and
1087 * recycles the buffer.
1089 static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1091 struct freelQ_ce *ce = &fl->centries[fl->cidx];
1092 struct sk_buff *skb = ce->skb;
1094 pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
1095 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1096 pr_err("%s: unexpected offload packet, cmd %u\n",
1097 adapter->name, *skb->data);
1098 recycle_fl_buf(fl, fl->cidx);
1102 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
1103 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
1104 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
1105 * Note that the *_large_page_tx_descs stuff will be optimized out when
1106 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
1108 * compute_large_page_descs() computes how many additional descriptors are
1109 * required to break down the stack's request.
1111 static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1113 unsigned int count = 0;
1115 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1116 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1117 unsigned int i, len = skb_headlen(skb);
1118 while (len > SGE_TX_DESC_MAX_PLEN) {
1119 count++;
1120 len -= SGE_TX_DESC_MAX_PLEN;
1122 for (i = 0; nfrags--; i++) {
1123 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1124 len = skb_frag_size(frag);
1125 while (len > SGE_TX_DESC_MAX_PLEN) {
1126 count++;
1127 len -= SGE_TX_DESC_MAX_PLEN;
1131 return count;
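/*
 * Worked example (not compiled, hypothetical helper) for the splitting
 * above: with PAGE_SIZE = 64KB, a 60000-byte linear area needs three
 * extra descriptors on top of the one it gets anyway, since
 * 60000 - 3 * 16384 = 10848 <= SGE_TX_DESC_MAX_PLEN.
 */
#if 0
static unsigned int demo_extra_descs(unsigned int len)
{
	unsigned int extra = 0;

	while (len > SGE_TX_DESC_MAX_PLEN) {
		extra++;
		len -= SGE_TX_DESC_MAX_PLEN;
	}
	return extra;	/* 3 for len == 60000 */
}
#endif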
1135 * Write a cmdQ entry.
1137 * Since this function writes the 'flags' field, it must not be used to
1138 * write the first cmdQ entry.
1140 static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
1141 unsigned int len, unsigned int gen,
1142 unsigned int eop)
1144 BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
1146 e->addr_lo = (u32)mapping;
1147 e->addr_hi = (u64)mapping >> 32;
1148 e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
1149 e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
1153 * See comment for previous function.
1155  * write_large_page_tx_descs() writes additional SGE tx descriptors if
1156 * *desc_len exceeds HW's capability.
1158 static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
1159 struct cmdQ_e **e,
1160 struct cmdQ_ce **ce,
1161 unsigned int *gen,
1162 dma_addr_t *desc_mapping,
1163 unsigned int *desc_len,
1164 unsigned int nfrags,
1165 struct cmdQ *q)
1167 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1168 struct cmdQ_e *e1 = *e;
1169 struct cmdQ_ce *ce1 = *ce;
1171 while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
1172 *desc_len -= SGE_TX_DESC_MAX_PLEN;
1173 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
1174 *gen, nfrags == 0 && *desc_len == 0);
1175 ce1->skb = NULL;
1176 dma_unmap_len_set(ce1, dma_len, 0);
1177 *desc_mapping += SGE_TX_DESC_MAX_PLEN;
1178 if (*desc_len) {
1179 ce1++;
1180 e1++;
1181 if (++pidx == q->size) {
1182 pidx = 0;
1183 *gen ^= 1;
1184 ce1 = q->centries;
1185 e1 = q->entries;
1189 *e = e1;
1190 *ce = ce1;
1192 return pidx;
1196 * Write the command descriptors to transmit the given skb starting at
1197 * descriptor pidx with the given generation.
1199 static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1200 unsigned int pidx, unsigned int gen,
1201 struct cmdQ *q)
1203 dma_addr_t mapping, desc_mapping;
1204 struct cmdQ_e *e, *e1;
1205 struct cmdQ_ce *ce;
1206 unsigned int i, flags, first_desc_len, desc_len,
1207 nfrags = skb_shinfo(skb)->nr_frags;
1209 e = e1 = &q->entries[pidx];
1210 ce = &q->centries[pidx];
1212 mapping = pci_map_single(adapter->pdev, skb->data,
1213 skb_headlen(skb), PCI_DMA_TODEVICE);
1215 desc_mapping = mapping;
1216 desc_len = skb_headlen(skb);
1218 flags = F_CMD_DATAVALID | F_CMD_SOP |
1219 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
1220 V_CMD_GEN2(gen);
1221 first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
1222 desc_len : SGE_TX_DESC_MAX_PLEN;
1223 e->addr_lo = (u32)desc_mapping;
1224 e->addr_hi = (u64)desc_mapping >> 32;
1225 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
1226 ce->skb = NULL;
1227 dma_unmap_len_set(ce, dma_len, 0);
1229 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
1230 desc_len > SGE_TX_DESC_MAX_PLEN) {
1231 desc_mapping += first_desc_len;
1232 desc_len -= first_desc_len;
1233 e1++;
1234 ce++;
1235 if (++pidx == q->size) {
1236 pidx = 0;
1237 gen ^= 1;
1238 e1 = q->entries;
1239 ce = q->centries;
1241 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1242 &desc_mapping, &desc_len,
1243 nfrags, q);
1245 if (likely(desc_len))
1246 write_tx_desc(e1, desc_mapping, desc_len, gen,
1247 nfrags == 0);
1250 ce->skb = NULL;
1251 dma_unmap_addr_set(ce, dma_addr, mapping);
1252 dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
1254 for (i = 0; nfrags--; i++) {
1255 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1256 e1++;
1257 ce++;
1258 if (++pidx == q->size) {
1259 pidx = 0;
1260 gen ^= 1;
1261 e1 = q->entries;
1262 ce = q->centries;
1265 mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
1266 skb_frag_size(frag), DMA_TO_DEVICE);
1267 desc_mapping = mapping;
1268 desc_len = skb_frag_size(frag);
1270 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1271 &desc_mapping, &desc_len,
1272 nfrags, q);
1273 if (likely(desc_len))
1274 write_tx_desc(e1, desc_mapping, desc_len, gen,
1275 nfrags == 0);
1276 ce->skb = NULL;
1277 dma_unmap_addr_set(ce, dma_addr, mapping);
1278 dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
1280 ce->skb = skb;
1281 wmb();
1282 e->flags = flags;
1286 * Clean up completed Tx buffers.
1288 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
1290 unsigned int reclaim = q->processed - q->cleaned;
1292 if (reclaim) {
1293 pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
1294 q->processed, q->cleaned);
1295 free_cmdQ_buffers(sge, q, reclaim);
1296 q->cleaned += reclaim;
1301 * Called from tasklet. Checks the scheduler for any
1302 * pending skbs that can be sent.
1304 static void restart_sched(unsigned long arg)
1306 struct sge *sge = (struct sge *) arg;
1307 struct adapter *adapter = sge->adapter;
1308 struct cmdQ *q = &sge->cmdQ[0];
1309 struct sk_buff *skb;
1310 unsigned int credits, queued_skb = 0;
1312 spin_lock(&q->lock);
1313 reclaim_completed_tx(sge, q);
1315 credits = q->size - q->in_use;
1316 pr_debug("restart_sched credits=%d\n", credits);
1317 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1318 unsigned int genbit, pidx, count;
1319 count = 1 + skb_shinfo(skb)->nr_frags;
1320 count += compute_large_page_tx_descs(skb);
1321 q->in_use += count;
1322 genbit = q->genbit;
1323 pidx = q->pidx;
1324 q->pidx += count;
1325 if (q->pidx >= q->size) {
1326 q->pidx -= q->size;
1327 q->genbit ^= 1;
1329 write_tx_descs(adapter, skb, pidx, genbit, q);
1330 credits = q->size - q->in_use;
1331 queued_skb = 1;
1334 if (queued_skb) {
1335 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1336 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1337 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1338 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1341 spin_unlock(&q->lock);
1345 * sge_rx - process an ingress ethernet packet
1346 * @sge: the sge structure
1347 * @fl: the free list that contains the packet buffer
1348 * @len: the packet length
1350  *      Process an ingress ethernet packet and deliver it to the stack.
1352 static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1354 struct sk_buff *skb;
1355 const struct cpl_rx_pkt *p;
1356 struct adapter *adapter = sge->adapter;
1357 struct sge_port_stats *st;
1358 struct net_device *dev;
1360 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
1361 if (unlikely(!skb)) {
1362 sge->stats.rx_drops++;
1363 return;
1366 p = (const struct cpl_rx_pkt *) skb->data;
1367 if (p->iff >= adapter->params.nports) {
1368 kfree_skb(skb);
1369 return;
1371 __skb_pull(skb, sizeof(*p));
1373 st = this_cpu_ptr(sge->port_stats[p->iff]);
1374 dev = adapter->port[p->iff].dev;
1376 skb->protocol = eth_type_trans(skb, dev);
1377 if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
1378 skb->protocol == htons(ETH_P_IP) &&
1379 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
1380 ++st->rx_cso_good;
1381 skb->ip_summed = CHECKSUM_UNNECESSARY;
1382 } else
1383 skb_checksum_none_assert(skb);
1385 if (p->vlan_valid) {
1386 st->vlan_xtract++;
1387 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
1389 netif_receive_skb(skb);
1393 * Returns true if a command queue has enough available descriptors that
1394 * we can resume Tx operation after temporarily disabling its packet queue.
1396 static inline int enough_free_Tx_descs(const struct cmdQ *q)
1398 unsigned int r = q->processed - q->cleaned;
1400 return q->in_use - r < (q->size >> 1);
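/*
 * The half-ring threshold above provides hysteresis: a queue is stopped
 * as soon as fewer than stop_thres descriptors remain, but is only woken
 * once at least half the ring is reclaimable, so it cannot oscillate on
 * every completed descriptor. Hypothetical restatement (not compiled):
 */
#if 0
static int demo_may_wake(const struct cmdQ *q)
{
	unsigned int in_flight = q->in_use - (q->processed - q->cleaned);

	return in_flight < (q->size >> 1);
}
#endif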
1404 * Called when sufficient space has become available in the SGE command queues
1405 * after the Tx packet schedulers have been suspended to restart the Tx path.
1407 static void restart_tx_queues(struct sge *sge)
1409 struct adapter *adap = sge->adapter;
1410 int i;
1412 if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1413 return;
1415 for_each_port(adap, i) {
1416 struct net_device *nd = adap->port[i].dev;
1418 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1419 netif_running(nd)) {
1420 sge->stats.cmdQ_restarted[2]++;
1421 netif_wake_queue(nd);
1427 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1428 * information.
1430 static unsigned int update_tx_info(struct adapter *adapter,
1431 unsigned int flags,
1432 unsigned int pr0)
1434 struct sge *sge = adapter->sge;
1435 struct cmdQ *cmdq = &sge->cmdQ[0];
1437 cmdq->processed += pr0;
1438 if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
1439 freelQs_empty(sge);
1440 flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
1442 if (flags & F_CMDQ0_ENABLE) {
1443 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1445 if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1446 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1447 set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1448 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1450 if (sge->tx_sched)
1451 tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
1453 flags &= ~F_CMDQ0_ENABLE;
1456 if (unlikely(sge->stopped_tx_queues != 0))
1457 restart_tx_queues(sge);
1459 return flags;
1463 * Process SGE responses, up to the supplied budget. Returns the number of
1464 * responses processed. A negative budget is effectively unlimited.
1466 static int process_responses(struct adapter *adapter, int budget)
1468 struct sge *sge = adapter->sge;
1469 struct respQ *q = &sge->respQ;
1470 struct respQ_e *e = &q->entries[q->cidx];
1471 int done = 0;
1472 unsigned int flags = 0;
1473 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1475 while (done < budget && e->GenerationBit == q->genbit) {
1476 flags |= e->Qsleeping;
1478 cmdq_processed[0] += e->Cmdq0CreditReturn;
1479 cmdq_processed[1] += e->Cmdq1CreditReturn;
1481 /* We batch updates to the TX side to avoid cacheline
1482 * ping-pong of TX state information on MP where the sender
1483 * might run on a different CPU than this function...
1485 if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
1486 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1487 cmdq_processed[0] = 0;
1490 if (unlikely(cmdq_processed[1] > 16)) {
1491 sge->cmdQ[1].processed += cmdq_processed[1];
1492 cmdq_processed[1] = 0;
1495 if (likely(e->DataValid)) {
1496 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1498 BUG_ON(!e->Sop || !e->Eop);
1499 if (unlikely(e->Offload))
1500 unexpected_offload(adapter, fl);
1501 else
1502 sge_rx(sge, fl, e->BufferLength);
1504 ++done;
1507 * Note: this depends on each packet consuming a
1508 * single free-list buffer; cf. the BUG above.
1510 if (++fl->cidx == fl->size)
1511 fl->cidx = 0;
1512 prefetch(fl->centries[fl->cidx].skb);
1514 if (unlikely(--fl->credits <
1515 fl->size - SGE_FREEL_REFILL_THRESH))
1516 refill_free_list(sge, fl);
1517 } else
1518 sge->stats.pure_rsps++;
1520 e++;
1521 if (unlikely(++q->cidx == q->size)) {
1522 q->cidx = 0;
1523 q->genbit ^= 1;
1524 e = q->entries;
1526 prefetch(e);
1528 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1529 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1530 q->credits = 0;
1534 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1535 sge->cmdQ[1].processed += cmdq_processed[1];
1537 return done;
1540 static inline int responses_pending(const struct adapter *adapter)
1542 const struct respQ *Q = &adapter->sge->respQ;
1543 const struct respQ_e *e = &Q->entries[Q->cidx];
1545 return e->GenerationBit == Q->genbit;
1549 * A simpler version of process_responses() that handles only pure (i.e.,
1550  * non data-carrying) responses. Such responses are too lightweight to justify
1551 * calling a softirq when using NAPI, so we handle them specially in hard
1552 * interrupt context. The function is called with a pointer to a response,
1553 * which the caller must ensure is a valid pure response. Returns 1 if it
1554 * encounters a valid data-carrying response, 0 otherwise.
1556 static int process_pure_responses(struct adapter *adapter)
1558 struct sge *sge = adapter->sge;
1559 struct respQ *q = &sge->respQ;
1560 struct respQ_e *e = &q->entries[q->cidx];
1561 const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1562 unsigned int flags = 0;
1563 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1565 prefetch(fl->centries[fl->cidx].skb);
1566 if (e->DataValid)
1567 return 1;
1569 do {
1570 flags |= e->Qsleeping;
1572 cmdq_processed[0] += e->Cmdq0CreditReturn;
1573 cmdq_processed[1] += e->Cmdq1CreditReturn;
1575 e++;
1576 if (unlikely(++q->cidx == q->size)) {
1577 q->cidx = 0;
1578 q->genbit ^= 1;
1579 e = q->entries;
1581 prefetch(e);
1583 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1584 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1585 q->credits = 0;
1587 sge->stats.pure_rsps++;
1588 } while (e->GenerationBit == q->genbit && !e->DataValid);
1590 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1591 sge->cmdQ[1].processed += cmdq_processed[1];
1593 return e->GenerationBit == q->genbit;
1597 * Handler for new data events when using NAPI. This does not need any locking
1598 * or protection from interrupts as data interrupts are off at this point and
1599 * other adapter interrupts do not interfere.
1601 int t1_poll(struct napi_struct *napi, int budget)
1603 struct adapter *adapter = container_of(napi, struct adapter, napi);
1604 int work_done = process_responses(adapter, budget);
1606 if (likely(work_done < budget)) {
1607 napi_complete(napi);
1608 writel(adapter->sge->respQ.cidx,
1609 adapter->regs + A_SG_SLEEPING);
1611 return work_done;
1614 irqreturn_t t1_interrupt(int irq, void *data)
1616 struct adapter *adapter = data;
1617 struct sge *sge = adapter->sge;
1618 int handled;
1620 if (likely(responses_pending(adapter))) {
1621 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1623 if (napi_schedule_prep(&adapter->napi)) {
1624 if (process_pure_responses(adapter))
1625 __napi_schedule(&adapter->napi);
1626 else {
1627 /* no data, no NAPI needed */
1628 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1629 /* undo schedule_prep */
1630 napi_enable(&adapter->napi);
1633 return IRQ_HANDLED;
1636 spin_lock(&adapter->async_lock);
1637 handled = t1_slow_intr_handler(adapter);
1638 spin_unlock(&adapter->async_lock);
1640 if (!handled)
1641 sge->stats.unhandled_irqs++;
1643 return IRQ_RETVAL(handled != 0);
1647 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1649 * The code figures out how many entries the sk_buff will require in the
1650 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1651  * has completed. Then, it doesn't access the global structure anymore, but
1652 * uses the corresponding fields on the stack. In conjunction with a spinlock
1653 * around that code, we can make the function reentrant without holding the
1654 * lock when we actually enqueue (which might be expensive, especially on
1655 * architectures with IO MMUs).
1657 * This runs with softirqs disabled.
1659 static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1660 unsigned int qid, struct net_device *dev)
1662 struct sge *sge = adapter->sge;
1663 struct cmdQ *q = &sge->cmdQ[qid];
1664 unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
1666 if (!spin_trylock(&q->lock))
1667 return NETDEV_TX_LOCKED;
1669 reclaim_completed_tx(sge, q);
1671 pidx = q->pidx;
1672 credits = q->size - q->in_use;
1673 count = 1 + skb_shinfo(skb)->nr_frags;
1674 count += compute_large_page_tx_descs(skb);
1676 /* Ethernet packet */
1677 if (unlikely(credits < count)) {
1678 if (!netif_queue_stopped(dev)) {
1679 netif_stop_queue(dev);
1680 set_bit(dev->if_port, &sge->stopped_tx_queues);
1681 sge->stats.cmdQ_full[2]++;
1682 pr_err("%s: Tx ring full while queue awake!\n",
1683 adapter->name);
1685 spin_unlock(&q->lock);
1686 return NETDEV_TX_BUSY;
1689 if (unlikely(credits - count < q->stop_thres)) {
1690 netif_stop_queue(dev);
1691 set_bit(dev->if_port, &sge->stopped_tx_queues);
1692 sge->stats.cmdQ_full[2]++;
1695 /* T204 cmdQ0 skbs that are destined for a certain port have to go
1696 * through the scheduler.
1698 if (sge->tx_sched && !qid && skb->dev) {
1699 use_sched:
1700 use_sched_skb = 1;
1701 /* Note that the scheduler might return a different skb than
1702 * the one passed in.
1704 skb = sched_skb(sge, skb, credits);
1705 if (!skb) {
1706 spin_unlock(&q->lock);
1707 return NETDEV_TX_OK;
1709 pidx = q->pidx;
1710 count = 1 + skb_shinfo(skb)->nr_frags;
1711 count += compute_large_page_tx_descs(skb);
1714 q->in_use += count;
1715 genbit = q->genbit;
1716 pidx = q->pidx;
1717 q->pidx += count;
1718 if (q->pidx >= q->size) {
1719 q->pidx -= q->size;
1720 q->genbit ^= 1;
1722 spin_unlock(&q->lock);
1724 write_tx_descs(adapter, skb, pidx, genbit, q);
1727 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
1728 * the doorbell if the Q is asleep. There is a natural race, where
1729 * the hardware is going to sleep just after we checked, however,
1730 * then the interrupt handler will detect the outstanding TX packet
1731 * and ring the doorbell for us.
1733 if (qid)
1734 doorbell_pio(adapter, F_CMDQ1_ENABLE);
1735 else {
1736 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1737 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1738 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1739 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1743 if (use_sched_skb) {
1744 if (spin_trylock(&q->lock)) {
1745 credits = q->size - q->in_use;
1746 skb = NULL;
1747 goto use_sched;
1750 return NETDEV_TX_OK;
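/*
 * Sketch (not compiled, hypothetical helper) of the reservation pattern
 * used above: ring state (in_use, pidx, genbit) is advanced under the
 * lock, while the descriptor writes and the doorbell run afterwards on
 * the snapshot taken while it was held. Concurrent senders therefore
 * serialize only on the cheap index update, not on the potentially slow
 * (e.g. IOMMU-bound) descriptor writes.
 */
#if 0
static void demo_reserve_descs(struct cmdQ *q, unsigned int count,
			       unsigned int *pidx, unsigned int *genbit)
{
	spin_lock(&q->lock);
	q->in_use += count;
	*pidx = q->pidx;
	*genbit = q->genbit;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);
	/* caller now owns descriptors [*pidx, *pidx + count) */
}
#endif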
1753 #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
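/*
 * The macro above packs the framing type into bits 15:14 and the MSS into
 * bits 13:0 of the 16-bit eth_type_mss field, e.g.
 * MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) == (CPL_ETH_II << 14) | 1460.
 * Hypothetical unpacking helper (not compiled):
 */
#if 0
static unsigned int demo_mss_of(unsigned int eth_type_mss)
{
	return eth_type_mss & 0x3FFF;	/* low 14 bits carry the MSS */
}
#endif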
1756 * eth_hdr_len - return the length of an Ethernet header
1757 * @data: pointer to the start of the Ethernet header
1759 * Returns the length of an Ethernet header, including optional VLAN tag.
1761 static inline int eth_hdr_len(const void *data)
1763 const struct ethhdr *e = data;
1765 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1769 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1771 netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1773 struct adapter *adapter = dev->ml_priv;
1774 struct sge *sge = adapter->sge;
1775 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
1776 struct cpl_tx_pkt *cpl;
1777 struct sk_buff *orig_skb = skb;
1778 int ret;
1780 if (skb->protocol == htons(ETH_P_CPL5))
1781 goto send;
1784 * We are using a non-standard hard_header_len.
1785 * Allocate more header room in the rare cases it is not big enough.
1787 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
1788 skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
1789 ++st->tx_need_hdrroom;
1790 dev_kfree_skb_any(orig_skb);
1791 if (!skb)
1792 return NETDEV_TX_OK;
1795 if (skb_shinfo(skb)->gso_size) {
1796 int eth_type;
1797 struct cpl_tx_pkt_lso *hdr;
1799 ++st->tx_tso;
1801 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1802 CPL_ETH_II : CPL_ETH_II_VLAN;
1804 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1805 hdr->opcode = CPL_TX_PKT_LSO;
1806 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1807 hdr->ip_hdr_words = ip_hdr(skb)->ihl;
1808 hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
1809 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1810 skb_shinfo(skb)->gso_size));
1811 hdr->len = htonl(skb->len - sizeof(*hdr));
1812 cpl = (struct cpl_tx_pkt *)hdr;
1813 } else {
1815                  * Packets shorter than ETH_HLEN can break the MAC, so drop
1816                  * them early. We may also get oversized packets because some
1817                  * parts of the kernel don't handle our unusual hard_header_len
1818                  * correctly; drop those too.
1820 if (unlikely(skb->len < ETH_HLEN ||
1821 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1822                         netdev_dbg(dev, "packet size %d hdr %d mtu %d\n",
1823 skb->len, eth_hdr_len(skb->data), dev->mtu);
1824 dev_kfree_skb_any(skb);
1825 return NETDEV_TX_OK;
1828 if (skb->ip_summed == CHECKSUM_PARTIAL &&
1829 ip_hdr(skb)->protocol == IPPROTO_UDP) {
1830 if (unlikely(skb_checksum_help(skb))) {
1831 netdev_dbg(dev, "unable to do udp checksum\n");
1832 dev_kfree_skb_any(skb);
1833 return NETDEV_TX_OK;
1837         /* We assume this catches the gratuitous ARP, and we'll use
1838          * it to flush out stuck ESPI packets...
1840 if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
1841 if (skb->protocol == htons(ETH_P_ARP) &&
1842 arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
1843 adapter->sge->espibug_skb[dev->if_port] = skb;
1844 /* We want to re-use this skb later. We
1845 * simply bump the reference count and it
1846 * will not be freed...
1848 skb = skb_get(skb);
1852 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1853 cpl->opcode = CPL_TX_PKT;
1854 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1855 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
1856 /* the length field isn't used so don't bother setting it */
1858 st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
1860 cpl->iff = dev->if_port;
1862 if (vlan_tx_tag_present(skb)) {
1863 cpl->vlan_valid = 1;
1864 cpl->vlan = htons(vlan_tx_tag_get(skb));
1865 st->vlan_insert++;
1866 } else
1867 cpl->vlan_valid = 0;
1869 send:
1870 ret = t1_sge_tx(skb, adapter, 0, dev);
1872         /* If the transmit was busy and we reallocated the skb due to the
1873          * headroom limit, silently discard it to avoid a leak.
1875 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
1876 dev_kfree_skb_any(skb);
1877 ret = NETDEV_TX_OK;
1879 return ret;
1883 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
1885 static void sge_tx_reclaim_cb(unsigned long data)
1887 int i;
1888 struct sge *sge = (struct sge *)data;
1890 for (i = 0; i < SGE_CMDQ_N; ++i) {
1891 struct cmdQ *q = &sge->cmdQ[i];
1893 if (!spin_trylock(&q->lock))
1894 continue;
1896 reclaim_completed_tx(sge, q);
1897 if (i == 0 && q->in_use) { /* flush pending credits */
1898 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
1900 spin_unlock(&q->lock);
1902 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1906 * Propagate changes of the SGE coalescing parameters to the HW.
1908 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1910 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1911 core_ticks_per_usec(sge->adapter);
1912 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1913 return 0;
1917 * Allocates both RX and TX resources and configures the SGE. However,
1918 * the hardware is not enabled yet.
1920 int t1_sge_configure(struct sge *sge, struct sge_params *p)
1922 if (alloc_rx_resources(sge, p))
1923 return -ENOMEM;
1924 if (alloc_tx_resources(sge, p)) {
1925 free_rx_resources(sge);
1926 return -ENOMEM;
1928 configure_sge(sge, p);
1931 * Now that we have sized the free lists calculate the payload
1932 * capacity of the large buffers. Other parts of the driver use
1933 * this to set the max offload coalescing size so that RX packets
1934 * do not overflow our large buffers.
1936 p->large_buf_capacity = jumbo_payload_capacity(sge);
1937 return 0;
1941 * Disables the DMA engine.
1943 void t1_sge_stop(struct sge *sge)
1945 int i;
1946 writel(0, sge->adapter->regs + A_SG_CONTROL);
1947 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1949 if (is_T2(sge->adapter))
1950 del_timer_sync(&sge->espibug_timer);
1952 del_timer_sync(&sge->tx_reclaim_timer);
1953 if (sge->tx_sched)
1954 tx_sched_stop(sge);
1956 for (i = 0; i < MAX_NPORTS; i++)
1957 kfree_skb(sge->espibug_skb[i]);
1961 * Enables the DMA engine.
1963 void t1_sge_start(struct sge *sge)
1965 refill_free_list(sge, &sge->freelQ[0]);
1966 refill_free_list(sge, &sge->freelQ[1]);
1968 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1969 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1970 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1972 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1974 if (is_T2(sge->adapter))
1975 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1979  * Callback for the T2 ESPI 'stuck packet feature' workaround
1981 static void espibug_workaround_t204(unsigned long data)
1983 struct adapter *adapter = (struct adapter *)data;
1984 struct sge *sge = adapter->sge;
1985 unsigned int nports = adapter->params.nports;
1986 u32 seop[MAX_NPORTS];
1988 if (adapter->open_device_map & PORT_MASK) {
1989 int i;
1991 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
1992 return;
1994 for (i = 0; i < nports; i++) {
1995 struct sk_buff *skb = sge->espibug_skb[i];
1997 if (!netif_running(adapter->port[i].dev) ||
1998 netif_queue_stopped(adapter->port[i].dev) ||
1999 !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
2000 continue;
2002 if (!skb->cb[0]) {
2003 skb_copy_to_linear_data_offset(skb,
2004 sizeof(struct cpl_tx_pkt),
2005 ch_mac_addr,
2006 ETH_ALEN);
2007 skb_copy_to_linear_data_offset(skb,
2008 skb->len - 10,
2009 ch_mac_addr,
2010 ETH_ALEN);
2011 skb->cb[0] = 0xff;
2014 /* bump the reference count to avoid freeing of
2015 * the skb once the DMA has completed.
2017 skb = skb_get(skb);
2018 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2021 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2024 static void espibug_workaround(unsigned long data)
2026 struct adapter *adapter = (struct adapter *)data;
2027 struct sge *sge = adapter->sge;
2029 if (netif_running(adapter->port[0].dev)) {
2030 struct sk_buff *skb = sge->espibug_skb[0];
2031 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
2033 if ((seop & 0xfff0fff) == 0xfff && skb) {
2034 if (!skb->cb[0]) {
2035 skb_copy_to_linear_data_offset(skb,
2036 sizeof(struct cpl_tx_pkt),
2037 ch_mac_addr,
2038 ETH_ALEN);
2039 skb_copy_to_linear_data_offset(skb,
2040 skb->len - 10,
2041 ch_mac_addr,
2042 ETH_ALEN);
2043 skb->cb[0] = 0xff;
2046 /* bump the reference count to avoid freeing of the
2047 * skb once the DMA has completed.
2049 skb = skb_get(skb);
2050 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
2053 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2057 * Creates a t1_sge structure and returns suggested resource parameters.
2059 struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
2061 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
2062 int i;
2064 if (!sge)
2065 return NULL;
2067 sge->adapter = adapter;
2068 sge->netdev = adapter->port[0].dev;
2069 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
2070 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
2072 for_each_port(adapter, i) {
2073 sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
2074 if (!sge->port_stats[i])
2075 goto nomem_port;
2078 init_timer(&sge->tx_reclaim_timer);
2079 sge->tx_reclaim_timer.data = (unsigned long)sge;
2080 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
2082 if (is_T2(sge->adapter)) {
2083 init_timer(&sge->espibug_timer);
2085 if (adapter->params.nports > 1) {
2086 tx_sched_init(sge);
2087 sge->espibug_timer.function = espibug_workaround_t204;
2088 } else
2089 sge->espibug_timer.function = espibug_workaround;
2090 sge->espibug_timer.data = (unsigned long)sge->adapter;
2092 sge->espibug_timeout = 1;
2093 /* for T204, every 10ms */
2094 if (adapter->params.nports > 1)
2095 sge->espibug_timeout = HZ/100;
2099 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
2100 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
2101 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
2102 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
2103 if (sge->tx_sched) {
2104 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
2105 p->rx_coalesce_usecs = 15;
2106 else
2107 p->rx_coalesce_usecs = 50;
2108 } else
2109 p->rx_coalesce_usecs = 50;
2111 p->coalesce_enable = 0;
2112 p->sample_interval_usecs = 0;
2114 return sge;
2115 nomem_port:
2116 while (i >= 0) {
2117 free_percpu(sge->port_stats[i]);
2118 --i;
2120 kfree(sge);
2121 return NULL;