/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Revision: 1.26 $                                                         *
 * $Date: 2005/06/21 18:29:48 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License, version 2, as       *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along   *
 * with this program; if not, write to the Free Software Foundation, Inc.,   *
 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/
#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"
/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf

#define SGE_CMDQ_N		2
#define SGE_FREELQ_N		2
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_COPY_THRES	256
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384

# define SGE_RX_DROP_THRES 2

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#ifndef NET_IP_ALIGN
# define NET_IP_ALIGN 2
#endif

#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)
/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 flags;
	u32 addr_hi;
};

struct freelQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 gen2;
	u32 addr_hi;
};

struct respQ_e {
	u32 Qsleeping		: 4;
	u32 Cmdq1CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq0DmaComplete	: 5;
	u32 FreelistQid		: 2;
	u32 CreditValid		: 1;
	u32 DataValid		: 1;
	u32 Offload		: 1;
	u32 Eop			: 1;
	u32 Sop			: 1;
	u32 GenerationBit	: 1;
	u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 flags;
};

struct freelQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 gen2;
};

struct respQ_e {
	u32 BufferLength;
	u32 GenerationBit	: 1;
	u32 Sop			: 1;
	u32 Eop			: 1;
	u32 Offload		: 1;
	u32 DataValid		: 1;
	u32 CreditValid		: 1;
	u32 FreelistQid		: 2;
	u32 Cmdq0DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq1CreditReturn	: 5;
	u32 Qsleeping		: 4;
};
#endif
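
/*
 * Example (illustrative sketch of the generation-bit protocol shared by the
 * rings above): ownership of a descriptor is conveyed by its generation bit
 * rather than by head/tail pointers.  The producer stamps each entry with
 * the ring's current genbit and flips the bit every time the ring wraps, so
 * the consumer can detect where the valid entries end:
 *
 *	struct respQ_e *e = &q->entries[q->cidx];
 *
 *	while (e->GenerationBit == q->genbit) {
 *		// ... process *e ...
 *		if (++q->cidx == q->size) {
 *			q->cidx = 0;
 *			q->genbit ^= 1;	// entries written after the wrap
 *					// carry the flipped bit
 *		}
 *		e = &q->entries[q->cidx];
 *	}
 *
 * This mirrors the loops in process_responses() and
 * process_pure_responses() further down.
 */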
/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);
};
/*
 * SW command, freelist and response rings
 */
struct cmdQ {
	unsigned long	status;		/* HW DMA fetch status */
	unsigned int	in_use;		/* # of in-use command descriptors */
	unsigned int	size;		/* # of descriptors */
	unsigned int	processed;	/* total # of descs HW has processed */
	unsigned int	cleaned;	/* total # of descs SW has reclaimed */
	unsigned int	stop_thres;	/* SW TX queue suspend threshold */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u8		genbit;		/* current generation (=valid) bit */
	u8		sop;		/* is next entry start of packet? */
	struct cmdQ_e  *entries;	/* HW command descriptor Q */
	struct cmdQ_ce *centries;	/* SW command context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW command descriptor Q */
	spinlock_t	lock;		/* Lock to protect cmdQ enqueuing */
};

struct freelQ {
	unsigned int	credits;	/* # of available RX buffers */
	unsigned int	size;		/* free list capacity */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u16		rx_buffer_size;	/* Buffer size on this free list */
	u16		dma_offset;	/* DMA offset to align IP headers */
	u16		recycleq_idx;	/* skb recycle q to use */
	u8		genbit;		/* current generation (=valid) bit */
	struct freelQ_e	 *entries;	/* HW freelist descriptor Q */
	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW freelist descriptor Q */
};

struct respQ {
	unsigned int	credits;	/* credits to be returned to SGE */
	unsigned int	size;		/* # of response Q descriptors */
	u16		cidx;		/* consumer index (SW) */
	u8		genbit;		/* current generation(=valid) bit */
	struct respQ_e *entries;	/* HW response descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,		/* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2	/* last packet rung the doorbell */
};
/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
	unsigned int	avail;		/* available bits - quota */
	unsigned int	drain_bits_per_1024ns; /* drain rate */
	unsigned int	speed;		/* drain rate, mbps */
	unsigned int	mtu;		/* mtu size */
	struct sk_buff_head skbq;	/* pending skbs */
};

/* Per T204 device */
struct sched {
	ktime_t		last_updated;	/* last time quotas were computed */
	unsigned int	max_avail;	/* max bits to be sent to any port */
	unsigned int	port;		/* port index (round robin ports) */
	unsigned int	num;		/* num skbs in per port queues */
	struct sched_port p[MAX_NPORTS];
	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
};

static void restart_sched(unsigned long);
/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on a MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX (in irq context) and TX state in order to decrease memory
 * contention.
 */
struct sge {
	struct adapter *adapter;	/* adapter backpointer */
	struct net_device *netdev;	/* netdevice backpointer */
	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
	struct respQ	respQ;		/* response Q */
	unsigned long	stopped_tx_queues; /* bitmap of suspended Tx queues */
	unsigned int	rx_pkt_pad;	/* RX padding for L2 packets */
	unsigned int	jumbo_fl;	/* jumbo freelist Q index */
	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
	unsigned int	fixed_intrtimer;/* non-adaptive interrupt timer */
	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
	struct timer_list espibug_timer;
	unsigned long	espibug_timeout;
	struct sk_buff	*espibug_skb[MAX_NPORTS];
	u32		sge_control;	/* shadow value of sge control reg */
	struct sge_intr_counts stats;
	struct sge_port_stats *port_stats[MAX_NPORTS];
	struct sched	*tx_sched;
	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};
/*
 * stop tasklet and free all pending skb's
 */
static void tx_sched_stop(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	int i;

	tasklet_kill(&s->sched_tsk);

	for (i = 0; i < MAX_NPORTS; i++)
		__skb_queue_purge(&s->p[i].skbq);
}
/*
 * t1_sched_update_parms() is called when the MTU or link speed changes. It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
				   unsigned int mtu, unsigned int speed)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	unsigned int max_avail_segs;

	pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
	if (speed)
		p->speed = speed;
	if (mtu)
		p->mtu = mtu;

	if (speed || mtu) {
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		 p->speed, s->max_avail, max_avail_segs,
		 p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}
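
/*
 * Worked example of the drain-rate formula above (illustrative numbers):
 * for a 1000 Mbps port at MTU 1500, the payload fraction of each frame is
 * (mtu - 40) / (mtu + 50) = 1460 / 1550, so
 *
 *	drain_bits_per_1024ns = 1024 * 1000 * 1460 / (1550 * 1000) ~= 964
 *
 * i.e. about 0.94 Gbit/s of payload at 1 Gbit/s wire speed (the 90%
 * derating is skipped since speed is not < 1000).  On a non-CHT204 board
 * the function would then return max(1, 9000/1460) * 1460 = 8760.
 */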
/*
 * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
	struct sched *s = sge->tx_sched;
	unsigned int i;

	s->max_avail = val;
	for (i = 0; i < MAX_NPORTS; i++)
		t1_sched_update_parms(sge, i, 0, 0);
}

/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is draining.
 */
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
				    unsigned int val)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];

	p->drain_bits_per_1024ns = val * 1024 / 1000;
	t1_sched_update_parms(sge, port, 0, 0);
}
/*
 * get_clock() implements a ns clock (see ktime_get)
 */
static inline ktime_t get_clock(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ktime(ts);
}

/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
	struct sched *s;
	int i;

	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");
	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
	sge->tx_sched = s;

	for (i = 0; i < MAX_NPORTS; i++) {
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
	}

	return 0;
}
/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per-port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = get_clock();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;

		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}
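
/*
 * Illustrative arithmetic for the quota update above: the >> 13 divides by
 * 1024 (converting the per-1024ns rate to a per-ns rate) and by 8, so
 * delta_avail effectively comes out in bytes, matching the skb->len
 * comparisons in sched_skb().  With drain_bits_per_1024ns = 964 (1 Gbps,
 * MTU 1500) and a 100 us delta:
 *
 *	(964 * 100000) >> 13 ~= 11767
 *
 * i.e. roughly 0.94 Gbit/s of payload credited over 100 us.
 */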
/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
				 unsigned int credits)
{
	struct sched *s = sge->tx_sched;
	struct sk_buff_head *skbq;
	unsigned int i, len, update = 1;

	pr_debug("sched_skb %p\n", skb);
	if (!skb) {
		if (!s->num)
			return NULL;
	} else {
		skbq = &s->p[skb->dev->if_port].skbq;
		__skb_queue_tail(skbq, skb);
		s->num++;
		skb = NULL;
	}

	if (credits < MAX_SKB_FRAGS + 1)
		goto out;

again:
	for (i = 0; i < MAX_NPORTS; i++) {
		s->port = ++s->port & (MAX_NPORTS - 1);
		skbq = &s->p[s->port].skbq;

		skb = skb_peek(skbq);

		if (!skb)
			continue;

		len = skb->len;
		if (len <= s->p[s->port].avail) {
			s->p[s->port].avail -= len;
			s->num--;
			__skb_unlink(skb, skbq);
			goto out;
		}
		skb = NULL;
	}

	if (update-- && sched_update_avail(sge))
		goto again;

out:
	/* If there are more pending skbs, we use the hardware to schedule us
	 * again.
	 */
	if (s->num && !skb) {
		struct cmdQ *q = &sge->cmdQ[0];
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
	}
	pr_debug("sched_skb ret %p\n", skb);

	return skb;
}
/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
	wmb();
	writel(val, adapter->regs + A_SG_DOORBELL);
}

/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
				 pci_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}
/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	}

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		if (q->centries) {
			free_freelQ_buffers(pdev, q);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct freelQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}
/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		q->genbit = 1;
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = (struct freelQ_e *)
			      pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;
		memset(q->entries, 0, size);
		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * Calculate the buffer sizes for the two free lists. FL0 accommodates
	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
	 * including all the sk_buff overhead.
	 *
	 * Note: For T2 FL0 and FL1 are reversed.
	 */
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;

	size = (16 * 1024) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

	/*
	 * Setup which skb recycle Q should be used when recycling buffers from
	 * each free list.
	 */
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries = (struct respQ_e *)
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
		goto err_no_mem;
	memset(sge->respQ.entries, 0, size);
	return 0;

err_no_mem:
	free_rx_resources(sge);
	return -ENOMEM;
}
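
/*
 * Free-list sizing example (illustrative): a small-list buffer is
 * SGE_RX_SM_BUF_SIZE (1536) + sizeof(struct cpl_rx_data) + dma_offset
 * bytes, i.e. a full standard Ethernet frame plus the CPL header and any
 * alignment padding.  A jumbo-list buffer is capped so that the data area
 * plus the skb_shared_info that alloc_skb() appends still fits in 16KB:
 *
 *	16384 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 */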
/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
	struct cmdQ_ce *ce;
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;

	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
		if (q->sop) {
			if (likely(pci_unmap_len(ce, dma_len))) {
				pci_unmap_single(pdev,
						 pci_unmap_addr(ce, dma_addr),
						 pci_unmap_len(ce, dma_len),
						 PCI_DMA_TODEVICE);
				q->sop = 0;
			}
		} else {
			if (likely(pci_unmap_len(ce, dma_len))) {
				pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
					       pci_unmap_len(ce, dma_len),
					       PCI_DMA_TODEVICE);
			}
		}
		if (ce->skb) {
			dev_kfree_skb_any(ce->skb);
			q->sop = 1;
		}
		ce++;
		if (++cidx == q->size) {
			cidx = 0;
			ce = q->centries;
		}
	}
	q->cidx = cidx;
}
/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (q->centries) {
			if (q->in_use)
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct cmdQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		q->genbit = 1;
		q->sop = 1;
		q->size = p->cmdQ_size[i];
		q->in_use = 0;
		q->status = 0;
		q->processed = q->cleaned = 0;
		q->stop_thres = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = (struct cmdQ_e *)
			      pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;
		memset(q->entries, 0, size);
		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
	 * only. For queue 0 set the stop threshold so we can handle one more
	 * packet from each port, plus reserve an additional 24 entries for
	 * Ethernet packets only. Queue 1 never suspends nor do we reserve
	 * space for Ethernet packets.
	 */
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
		(MAX_SKB_FRAGS + 1);
	return 0;

err_no_mem:
	free_tx_resources(sge);
	return -ENOMEM;
}
static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
{
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
	struct sge *sge = adapter->sge;

	sge->sge_control &= ~F_VLAN_XTRACT;
	if (on_off)
		sge->sge_control |= F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL); /* flush */
	}
}
/*
 * Programs the various SGE registers. The engine is not yet enabled when
 * this returns, but sge->sge_control is set up and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}
/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}
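
/*
 * Illustrative consequence: with the jumbo buffer sized to
 * 16K - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) above, the value
 * returned here is that size minus dma_offset and the cpl_rx_data header,
 * which is the largest payload the offload code may coalesce into one
 * buffer (see t1_sge_configure() below).
 */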
/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
	int i;

	for_each_port(sge->adapter, i)
		free_percpu(sge->port_stats[i]);

	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}
/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		ce->skb = skb;
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}
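
/*
 * Note on the descriptor write ordering above (illustrative): the HW
 * considers a free-list entry valid only when both generation bits match,
 * so the producer must make the address/length (and GEN1) globally visible
 * before setting gen2:
 *
 *	e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
 *	wmb();		// addr/len must not be reordered past gen2
 *	e->gen2 = V_CMD_GEN2(q->genbit);
 *
 * Without the wmb() the SGE could fetch a descriptor whose gen2 is new but
 * whose DMA address is stale.
 */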
/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	u32 irqholdoff_reg;

	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
	} else {
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	}
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}
#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	if (sge->adapter->flags & TSO_CAPABLE)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

	if (adapter->flags & TSO_CAPABLE)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		CH_ALERT("%s: SGE response queue overflow\n",
			 adapter->name);
	}
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
		freelQs_empty(sge);
	}
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		CH_ALERT("%s: SGE max packet size exceeded\n",
			 adapter->name);
	}
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
	}
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);

	writel(cause, adapter->regs + A_SG_INT_CAUSE);
	return 0;
}
const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
	return &sge->stats;
}

void t1_sge_get_port_stats(const struct sge *sge, int port,
			   struct sge_port_stats *ss)
{
	int cpu;

	memset(ss, 0, sizeof(*ss));
	for_each_possible_cpu(cpu) {
		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

		ss->rx_packets += st->rx_packets;
		ss->rx_cso_good += st->rx_cso_good;
		ss->tx_packets += st->tx_packets;
		ss->tx_cso += st->tx_cso;
		ss->tx_tso += st->tx_tso;
		ss->vlan_xtract += st->vlan_xtract;
		ss->vlan_insert += st->vlan_insert;
	}
}
/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
	struct freelQ_e *from = &fl->entries[idx];
	struct freelQ_e *to = &fl->entries[fl->pidx];

	fl->centries[fl->pidx] = fl->centries[idx];
	to->addr_lo = from->addr_lo;
	to->addr_hi = from->addr_hi;
	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
	wmb();
	to->gen2 = V_CMD_GEN2(fl->genbit);
	fl->credits++;

	if (++fl->pidx == fl->size) {
		fl->pidx = 0;
		fl->genbit ^= 1;
	}
}
/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *	@dma_pad: padding at beginning of buffer left by SGE DMA
 *	@skb_pad: padding to be used if the packet is copied
 *	@copy_thres: length threshold under which a packet should be copied
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff. If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself. If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
					 struct freelQ *fl, unsigned int len,
					 int dma_pad, int skb_pad,
					 unsigned int copy_thres,
					 unsigned int drop_thres)
{
	struct sk_buff *skb;
	struct freelQ_ce *ce = &fl->centries[fl->cidx];

	if (len < copy_thres) {
		skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			skb_reserve(skb, skb_pad);
			skb_put(skb, len);
			pci_dma_sync_single_for_cpu(pdev,
					    pci_unmap_addr(ce, dma_addr),
					    pci_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, ce->skb->data + dma_pad, len);
			pci_dma_sync_single_for_device(pdev,
					    pci_unmap_addr(ce, dma_addr),
					    pci_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;

		recycle_fl_buf(fl, fl->cidx);
		return skb;
	}

	if (fl->credits < drop_thres) {
		recycle_fl_buf(fl, fl->cidx);
		return NULL;
	}

use_orig_buf:
	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
			 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	skb_reserve(skb, dma_pad);
	skb_put(skb, len);
	return skb;
}
/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC). Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
				    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	CH_ERR("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}
/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_tx_descs() computes how many additional descriptors are
 * required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
	unsigned int count = 0;

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
		unsigned int i, len = skb->len - skb->data_len;

		while (len > SGE_TX_DESC_MAX_PLEN) {
			count++;
			len -= SGE_TX_DESC_MAX_PLEN;
		}
		for (i = 0; nfrags--; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			while (len > SGE_TX_DESC_MAX_PLEN) {
				count++;
				len -= SGE_TX_DESC_MAX_PLEN;
			}
		}
	}
	return count;
}
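
/*
 * Worked example (illustrative, only relevant when PAGE_SIZE > 16KB): on a
 * 64KB-page architecture a 40000-byte linear skb must be split at
 * SGE_TX_DESC_MAX_PLEN boundaries:
 *
 *	40000 = 16384 + 16384 + 7232  ->  count = 2 extra descriptors
 *
 * for a total of 3 descriptors; the while loop above increments count once
 * per full 16384-byte slice carved off ahead of the final remainder.
 */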
/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
				 unsigned int len, unsigned int gen,
				 unsigned int eop)
{
	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}
/*
 * See comment for previous function.
 *
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						     struct cmdQ_e **e,
						     struct cmdQ_ce **ce,
						     unsigned int *gen,
						     dma_addr_t *desc_mapping,
						     unsigned int *desc_len,
						     unsigned int nfrags,
						     struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			ce1->skb = NULL;
			pci_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}
/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping, desc_mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, first_desc_len, desc_len,
	    nfrags = skb_shinfo(skb)->nr_frags;

	e = e1 = &q->entries[pidx];
	ce = &q->centries[pidx];

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb->len - skb->data_len, PCI_DMA_TODEVICE);

	desc_mapping = mapping;
	desc_len = skb->len - skb->data_len;

	flags = F_CMD_DATAVALID | F_CMD_SOP |
	    V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
	    V_CMD_GEN2(gen);
	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
	    desc_len : SGE_TX_DESC_MAX_PLEN;
	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	pci_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);

		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
	}

	ce->skb = NULL;
	pci_unmap_addr_set(ce, dma_addr, mapping);
	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		desc_mapping = mapping;
		desc_len = frag->size;

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, frag->size);
	}
	ce->skb = skb;
	wmb();
	e->flags = flags;
}
/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
			 q->processed, q->cleaned);
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}
/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
 */
static void restart_sched(unsigned long arg)
{
	struct sge *sge = (struct sge *) arg;
	struct adapter *adapter = sge->adapter;
	struct cmdQ *q = &sge->cmdQ[0];
	struct sk_buff *skb;
	unsigned int credits, queued_skb = 0;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	pr_debug("restart_sched credits=%d\n", credits);
	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
		unsigned int genbit, pidx, count;

		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
		q->in_use += count;
		genbit = q->genbit;
		pidx = q->pidx;
		q->pidx += count;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->genbit ^= 1;
		}
		write_tx_descs(adapter, skb, pidx, genbit, q);
		credits = q->size - q->in_use;
		queued_skb = 1;
	}

	if (queued_skb) {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	spin_unlock(&q->lock);
}
/**
 *	sge_rx - process an ingress Ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress Ethernet packet and deliver it to the stack.
 */
static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;
	struct sge_port_stats *st;

	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
			 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
			 SGE_RX_DROP_THRES);
	if (unlikely(!skb)) {
		sge->stats.rx_drops++;
		return 0;
	}

	p = (struct cpl_rx_pkt *)skb->data;
	skb_pull(skb, sizeof(*p));
	if (p->iff >= adapter->params.nports) {
		kfree_skb(skb);
		return 0;
	}

	skb->dev = adapter->port[p->iff].dev;
	skb->dev->last_rx = jiffies;
	st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
	st->rx_packets++;

	skb->protocol = eth_type_trans(skb, skb->dev);
	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		++st->rx_cso_good;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		st->vlan_xtract++;
		if (adapter->params.sge.polling)
			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
						 ntohs(p->vlan));
		else
			vlan_hwaccel_rx(skb, adapter->vlan_grp,
					ntohs(p->vlan));
	} else if (adapter->params.sge.polling)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
	return 0;
}
/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
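
/*
 * Illustrative reading of the test above: r counts descriptors the HW has
 * finished but SW has not yet reclaimed, so q->in_use - r is what is truly
 * still outstanding.  Tx resumes only once less than half the ring is
 * busy; e.g. for a 1024-entry cmdQ with in_use = 900 and r = 500:
 *
 *	900 - 500 = 400 < 512  ->  OK to wake the queue
 */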
/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;

	if (enough_free_Tx_descs(&sge->cmdQ[0])) {
		int i;

		for_each_port(adap, i) {
			struct net_device *nd = adap->port[i].dev;

			if (test_and_clear_bit(nd->if_port,
					       &sge->stopped_tx_queues) &&
			    netif_running(nd)) {
				sge->stats.cmdQ_restarted[2]++;
				netif_wake_queue(nd);
			}
		}
	}
}
/*
 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
 * information.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;

	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}
/*
 * Process SGE responses, up to the supplied budget. Returns the number of
 * responses processed. A negative budget is effectively unlimited.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int budget_left = budget;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (likely(budget_left && e->GenerationBit == q->genbit)) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}
		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}
		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		--budget_left;
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	budget -= budget_left;
	return budget;
}
/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially
 * in hard interrupt context. The function is called with a pointer to a
 * response, which the caller must ensure is a valid pure response.
 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}
/*
 * Handler for new data events when using NAPI. This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
static int t1_poll(struct net_device *dev, int *budget)
{
	struct adapter *adapter = dev->priv;
	int effective_budget = min(*budget, dev->quota);
	int work_done = process_responses(adapter, effective_budget);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= effective_budget)
		return 1;

	__netif_rx_complete(dev);

	/*
	 * Because we don't atomically flush the following write it is
	 * possible that in very rare cases it can reach the device in a way
	 * that races with a new response being written plus an error interrupt
	 * causing the NAPI interrupt handler below to return unhandled status
	 * to the OS. To protect against this would require flushing the write
	 * and doing both the write and the flush with interrupts off. Way too
	 * expensive and unjustifiable given the rarity of the race.
	 */
	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
	return 0;
}
/*
 * Returns true if the device is already scheduled for polling.
 */
static inline int napi_is_scheduled(struct net_device *dev)
{
	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/*
 * NAPI version of the main interrupt handler.
 */
static irqreturn_t t1_interrupt_napi(int irq, void *data)
{
	int handled;
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	struct respQ *q = &adapter->sge->respQ;

	/*
	 * Clear the SGE_DATA interrupt first thing. Normally the NAPI
	 * handler has control of the response queue and the interrupt handler
	 * can look at the queue reliably only once it knows NAPI is off.
	 * We can't wait that long to clear the SGE_DATA interrupt because we
	 * could race with t1_poll rearming the SGE interrupt, so we need to
	 * clear the interrupt speculatively and really early on.
	 */
	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

	spin_lock(&adapter->async_lock);
	if (!napi_is_scheduled(sge->netdev)) {
		struct respQ_e *e = &q->entries[q->cidx];

		if (e->GenerationBit == q->genbit) {
			if (e->DataValid ||
			    process_pure_responses(adapter, e)) {
				if (likely(__netif_rx_schedule_prep(sge->netdev)))
					__netif_rx_schedule(sge->netdev);
				else if (net_ratelimit())
					printk(KERN_INFO
					       "NAPI schedule failure!\n");
			} else
				writel(q->cidx, adapter->regs + A_SG_SLEEPING);

			handled = 1;
			goto unlock;
		} else
			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
	} else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) {
		printk(KERN_ERR "data interrupt while NAPI running\n");
	}

	handled = t1_slow_intr_handler(adapter);
	if (!handled)
		sge->stats.unhandled_irqs++;
unlock:
	spin_unlock(&adapter->async_lock);
	return IRQ_RETVAL(handled != 0);
}
/*
 * Main interrupt handler, optimized assuming that we took a 'DATA'
 * interrupt.
 *
 * 1. Clear the interrupt
 * 2. Loop while we find valid descriptors and process them; accumulate
 *    information that can be processed after the loop
 * 3. Tell the SGE at which index we stopped processing descriptors
 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
 *    outstanding TX buffers waiting, replenish RX buffers, potentially
 *    reenable upper layers if they were turned off due to lack of TX
 *    resources which are available again.
 * 5. If we took an interrupt, but no valid respQ descriptors were found we
 *    let the slow_intr_handler run and do error handling.
 */
static irqreturn_t t1_interrupt(int irq, void *cookie)
{
	int work_done;
	struct respQ_e *e;
	struct adapter *adapter = cookie;
	struct respQ *Q = &adapter->sge->respQ;

	spin_lock(&adapter->async_lock);
	e = &Q->entries[Q->cidx];
	prefetch(e);

	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

	if (likely(e->GenerationBit == Q->genbit))
		work_done = process_responses(adapter, -1);
	else
		work_done = t1_slow_intr_handler(adapter);

	/*
	 * The unconditional clearing of the PL_CAUSE above may have raced
	 * with DMA completion and the corresponding generation of a response
	 * to cause us to miss the resulting data interrupt. The next write
	 * is also unconditional to recover the missed interrupt and render
	 * this race harmless.
	 */
	writel(Q->cidx, adapter->regs + A_SG_SLEEPING);

	if (!work_done)
		adapter->sge->stats.unhandled_irqs++;
	spin_unlock(&adapter->async_lock);
	return IRQ_RETVAL(work_done != 0);
}

irq_handler_t t1_select_intr_handler(adapter_t *adapter)
{
	return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
}
/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed. Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack. In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			CH_ERR("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}
#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
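
/*
 * Bit layout of MK_ETH_TYPE_MSS (illustrative): the MSS occupies the low
 * 14 bits and the Ethernet framing type the top 2 bits of the 16-bit
 * field, e.g.
 *
 *	MK_ETH_TYPE_MSS(CPL_ETH_II, 1460)
 *		== (1460 & 0x3FFF) | (CPL_ETH_II << 14)
 */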
/**
 *	eth_hdr_len - return the length of an Ethernet header
 *	@data: pointer to the start of the Ethernet header
 *
 *	Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}
1884 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1886 int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1888 struct adapter *adapter = dev->priv;
1889 struct sge *sge = adapter->sge;
1890 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
1891 struct cpl_tx_pkt *cpl;
1892 struct sk_buff *orig_skb = skb;
1893 int ret;
1895 if (skb->protocol == htons(ETH_P_CPL5))
1896 goto send;
1898 if (skb_shinfo(skb)->gso_size) {
1899 int eth_type;
1900 struct cpl_tx_pkt_lso *hdr;
1902 ++st->tx_tso;
1904 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
1905 CPL_ETH_II : CPL_ETH_II_VLAN;
1907 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1908 hdr->opcode = CPL_TX_PKT_LSO;
1909 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1910 hdr->ip_hdr_words = skb->nh.iph->ihl;
1911 hdr->tcp_hdr_words = skb->h.th->doff;
1912 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1913 skb_shinfo(skb)->gso_size));
1914 hdr->len = htonl(skb->len - sizeof(*hdr));
1915 cpl = (struct cpl_tx_pkt *)hdr;
1916 } else {
1918 * Packets shorter than ETH_HLEN can break the MAC, drop them
1919 * early. Also, we may get oversized packets because some
1920 * parts of the kernel don't handle our unusual hard_header_len
1921 * right, drop those too.
1923 if (unlikely(skb->len < ETH_HLEN ||
1924 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1925 pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
1926 skb->len, eth_hdr_len(skb->data), dev->mtu);
1927 dev_kfree_skb_any(skb);
1928 return NETDEV_TX_OK;
1932 * We are using a non-standard hard_header_len and some kernel
1933 * components, such as pktgen, do not handle it right.
1934 * Complain when this happens but try to fix things up.
1936 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
1937 pr_debug("%s: headroom %d header_len %d\n", dev->name,
1938 skb_headroom(skb), dev->hard_header_len);
1940 if (net_ratelimit())
1941 printk(KERN_ERR "%s: inadequate headroom in "
1942 "Tx packet\n", dev->name);
1943 skb = skb_realloc_headroom(skb, sizeof(*cpl));
1944 dev_kfree_skb_any(orig_skb);
1945 if (!skb)
1946 return NETDEV_TX_OK;
1949 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
1950 skb->ip_summed == CHECKSUM_PARTIAL &&
1951 skb->nh.iph->protocol == IPPROTO_UDP) {
1952 if (unlikely(skb_checksum_help(skb))) {
1953 pr_debug("%s: unable to do udp checksum\n", dev->name);
1954 dev_kfree_skb_any(skb);
1955 return NETDEV_TX_OK;
		/*
		 * Assume a gratuitous ARP will eventually pass through here;
		 * capture it so we can use it later to flush out stuck ESPI
		 * packets.
		 */
		if (unlikely(!adapter->sge->espibug_skb[dev->if_port])) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/*
				 * We want to re-use this skb later; simply
				 * bump the reference count so it is not
				 * freed.
				 */
				skb = skb_get(skb);
			}
		}
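		/*
		 * Every non-LSO frame gets a struct cpl_tx_pkt prepended;
		 * this header is consumed by the adapter and never appears
		 * on the wire.  IP checksumming stays in software
		 * (ip_csum_dis = 1) while L4 checksum offload follows
		 * skb->ip_summed.
		 */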
		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used, so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;
send:
	st->tx_packets++;
	dev->trans_start = jiffies;
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/*
	 * If the transmit was busy and we reallocated the skb because of
	 * the headroom limit, silently discard the copy to avoid a leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}
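/*
 * Note that the reclaim timer below is deliberately opportunistic: it uses
 * spin_trylock() so it never spins against the hot t1_sge_tx() path and
 * simply skips a busy command queue -- the next TX_RECLAIM_PERIOD tick will
 * catch it.
 */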
/*
 * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE,
			       sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}
/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->netdev->poll = t1_poll;
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}
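/*
 * Worked example (clock rate assumed for illustration only): with a 200 MHz
 * core clock core_ticks_per_usec() yields 200, so rx_coalesce_usecs = 50
 * programs A_SG_INTRTIMER with 10000 core ticks, i.e. at most one response
 * queue interrupt every 50 us.
 */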
/*
 * Allocates both RX and TX resources and configures the SGE.  However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists, calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}
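/*
 * A minimal sketch of the expected call order from the rest of the driver
 * (the call site is assumed, not part of this file):
 *
 *	sge = t1_sge_create(adapter, &params);
 *	if (!sge || t1_sge_configure(sge, &params))
 *		goto err_out;
 *	t1_sge_start(sge);
 *	...
 *	t1_sge_stop(sge);
 */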
/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		if (sge->espibug_skb[i])
			kfree_skb(sge->espibug_skb[i]);
}
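/*
 * Shutdown ordering above: DMA is quiesced first (the readl() flushes the
 * posted write to A_SG_CONTROL), then del_timer_sync() guarantees that no
 * reclaim or espibug callback is still running before the stuck-packet skbs
 * are freed.
 */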
/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
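/*
 * t1_sge_start() above refills both free lists before writing sge_control,
 * so the receive engine has buffers available from the very first frame;
 * the doorbell write then tells the hardware about the new free-list
 * entries.
 */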
/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround.
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (netif_running(adapter->port[i].dev) &&
			    !netif_queue_stopped(adapter->port[i].dev) &&
			    seop[i] && (seop[i] & 0xfff) == 0 && skb) {
				if (!skb->cb[0]) {
					u8 ch_mac_addr[ETH_ALEN] = {
						0x0, 0x7, 0x43, 0x0, 0x0, 0x0
					};

					memcpy(skb->data +
					       sizeof(struct cpl_tx_pkt),
					       ch_mac_addr, ETH_ALEN);
					memcpy(skb->data + skb->len - 10,
					       ch_mac_addr, ETH_ALEN);
					skb->cb[0] = 0xff;
				}

				/* bump the reference count to avoid freeing
				 * of the skb once the DMA has completed.
				 */
				skb = skb_get(skb);
				t1_sge_tx(skb, adapter, 0,
					  adapter->port[i].dev);
			}
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
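/*
 * Single-port variant of the workaround above.  Both versions follow the
 * same pattern: when the ESPI SEOP monitor indicates a stuck state, the
 * captured ARP skb is marked via cb[0], its addresses are rewritten with
 * 00:07:43:00:00:00 (00:07:43 is the Chelsio OUI), and it is retransmitted
 * to nudge the ESPI pipeline along.
 */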
static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] = {
					0x0, 0x7, 0x43, 0x0, 0x0, 0x0
				};

				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
				       ch_mac_addr, ETH_ALEN);
				memcpy(skb->data + skb->len - 10, ch_mac_addr,
				       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else {
			sge->espibug_timer.function = espibug_workaround;
		}
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ / 100;
	}
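	/*
	 * Suggested queue sizes: sge->jumbo_fl selects which of the two
	 * free lists carries jumbo buffers, so freelQ_size[!sge->jumbo_fl]
	 * below is the small-buffer list and freelQ_size[sge->jumbo_fl]
	 * the jumbo one.
	 */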
	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;
	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;
	p->polling = 0;

	return sge;
nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}