/*****************************************************************************
 * $Date: 2005/06/21 18:29:48 $
 *
 * part of the Chelsio 10Gb Ethernet Driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * http://www.chelsio.com
 *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Maintainers: maintainers@chelsio.com
 *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>
 *          Tina Yang               <tainay@chelsio.com>
 *          Felix Marti             <felix@chelsio.com>
 *          Scott Bardone           <sbardone@chelsio.com>
 *          Kurt Ottaway            <kottaway@chelsio.com>
 *          Frank DiMambro          <frank@chelsio.com>
 *
 ****************************************************************************/

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
/* This belongs in if_ether.h */
#define ETH_P_CPL5	0xf

#define SGE_FREELQ_N		2
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)
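
/*
 * Worked example (added annotation, not from the original sources): with a
 * buffer length of 1536 and a generation bit of 1, the descriptor packing
 * above gives V_CMD_LEN(1536) | V_CMD_GEN1(1) == 0x80000600, i.e. the length
 * in bits 30:0 and generation copy 1 in bit 31; G_CMD_LEN() masks with
 * M_CMD_LEN to recover the 1536.  V_CMD_GEN2() places the second generation
 * copy in bit 0 of the descriptor's final word.
 */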
/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
struct respQ_e {
	/* ... */
#if defined(__BIG_ENDIAN_BITFIELD)
	u32 Cmdq1CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq0DmaComplete	: 5;
	/* ... */
	u32 GenerationBit	: 1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u32 GenerationBit	: 1;
	/* ... */
	u32 Cmdq0DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq1CreditReturn	: 5;
#endif
};

/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};
/*
 * SW command, freelist and response rings
 */
struct cmdQ {
	unsigned long   status;          /* HW DMA fetch status */
	unsigned int    in_use;          /* # of in-use command descriptors */
	unsigned int    size;            /* # of descriptors */
	unsigned int    processed;       /* total # of descs HW has processed */
	unsigned int    cleaned;         /* total # of descs SW has reclaimed */
	unsigned int    stop_thres;      /* SW TX queue suspend threshold */
	u16             pidx;            /* producer index (SW) */
	u16             cidx;            /* consumer index (HW) */
	u8              genbit;          /* current generation (=valid) bit */
	u8              sop;             /* is next entry start of packet? */
	struct cmdQ_e  *entries;         /* HW command descriptor Q */
	struct cmdQ_ce *centries;        /* SW command context descriptor Q */
	dma_addr_t      dma_addr;        /* DMA addr HW command descriptor Q */
	spinlock_t      lock;            /* Lock to protect cmdQ enqueuing */
};

struct freelQ {
	unsigned int    credits;         /* # of available RX buffers */
	unsigned int    size;            /* free list capacity */
	u16             pidx;            /* producer index (SW) */
	u16             cidx;            /* consumer index (HW) */
	u16             rx_buffer_size;  /* Buffer size on this free list */
	u16             dma_offset;      /* DMA offset to align IP headers */
	u16             recycleq_idx;    /* skb recycle q to use */
	u8              genbit;          /* current generation (=valid) bit */
	struct freelQ_e  *entries;       /* HW freelist descriptor Q */
	struct freelQ_ce *centries;      /* SW freelist context descriptor Q */
	dma_addr_t       dma_addr;       /* DMA addr HW freelist descriptor Q */
};

struct respQ {
	unsigned int    credits;         /* credits to be returned to SGE */
	unsigned int    size;            /* # of response Q descriptors */
	u16             cidx;            /* consumer index (SW) */
	u8              genbit;          /* current generation (=valid) bit */
	struct respQ_e *entries;         /* HW response descriptor Q */
	dma_addr_t      dma_addr;        /* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,           /* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2        /* last packet rung the doorbell */
};
/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
	unsigned int    avail;           /* available bits - quota */
	unsigned int    drain_bits_per_1024ns; /* drain rate */
	unsigned int    speed;           /* drain rate, mbps */
	unsigned int    mtu;             /* mtu size */
	struct sk_buff_head skbq;        /* pending skbs */
};

/* Per T204 device */
struct sched {
	ktime_t         last_updated;    /* last time quotas were computed */
	unsigned int    max_avail;       /* max bits to be sent to any port */
	unsigned int    port;            /* port index (round robin ports) */
	unsigned int    num;             /* num skbs in per port queues */
	struct sched_port p[MAX_NPORTS];
	struct tasklet_struct sched_tsk; /* tasklet used to run scheduler */
};

static void restart_sched(unsigned long);
/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on a MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX (in irq context) and TX state in order to decrease memory
 * contention.
 */
struct sge {
	struct adapter *adapter;         /* adapter backpointer */
	struct net_device *netdev;       /* netdevice backpointer */
	struct freelQ   freelQ[SGE_FREELQ_N]; /* buffer free lists */
	struct respQ    respQ;           /* response Q */
	unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
	unsigned int    rx_pkt_pad;      /* RX padding for L2 packets */
	unsigned int    jumbo_fl;        /* jumbo freelist Q index */
	unsigned int    intrtimer_nres;  /* no-resource interrupt timer */
	unsigned int    fixed_intrtimer; /* non-adaptive interrupt timer */
	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
	struct timer_list espibug_timer;
	unsigned long   espibug_timeout;
	struct sk_buff  *espibug_skb[MAX_NPORTS];
	u32             sge_control;     /* shadow value of sge control reg */
	struct sge_intr_counts stats;
	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
	struct sched    *tx_sched;
	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};

static const u8 ch_mac_addr[ETH_ALEN] = {
	0x0, 0x7, 0x43, 0x0, 0x0, 0x0
};
/*
 * stop tasklet and free all pending skb's
 */
static void tx_sched_stop(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	int i;

	tasklet_kill(&s->sched_tsk);

	for (i = 0; i < MAX_NPORTS; i++)
		__skb_queue_purge(&s->p[s->port].skbq);
}
/*
 * t1_sched_update_parms() is called when the MTU or link speed changes. It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
				   unsigned int mtu, unsigned int speed)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	unsigned int max_avail_segs;

	pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
	if (speed)
		p->speed = speed;
	if (mtu)
		p->mtu = mtu;

	if (speed || mtu) {
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		 p->speed, s->max_avail, max_avail_segs,
		 p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}
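
/*
 * Worked example of the drain-rate computation above (added annotation, not
 * from the original sources; the numbers are only illustrative).  For
 * p->speed = 1000 Mbps and p->mtu = 1500:
 *
 *   drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) ~= 964
 *
 * i.e. roughly 964 payload bits can drain per 1024 ns at line rate, and on a
 * non-CHT204 board max_avail_segs = max(1, 9000 / 1460) = 6, so the function
 * returns 6 * 1460 = 8760.
 */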
/*
 * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
	struct sched *s = sge->tx_sched;
	unsigned int i;

	s->max_avail = val;
	for (i = 0; i < MAX_NPORTS; i++)
		t1_sched_update_parms(sge, i, 0, 0);
}

/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is draining.
 */
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
				    unsigned int val)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];

	p->drain_bits_per_1024ns = val * 1024 / 1000;
	t1_sched_update_parms(sge, port, 0, 0);
}
/*
 * get_clock() implements a ns clock (see ktime_get)
 */
static inline ktime_t get_clock(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ktime(ts);
}

/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
	struct sched *s;
	int i;

	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");
	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
	sge->tx_sched = s;

	for (i = 0; i < MAX_NPORTS; i++) {
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
	}

	return 0;
}
/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = get_clock();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;

		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}
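
/*
 * Worked example of the quota update above (added annotation, not from the
 * original sources; values are only illustrative).  With
 * drain_bits_per_1024ns = 964 and delta_time_ns = 100000 (100 us since the
 * last update):
 *
 *   delta_avail = (964 * 100000) >> 13 = 96400000 / 8192 ~= 11767
 *
 * and the per-port quota grows by that amount, capped at s->max_avail.
 */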
/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 *
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
				 unsigned int credits)
{
	struct sched *s = sge->tx_sched;
	struct sk_buff_head *skbq;
	unsigned int i, len, update = 1;

	pr_debug("sched_skb %p\n", skb);

	if (!skb) {
		if (!s->num)
			return NULL;
	} else {
		skbq = &s->p[skb->dev->if_port].skbq;
		__skb_queue_tail(skbq, skb);
		s->num++;
		skb = NULL;
	}

	if (credits < MAX_SKB_FRAGS + 1)
		goto out;

again:
	for (i = 0; i < MAX_NPORTS; i++) {
		s->port = (s->port + 1) & (MAX_NPORTS - 1);
		skbq = &s->p[s->port].skbq;

		skb = skb_peek(skbq);
		if (!skb)
			continue;

		len = skb->len;
		if (len <= s->p[s->port].avail) {
			s->p[s->port].avail -= len;
			s->num--;
			__skb_unlink(skb, skbq);
			goto out;
		}
		skb = NULL;
	}

	if (update-- && sched_update_avail(sge))
		goto again;

out:
	/* If there are more pending skbs, we use the hardware to schedule us
	 * again.
	 */
	if (s->num && !skb) {
		struct cmdQ *q = &sge->cmdQ[0];

		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
	}
	pr_debug("sched_skb ret %p\n", skb);

	return skb;
}
/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
	wmb();
	writel(val, adapter->regs + A_SG_DOORBELL);
}

/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
				 dma_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}
/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	}

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		if (q->centries) {
			free_freelQ_buffers(pdev, q);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct freelQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		q->genbit = 1;
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
	 * including all the sk_buff overhead.
	 *
	 * Note: For T2 FL0 and FL1 are reversed.
	 */
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;

	size = (16 * 1024) -
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

	/*
	 * Setup which skb recycle Q should be used when recycling buffers from
	 * each free list.
	 */
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries =
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
		goto err_no_mem;
	return 0;

err_no_mem:
	free_rx_resources(sge);
	return -ENOMEM;
}
/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
	struct cmdQ_ce *ce;
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;

	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
		if (likely(dma_unmap_len(ce, dma_len))) {
			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
					 dma_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
		}
		if (ce->skb) {
			dev_kfree_skb_any(ce->skb);
			ce->skb = NULL;
		}
		ce++;
		if (++cidx == q->size) {
			cidx = 0;
			ce = q->centries;
		}
	}
	q->cidx = cidx;
}

/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (q->centries) {
			if (q->in_use)
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct cmdQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}
/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		q->size = p->cmdQ_size[i];
		q->processed = q->cleaned = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
	 * only.  For queue 0 set the stop threshold so we can handle one more
	 * packet from each port, plus reserve an additional 24 entries for
	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
	 * space for Ethernet packets.
	 */
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
		(MAX_SKB_FRAGS + 1);
	return 0;

err_no_mem:
	free_tx_resources(sge);
	return -ENOMEM;
}
static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
{
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
	struct sge *sge = adapter->sge;

	sge->sge_control &= ~F_VLAN_XTRACT;
	if (on_off)
		sge->sge_control |= F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL);	/* flush */
	}
}
/*
 * Programs the various SGE registers. However, the engine is not yet enabled,
 * but sge->sge_control is setup and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}
/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
	int i;

	for_each_port(sge->adapter, i)
		free_percpu(sge->port_stats[i]);

	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}
/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		skb_reserve(skb, sge->rx_pkt_pad);

		ce->skb = skb;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}
/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	u32 irqholdoff_reg;

	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
	} else {
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	}
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}
#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}
/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		pr_alert("%s: SGE response queue overflow\n",
			 adapter->name);
	}
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
		freelQs_empty(sge);
	}
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		pr_alert("%s: SGE max packet size exceeded\n",
			 adapter->name);
	}
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		pr_alert("%s: SGE packet mismatch\n", adapter->name);
	}
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);

	writel(cause, adapter->regs + A_SG_INT_CAUSE);
	return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
	return &sge->stats;
}
void t1_sge_get_port_stats(const struct sge *sge, int port,
			   struct sge_port_stats *ss)
{
	int cpu;

	memset(ss, 0, sizeof(*ss));
	for_each_possible_cpu(cpu) {
		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

		ss->rx_cso_good += st->rx_cso_good;
		ss->tx_cso += st->tx_cso;
		ss->tx_tso += st->tx_tso;
		ss->tx_need_hdrroom += st->tx_need_hdrroom;
		ss->vlan_xtract += st->vlan_xtract;
		ss->vlan_insert += st->vlan_insert;
	}
}
/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
	struct freelQ_e *from = &fl->entries[idx];
	struct freelQ_e *to = &fl->entries[fl->pidx];

	fl->centries[fl->pidx] = fl->centries[idx];
	to->addr_lo = from->addr_lo;
	to->addr_hi = from->addr_hi;
	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
	wmb();
	to->gen2 = V_CMD_GEN2(fl->genbit);
	fl->credits++;

	if (++fl->pidx == fl->size) {
		fl->pidx = 0;
		fl->genbit ^= 1;
	}
}

static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
					 struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct freelQ_ce *ce = &fl->centries[fl->cidx];

	if (len < copybreak) {
		skb = alloc_skb(len + 2, GFP_ATOMIC);
		if (!skb)
			goto use_orig_buf;

		skb_reserve(skb, 2);	/* align IP header */
		skb_put(skb, len);
		pci_dma_sync_single_for_cpu(pdev,
					    dma_unmap_addr(ce, dma_addr),
					    dma_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(ce->skb, skb->data, len);
		pci_dma_sync_single_for_device(pdev,
					       dma_unmap_addr(ce, dma_addr),
					       dma_unmap_len(ce, dma_len),
					       PCI_DMA_FROMDEVICE);
		recycle_fl_buf(fl, fl->cidx);
		return skb;
	}

use_orig_buf:
	if (fl->credits < 2) {
		recycle_fl_buf(fl, fl->cidx);
		return NULL;
	}

	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	prefetch(skb->data);

	skb_put(skb, len);
	return skb;
}
/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC).  Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	pr_err("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}
/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_tx_descs() computes how many additional descriptors
 * are required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
	unsigned int count = 0;

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
		unsigned int i, len = skb_headlen(skb);

		while (len > SGE_TX_DESC_MAX_PLEN) {
			count++;
			len -= SGE_TX_DESC_MAX_PLEN;
		}
		for (i = 0; nfrags--; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			while (len > SGE_TX_DESC_MAX_PLEN) {
				count++;
				len -= SGE_TX_DESC_MAX_PLEN;
			}
		}
	}
	return count;
}
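
/*
 * Illustrative example (added annotation, not from the original sources;
 * the sizes are made up): on a configuration where PAGE_SIZE is 64KB, an skb
 * with skb_headlen() == 20000 needs one extra descriptor for the head
 * (20000 > 16384 once), and a single 40000-byte fragment needs two more, so
 * compute_large_page_tx_descs() would return 3 in addition to the
 * 1 + nr_frags descriptors counted by the caller.
 */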
/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
				 unsigned int len, unsigned int gen,
				 unsigned int eop)
{
	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}

/*
 * See comment for previous function.
 *
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						      struct cmdQ_e **e,
						      struct cmdQ_ce **ce,
						      unsigned int *gen,
						      dma_addr_t *desc_mapping,
						      unsigned int *desc_len,
						      unsigned int nfrags,
						      struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			ce1->skb = NULL;
			dma_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}
/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping, desc_mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, first_desc_len, desc_len,
		     nfrags = skb_shinfo(skb)->nr_frags;

	e = e1 = &q->entries[pidx];
	ce = &q->centries[pidx];

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	desc_mapping = mapping;
	desc_len = skb_headlen(skb);

	flags = F_CMD_DATAVALID | F_CMD_SOP |
		V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
		V_CMD_GEN2(gen);
	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
		desc_len : SGE_TX_DESC_MAX_PLEN;
	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	dma_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
	}
	if (likely(desc_len))
		write_tx_desc(e1, desc_mapping, desc_len, gen,
			      nfrags == 0);

	ce->skb = NULL;
	dma_unmap_addr_set(ce, dma_addr, mapping);
	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		desc_mapping = mapping;
		desc_len = frag->size;

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, frag->size);
	}
	ce->skb = skb;
	wmb();
	e->flags = flags;
}
/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
			 q->processed, q->cleaned);
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}
/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
 */
static void restart_sched(unsigned long arg)
{
	struct sge *sge = (struct sge *) arg;
	struct adapter *adapter = sge->adapter;
	struct cmdQ *q = &sge->cmdQ[0];
	struct sk_buff *skb;
	unsigned int credits, queued_skb = 0;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	pr_debug("restart_sched credits=%d\n", credits);
	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
		unsigned int genbit, pidx, count;

		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
		q->in_use += count;
		genbit = q->genbit;
		pidx = q->pidx;
		q->pidx += count;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->genbit ^= 1;
		}
		write_tx_descs(adapter, skb, pidx, genbit, q);
		credits = q->size - q->in_use;
		queued_skb = 1;
	}

	if (queued_skb) {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	spin_unlock(&q->lock);
}
/**
 *	sge_rx - process an ingress ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;
	struct sge_port_stats *st;
	struct net_device *dev;

	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
	if (unlikely(!skb)) {
		sge->stats.rx_drops++;
		return;
	}

	p = (const struct cpl_rx_pkt *) skb->data;
	if (p->iff >= adapter->params.nports) {
		kfree_skb(skb);
		return;
	}
	__skb_pull(skb, sizeof(*p));

	st = this_cpu_ptr(sge->port_stats[p->iff]);
	dev = adapter->port[p->iff].dev;

	skb->protocol = eth_type_trans(skb, dev);
	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		++st->rx_cso_good;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb_checksum_none_assert(skb);

	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		st->vlan_xtract++;
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 ntohs(p->vlan));
	} else
		netif_receive_skb(skb);
}
/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
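
/*
 * Clarifying note (added annotation, not from the original sources): 'r' is
 * the number of descriptors the hardware has finished with but software has
 * not yet reclaimed, so q->in_use - r is what is still genuinely outstanding.
 * For example, with size = 1024, in_use = 900 and processed - cleaned = 500,
 * the outstanding count is 400 < 512, so the queue may be woken again.
 */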
/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;
	int i;

	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
		return;

	for_each_port(adap, i) {
		struct net_device *nd = adap->port[i].dev;

		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
			sge->stats.cmdQ_restarted[2]++;
			netif_wake_queue(nd);
		}
	}
}
/*
 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
 * credits and to restart anything that was waiting on those credits.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;
	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}
/*
 * Process SGE responses, up to the supplied budget.  Returns the number of
 * responses processed.  A negative budget is effectively unlimited.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int done = 0;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (done < budget && e->GenerationBit == q->genbit) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}

		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}

		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			++done;

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}
static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return e->GenerationBit == Q->genbit;
}
/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially in
 * hard interrupt context.  The function is called with a pointer to a
 * response, which the caller must ensure is a valid pure response.  Returns
 * 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}
/*
 * Handler for new data events when using NAPI.  This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}

irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}

	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}
/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed.  Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack.  In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}
#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
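
/*
 * Example (added annotation, not from the original sources): for an untagged
 * frame MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) keeps the 14-bit MSS (1460) in
 * bits 13:0 and places the frame-type code in bits 15:14 of the 16-bit
 * eth_type_mss field filled in by the LSO path below.
 */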
/**
 *	eth_hdr_len - return the length of an Ethernet header
 *	@data: pointer to the start of the Ethernet header
 *
 *	Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}
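
/*
 * Usage note (added annotation, not from the original sources): for a frame
 * carrying an 802.1Q tag, h_proto holds ETH_P_8021Q, so this returns
 * VLAN_ETH_HLEN (18 bytes) instead of ETH_HLEN (14); t1_start_xmit() uses it
 * below to sanity-check oversized packets against dev->mtu.
 */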
/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early.  Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n", dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* We assume this catches the gratuitous ARP, and we'll use
		 * it to flush out stuck ESPI packets...
		 */
		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}
/*
 * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}
/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}
/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
							       ch_mac_addr,
							       ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
							       skb->len - 10,
							       ch_mac_addr,
							       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
							       ch_mac_addr,
							       ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
							       skb->len - 10,
							       ch_mac_addr,
							       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ / 100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;
nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}