/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>

#include "alx.h"
#include "hw.h"
#include "reg.h"
static const char alx_drv_name[] = "alx";
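/* Release a single TX buffer: undo its DMA mapping (if any) and free the
 * attached skb.
 */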
static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
{
	struct alx_buffer *txb = &txq->bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(txq->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}
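/* Refill the RX free-descriptor (RFD) ring with freshly allocated skbs,
 * mapping each one for DMA, and tell the hardware about the new producer
 * index.  Returns the number of buffers that were added.
 */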
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		/* When DMA RX address is set to something like
		 * 0x....fc0, it will be very likely to cause DMA
		 * problems.
		 *
		 * To work around it, we apply rx skb with 64 bytes
		 * longer space, and offset the address whenever
		 * 0x....fc0 is detected.
		 */
		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
		if (!skb)
			break;

		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
			skb_reserve(skb, 64);

		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* Unfortunately, RX descriptor buffers must be 4-byte
		 * aligned, so we can't use IP alignment.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all updates before updating hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}
static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
						 struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= alx->num_txq)
		r_idx = r_idx % alx->num_txq;

	return alx->qnapi[r_idx]->txq;
}
static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
{
	return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
}
static inline int alx_tpd_avail(struct alx_tx_queue *txq)
{
	if (txq->write_idx >= txq->read_idx)
		return txq->count + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}
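/* Reclaim TX descriptors the hardware has finished with, as indicated by
 * the consumer index register, and wake the queue if it had been stopped.
 */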
static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
{
	struct alx_priv *alx;
	struct netdev_queue *tx_queue;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	alx = netdev_priv(txq->netdev);
	tx_queue = alx_get_tx_queue(txq);

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(txq, sw_read_idx);

			if (++sw_read_idx == txq->count)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_tx_completed_queue(tx_queue, total_packets, total_bytes);
	}

	if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(txq) > txq->count / 4)
		netif_tx_wake_queue(tx_queue);

	return sw_read_idx == hw_read_idx;
}
static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}
static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}
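/* NAPI RX cleanup: walk the return-ring (RRD) entries the hardware has
 * completed, hand the packets to the stack and refill the RFD ring.
 */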
static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
{
	struct alx_priv *alx;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;
	int work = 0;

	alx = netdev_priv(rxq->netdev);

	while (work < budget) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return work;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(rxq->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&rxq->np->napi, skb);
		work++;

next_pkt:
		if (++rxq->read_idx == rxq->count)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == rxq->count)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return work;
}
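/* NAPI poll handler: clean the TX ring, then the RX ring, and re-enable
 * interrupts once all outstanding work has been processed.
 */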
static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_napi *np = container_of(napi, struct alx_napi, napi);
	struct alx_priv *alx = np->alx;
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	bool tx_complete = true;
	int work = 0;

	if (np->txq)
		tx_complete = alx_clean_tx_irq(np->txq);
	if (np->rxq)
		work = alx_clean_rx_irq(np->rxq, budget);

	if (!tx_complete || work == budget)
		return budget;

	napi_complete_done(&np->napi, work);

	/* enable interrupt */
	if (alx->hw.pdev->msix_enabled) {
		alx_mask_msix(hw, np->vec_idx, false);
	} else {
		spin_lock_irqsave(&alx->irq_lock, flags);
		alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		spin_unlock_irqrestore(&alx->irq_lock, flags);
	}

	return work;
}
static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		return true;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* Suppress the PHY interrupt: its source is internal to the
		 * PHY, so the interrupt status can only be cleared once the
		 * PHY internal status has been cleared (by the link check).
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		alx_schedule_link_check(alx);
	}

	return false;
}
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (alx_intr_handle_misc(alx, intr))
		goto out;

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->qnapi[0]->napi);
		/* mask rx/tx interrupt, enable them when napi complete */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	}

	alx_write_mem32(hw, ALX_ISR, 0);

out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}
static irqreturn_t alx_intr_msix_ring(int irq, void *data)
{
	struct alx_napi *np = data;
	struct alx_hw *hw = &np->alx->hw;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, np->vec_idx, true);
	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, np->vec_mask);

	napi_schedule(&np->napi);

	return IRQ_HANDLED;
}
static irqreturn_t alx_intr_msix_misc(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 0, true);

	/* read interrupt status */
	intr = alx_read_mem32(hw, ALX_ISR);
	intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);

	if (alx_intr_handle_misc(alx, intr))
		return IRQ_HANDLED;

	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, intr);

	/* enable interrupt again */
	alx_mask_msix(hw, 0, false);

	return IRQ_HANDLED;
}
static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}
static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}
static const u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO,
					ALX_TPD_PRI1_ADDR_LO,
					ALX_TPD_PRI2_ADDR_LO,
					ALX_TPD_PRI3_ADDR_LO};
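/* Reset the software ring indices and program the descriptor base
 * addresses and ring sizes into the chip.
 */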
static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
	struct alx_napi *np;
	int i;

	for (i = 0; i < alx->num_napi; i++) {
		np = alx->qnapi[i];
		if (np->txq) {
			np->txq->read_idx = 0;
			np->txq->write_idx = 0;
			alx_write_mem32(hw,
					txring_header_reg[np->txq->queue_idx],
					np->txq->tpd_dma);
		}

		if (np->rxq) {
			np->rxq->read_idx = 0;
			np->rxq->write_idx = 0;
			np->rxq->rrd_read_idx = 0;
			alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
			alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
		}
	}

	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}
static void alx_free_txring_buf(struct alx_tx_queue *txq)
{
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < txq->count; i++)
		alx_free_txbuf(txq, i);

	memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_tx_reset_queue(alx_get_tx_queue(txq));
}
static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
{
	struct alx_buffer *cur_buf;
	u16 i;

	if (!rxq->bufs)
		return;

	for (i = 0; i < rxq->count; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(rxq->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}
static void alx_free_buffers(struct alx_priv *alx)
{
	int i;

	for (i = 0; i < alx->num_txq; i++)
		if (alx->qnapi[i] && alx->qnapi[i]->txq)
			alx_free_txring_buf(alx->qnapi[i]->txq);

	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
		alx_free_rxring_buf(alx->qnapi[0]->rxq);
}
static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}
static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}
static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}
static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}
static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}
static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
			     int offset)
{
	txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!txq->bufs)
		return -ENOMEM;

	txq->tpd = alx->descmem.virt + offset;
	txq->tpd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_txd) * txq->count;

	return offset;
}
static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
			     int offset)
{
	rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!rxq->bufs)
		return -ENOMEM;

	rxq->rrd = alx->descmem.virt + offset;
	rxq->rrd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rrd) * rxq->count;

	rxq->rfd = alx->descmem.virt + offset;
	rxq->rfd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rfd) * rxq->count;

	return offset;
}
static int alx_alloc_rings(struct alx_priv *alx)
{
	int i, offset = 0;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz *
			    alx->num_txq +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
					       alx->descmem.size,
					       &alx->descmem.dma, GFP_KERNEL);
	if (!alx->descmem.virt)
		return -ENOMEM;

	/* alignment requirements */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	for (i = 0; i < alx->num_txq; i++) {
		offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
		if (offset < 0) {
			netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
			return -ENOMEM;
		}
	}

	offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
	if (offset < 0) {
		netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
		return -ENOMEM;
	}

	return 0;
}
static void alx_free_rings(struct alx_priv *alx)
{
	int i;

	alx_free_buffers(alx);

	for (i = 0; i < alx->num_txq; i++)
		if (alx->qnapi[i] && alx->qnapi[i]->txq)
			kfree(alx->qnapi[i]->txq->bufs);

	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
		kfree(alx->qnapi[0]->rxq->bufs);

	if (alx->descmem.virt)
		dma_free_coherent(&alx->hw.pdev->dev,
				  alx->descmem.size,
				  alx->descmem.virt,
				  alx->descmem.dma);
}
static void alx_free_napis(struct alx_priv *alx)
{
	struct alx_napi *np;
	int i;

	for (i = 0; i < alx->num_napi; i++) {
		np = alx->qnapi[i];
		if (!np)
			continue;

		netif_napi_del(&np->napi);
		kfree(np->txq);
		kfree(np->rxq);
		kfree(np);
		alx->qnapi[i] = NULL;
	}
}
static const u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX,
				  ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX};
static const u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX,
				  ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX};
static const u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1,
				   ALX_ISR_TX_Q2, ALX_ISR_TX_Q3};
static const u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1,
				   ALX_ISR_RX_Q2, ALX_ISR_RX_Q3,
				   ALX_ISR_RX_Q4, ALX_ISR_RX_Q5,
				   ALX_ISR_RX_Q6, ALX_ISR_RX_Q7};
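/* Allocate the per-vector alx_napi containers plus their TX/RX queue
 * structures and register the NAPI poll handler for each of them.
 */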
static int alx_alloc_napis(struct alx_priv *alx)
{
	struct alx_napi *np;
	struct alx_rx_queue *rxq;
	struct alx_tx_queue *txq;
	int i;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;

	/* allocate alx_napi structures */
	for (i = 0; i < alx->num_napi; i++) {
		np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL);
		if (!np)
			goto err_out;

		np->alx = alx;
		netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
		alx->qnapi[i] = np;
	}

	/* allocate tx queues */
	for (i = 0; i < alx->num_txq; i++) {
		np = alx->qnapi[i];
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq)
			goto err_out;

		np->txq = txq;
		txq->p_reg = tx_pidx_reg[i];
		txq->c_reg = tx_cidx_reg[i];
		txq->queue_idx = i;
		txq->count = alx->tx_ringsz;
		txq->netdev = alx->dev;
		txq->dev = &alx->hw.pdev->dev;
		np->vec_mask |= tx_vect_mask[i];
		alx->int_mask |= tx_vect_mask[i];
	}

	/* allocate rx queues */
	np = alx->qnapi[0];
	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
	if (!rxq)
		goto err_out;

	np->rxq = rxq;
	rxq->np = alx->qnapi[0];
	rxq->queue_idx = 0;
	rxq->count = alx->rx_ringsz;
	rxq->netdev = alx->dev;
	rxq->dev = &alx->hw.pdev->dev;
	np->vec_mask |= rx_vect_mask[0];
	alx->int_mask |= rx_vect_mask[0];

	return 0;

err_out:
	netdev_err(alx->dev, "error allocating internal structures\n");
	alx_free_napis(alx);
	return -ENOMEM;
}
static const int txq_vec_mapping_shift[] = {
	0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT,
	0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT,
	1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT,
	1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT,
};
static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 tbl[2] = {0, 0};
	int i, vector, idx, shift;

	if (alx->hw.pdev->msix_enabled) {
		/* map queue interrupts to their MSI-X vectors */
		for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
			idx = txq_vec_mapping_shift[i * 2];
			shift = txq_vec_mapping_shift[i * 2 + 1];
			tbl[idx] |= vector << shift;
		}

		tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
	}

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}
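/* Try to switch to MSI-X: one vector for misc/PHY events plus one per
 * queue, limited by the number of online CPUs.
 */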
static int alx_enable_msix(struct alx_priv *alx)
{
	int err, num_vec, num_txq, num_rxq;

	num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
	num_rxq = 1;
	num_vec = max_t(int, num_txq, num_rxq) + 1;

	err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
				    PCI_IRQ_MSIX);
	if (err < 0) {
		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
		return err;
	}

	alx->num_vec = num_vec;
	alx->num_napi = num_vec - 1;
	alx->num_txq = num_txq;
	alx->num_rxq = num_rxq;

	return err;
}
static int alx_request_msix(struct alx_priv *alx)
{
	struct net_device *netdev = alx->dev;
	int i, err, vector = 0, free_vector = 0;

	err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc,
			  0, netdev->name, alx);
	if (err)
		goto out_err;

	for (i = 0; i < alx->num_napi; i++) {
		struct alx_napi *np = alx->qnapi[i];

		vector++;

		if (np->txq && np->rxq)
			sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->txq)
			sprintf(np->irq_lbl, "%s-tx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->rxq)
			sprintf(np->irq_lbl, "%s-rx-%u", netdev->name,
				np->rxq->queue_idx);
		else
			sprintf(np->irq_lbl, "%s-unused", netdev->name);

		np->vec_idx = vector;
		err = request_irq(pci_irq_vector(alx->hw.pdev, vector),
				  alx_intr_msix_ring, 0, np->irq_lbl, np);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx);

	vector--;
	for (i = 0; i < vector; i++)
		free_irq(pci_irq_vector(alx->hw.pdev, free_vector++),
			 alx->qnapi[i]);

out_err:
	return err;
}
static int alx_init_intr(struct alx_priv *alx)
{
	int ret;

	ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
				    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret < 0)
		return ret;

	alx->num_vec = 1;
	alx->num_napi = 1;
	alx->num_txq = 1;
	alx->num_rxq = 1;
	return 0;
}
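/* Interrupt masking helpers: program ISR/IMR and, when MSI-X is in use,
 * the per-vector mask bits.
 */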
static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);

	if (alx->hw.pdev->msix_enabled) {
		/* enable all msix irqs */
		for (i = 0; i < alx->num_vec; i++)
			alx_mask_msix(hw, i, false);
	}
}
static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);

	if (alx->hw.pdev->msix_enabled) {
		for (i = 0; i < alx->num_vec; i++) {
			alx_mask_msix(hw, i, true);
			synchronize_irq(pci_irq_vector(alx->hw.pdev, i));
		}
	} else {
		synchronize_irq(pci_irq_vector(alx->hw.pdev, 0));
	}
}
static int alx_realloc_resources(struct alx_priv *alx)
{
	int err;

	alx_free_rings(alx);
	alx_free_napis(alx);
	pci_free_irq_vectors(alx->hw.pdev);

	err = alx_init_intr(alx);
	if (err)
		return err;

	err = alx_alloc_napis(alx);
	if (err)
		return err;

	err = alx_alloc_rings(alx);
	if (err)
		return err;

	return 0;
}
static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	u32 msi_ctrl;
	int err;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (alx->hw.pdev->msix_enabled) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
		err = alx_request_msix(alx);
		if (!err)
			goto out;

		/* msix request failed, realloc resources */
		err = alx_realloc_resources(alx);
		if (err)
			goto out;
	}

	if (alx->hw.pdev->msi_enabled) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pci_irq_vector(pdev, 0), alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;

		/* fall back to legacy interrupt */
		pci_free_irq_vectors(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pci_irq_vector(pdev, 0), alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	else
		netdev_err(alx->dev, "IRQ registration failed!\n");
	return err;
}
static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	int i;

	free_irq(pci_irq_vector(pdev, 0), alx);
	if (alx->hw.pdev->msix_enabled) {
		for (i = 0; i < alx->num_napi; i++)
			free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]);
	}

	pci_free_irq_vectors(pdev);
}
static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}
static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
	/* MTU range: 34 - 9256 */
	alx->dev->min_mtu = 34;
	alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;

	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return 0;
}
static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}
static void alx_netif_stop(struct alx_priv *alx)
{
	int i;

	netif_trans_update(alx->dev);
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		for (i = 0; i < alx->num_napi; i++)
			napi_disable(&alx->qnapi[i]->napi);
	}
}
static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}
static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}
static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}
static void alx_reinit(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_activate(alx);
}
static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}
static void alx_netif_start(struct alx_priv *alx)
{
	int i;

	netif_tx_wake_all_queues(alx->dev);
	for (i = 0; i < alx->num_napi; i++)
		napi_enable(&alx->qnapi[i]->napi);
	netif_carrier_on(alx->dev);
}
static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	err = alx_enable_msix(alx);
	if (err < 0) {
		err = alx_init_intr(alx);
		if (err)
			return err;
	}

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_napis(alx);
	if (err)
		goto out_disable_adv_intr;

	err = alx_alloc_rings(alx);
	if (err)
		goto out_free_rings;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* must be called after alx_request_irq because the chip stops working
	 * if we copy the dma addresses in alx_init_ring_ptrs twice when
	 * requesting msi-x interrupts failed
	 */
	alx_reinit_rings(alx);

	netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
	netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	alx_free_napis(alx);
out_disable_adv_intr:
	pci_free_irq_vectors(alx->hw.pdev);
	return err;
}
static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);

	alx_free_rings(alx);
	alx_free_napis(alx);
}
static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}
static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}
static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}
static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}
static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	alx_check_link(alx);
}
static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	alx_reinit(alx);
}
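/* TX path helpers: compute how many TPDs a packet needs, fill in checksum
 * offload and TSO fields, and map the skb into the descriptor ring.
 */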
static int alx_tpd_req(struct sk_buff *skb)
{
	int num;

	num = skb_shinfo(skb)->nr_frags + 1;
	/* we need one extra descriptor for LSOv2 */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		num++;

	return num;
}
static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}
static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
{
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		first->word1 |= 1 << TPD_IPV4_SHIFT;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		/* LSOv2: the first TPD only provides the packet length */
		first->adrl.l.pkt_len = skb->len;
		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
	}

	first->word1 |= 1 << TPD_LSO_EN_SHIFT;
	first->word1 |= (skb_transport_offset(skb) &
			 TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
	first->word1 |= (skb_shinfo(skb)->gso_size &
			 TPD_MSS_MASK) << TPD_MSS_SHIFT;
	return 1;
}
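/* Map the skb head and all of its fragments for DMA, writing one TPD per
 * piece; on mapping failure every buffer written so far is unwound.
 */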
static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
{
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;

		tpd = &txq->tpd[txq->write_idx];
		tpd->len = first_tpd->len;
		tpd->vlan_tag = first_tpd->vlan_tag;
		tpd->word1 = first_tpd->word1;
	}

	maplen = skb_headlen(skb);
	dma = dma_map_single(txq->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(txq->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(txq->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(txq->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == txq->count)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(txq, f);
		if (++f == txq->count)
			f = 0;
	}
	return -ENOMEM;
}
static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb,
				       struct alx_tx_queue *txq)
{
	struct alx_priv *alx;
	struct alx_txd *first;
	int tso;

	alx = netdev_priv(txq->netdev);

	if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
		netif_tx_stop_queue(alx_get_tx_queue(txq));
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	tso = alx_tso(skb, first);
	if (tso < 0)
		goto drop;
	else if (!tso && alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(txq, skb) < 0)
		goto drop;

	netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);

	if (alx_tpd_avail(txq) < txq->count / 8)
		netif_tx_stop_queue(alx_get_tx_queue(txq));

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
}
static void alx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}
static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;
	return val;
}
static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}
static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int i;

	if (alx->hw.pdev->msix_enabled) {
		alx_intr_msix_misc(0, alx);
		for (i = 0; i < alx->num_txq; i++)
			alx_intr_msix_ring(0, alx->qnapi[i]);
	} else if (alx->hw.pdev->msi_enabled)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif
static void alx_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
	net_stats->multicast  = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors  = hw_stats->rx_frag +
				hw_stats->rx_fcs_err +
				hw_stats->rx_len_err +
				hw_stats->rx_ov_sz +
				hw_stats->rx_ov_rrd +
				hw_stats->rx_align_err +
				hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors   = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
	net_stats->rx_dropped       = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
	net_stats->tx_window_errors  = hw_stats->tx_late_col;

	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);
}
static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_get_stats64        = alx_get_stats64,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu         = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features       = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};
static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
			goto out_pci_disable;
		}
	}

	err = pci_request_mem_regions(pdev, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_mem_regions failed\n");
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	netdev = alloc_etherdev_mqs(sizeof(*alx),
				    ALX_MAX_TX_QUEUES, 1);
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	netdev->ethtool_ops = &alx_ethtool_ops;
	netdev->irq = pci_irq_vector(pdev, 0);
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_HW_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_mem_regions(pdev);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}
static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}
#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct alx_priv *alx = dev_get_drvdata(dev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct alx_priv *alx = dev_get_drvdata(dev);
	struct alx_hw *hw = &alx->hw;
	int err;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);

	err = __alx_open(alx, true);
	return err;
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS      (&alx_pm_ops)
#else
#define ALX_PM_OPS      NULL
#endif
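/* PCI AER error handlers: detach and halt the device when an error is
 * reported, reset the MAC on slot reset, and re-activate on resume.
 */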
static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	return rc;
}
static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		return rc;
	}

	pci_set_master(pdev);

	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;

	return rc;
}
static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}
}
static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};
static const struct pci_device_id alx_pci_tbl[] = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};
static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};
module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");