/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>

#include "alx.h"
#include "hw.h"
#include "reg.h"

const char alx_drv_name[] = "alx";

static void alx_free_txbuf(struct alx_priv *alx, int entry)
{
	struct alx_buffer *txb = &alx->txq.bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}

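/* RFD refill: write_idx is the producer cursor (slots the driver has
 * filled with fresh buffers), read_idx the consumer cursor (slots the
 * hardware has handed back).  The refill loop below stops one slot
 * short of read_idx so a full ring is never confused with an empty one.
 */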
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
		if (!skb)
			break;

		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* Unfortunately, RX descriptor buffers must be 4-byte
		 * aligned, so we can't use IP alignment.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all updates before updating hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}

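/* Number of free TX descriptors, leaving one slot unused so that
 * write_idx == read_idx unambiguously means "ring empty".  For example,
 * with tx_ringsz = 256, read_idx = 10 and write_idx = 250, this yields
 * 256 + 10 - 250 - 1 = 15 free descriptors.
 */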
static inline int alx_tpd_avail(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;

	if (txq->write_idx >= txq->read_idx)
		return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}

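/* TX completion: the hardware advances its consumer index
 * (ALX_TPD_PRI0_CIDX); everything between our software read_idx and
 * that index has been transmitted and can be unmapped and freed.  The
 * byte/packet totals feed byte queue limits via netdev_completed_queue().
 */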
static bool alx_clean_tx_irq(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(alx, sw_read_idx);

			if (++sw_read_idx == alx->tx_ringsz)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_completed_queue(alx->dev, total_packets, total_bytes);
	}

	if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(alx) > alx->tx_ringsz / 4)
		netif_wake_queue(alx->dev);

	return sw_read_idx == hw_read_idx;
}

static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}

static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}

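/* RX completion walks the RRD ring: each updated RRD describes one
 * received packet and points back at the RFD slot whose buffer holds it.
 */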
static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;

	while (budget > 0) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return false;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, alx->dev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&alx->napi, skb);
		budget--;

next_pkt:
		if (++rxq->read_idx == alx->rx_ringsz)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == alx->rx_ringsz)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return budget > 0;
}

static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	bool complete = true;

	complete = alx_clean_tx_irq(alx) &&
		   alx_clean_rx_irq(alx, budget);

	if (!complete)
		return 1;

	napi_complete(&alx->napi);

	/* enable interrupt */
	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	alx_post_write(hw);

	return 0;
}

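/* Interrupt strategy: ack and temporarily disable interrupts up front,
 * hand queue work to NAPI with the queue sources masked, and let
 * alx_poll() unmask them again once the ring work is done.
 */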
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;
	bool write_int_mask = false;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		goto out;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* Suppress the PHY interrupt: its source is internal to
		 * the PHY, so the bit would reassert until the link-check
		 * worker clears the PHY's internal status.  alx_check_link()
		 * re-enables it afterwards.
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		write_int_mask = true;
		alx_schedule_link_check(alx);
	}

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->napi);
		/* mask rx/tx interrupt, enable them when napi complete */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		write_int_mask = true;
	}

	if (write_int_mask)
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);

	alx_write_mem32(hw, ALX_ISR, 0);

out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}

static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}

static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;

	alx->rxq.read_idx = 0;
	alx->rxq.write_idx = 0;
	alx->rxq.rrd_read_idx = 0;
	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	alx->txq.read_idx = 0;
	alx->txq.write_idx = 0;
	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}

static void alx_free_txring_buf(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < alx->tx_ringsz; i++)
		alx_free_txbuf(alx, i);

	memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_reset_queue(alx->dev);
}

static void alx_free_rxring_buf(struct alx_priv *alx)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_buffer *cur_buf;
	u16 i;

	if (!rxq->bufs)
		return;

	for (i = 0; i < alx->rx_ringsz; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(&alx->hw.pdev->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}

static void alx_free_buffers(struct alx_priv *alx)
{
	alx_free_txring_buf(alx);
	alx_free_rxring_buf(alx);
}

static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}

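/* 64-bit multicast hash filter: bit 31 of the Ethernet CRC selects one
 * of the two 32-bit hash-table registers and bits 30..26 select the bit
 * within it, so each address sets exactly one of 64 filter bits.
 */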
static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}

static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}

static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}

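/* Resulting descmem layout (one coherent allocation, so a single
 * high-32-bit base address register can cover all three rings):
 *
 *   descmem.virt / descmem.dma -> tx_ringsz * sizeof(struct alx_txd)  (TPD)
 *   rrd / rrd_dma               -> rx_ringsz * sizeof(struct alx_rrd)  (RRD)
 *   rfd / rfd_dma               -> rx_ringsz * sizeof(struct alx_rfd)  (RFD)
 */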
static int alx_alloc_descriptors(struct alx_priv *alx)
{
	alx->txq.bufs = kcalloc(alx->tx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->txq.bufs)
		return -ENOMEM;

	alx->rxq.bufs = kcalloc(alx->rx_ringsz,
				sizeof(struct alx_buffer),
				GFP_KERNEL);
	if (!alx->rxq.bufs)
		goto out_free;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		goto out_free;

	alx->txq.tpd = (void *)alx->descmem.virt;
	alx->txq.tpd_dma = alx->descmem.dma;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);

	alx->rxq.rrd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz);
	alx->rxq.rrd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz;

	/* alignment requirement for next block */
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	alx->rxq.rfd =
		(void *)((u8 *)alx->descmem.virt +
			 sizeof(struct alx_txd) * alx->tx_ringsz +
			 sizeof(struct alx_rrd) * alx->rx_ringsz);
	alx->rxq.rfd_dma = alx->descmem.dma +
			   sizeof(struct alx_txd) * alx->tx_ringsz +
			   sizeof(struct alx_rrd) * alx->rx_ringsz;

	return 0;

out_free:
	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);
	return -ENOMEM;
}

static int alx_alloc_rings(struct alx_priv *alx)
{
	int err;

	err = alx_alloc_descriptors(alx);
	if (err)
		return err;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;

	netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);

	alx_reinit_rings(alx);
	return 0;
}

static void alx_free_rings(struct alx_priv *alx)
{
	netif_napi_del(&alx->napi);
	alx_free_buffers(alx);

	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);

	dma_free_coherent(&alx->hw.pdev->dev,
			  alx->descmem.size,
			  alx->descmem.virt,
			  alx->descmem.dma);
}

static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}

static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);
}

static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	synchronize_irq(alx->hw.pdev->irq);
}

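/* Try MSI first; if request_irq() fails in MSI mode, fall back to a
 * shared legacy INTx line.  The MSI retransmission timer is derived
 * from the interrupt moderation timer (hw->imt).
 */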
static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (!pci_enable_msi(alx->hw.pdev)) {
		alx->msi = true;

		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;
		/* fall back to legacy interrupt */
		pci_disable_msi(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	return err;
}

static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;

	free_irq(pdev->irq, alx);

	if (alx->msi) {
		pci_disable_msi(alx->hw.pdev);
		alx->msi = false;
	}
}

static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}

static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}

static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}

static void alx_netif_stop(struct alx_priv *alx)
{
	alx->dev->trans_start = jiffies;
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		napi_disable(&alx->napi);
	}
}

static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}

static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}

static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}

static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
	    (max_frame > ALX_MAX_FRAME_SIZE))
		return -EINVAL;

	if (netdev->mtu == mtu)
		return 0;

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
			  ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}

static void alx_netif_start(struct alx_priv *alx)
{
	netif_tx_wake_all_queues(alx->dev);
	napi_enable(&alx->napi);
	netif_carrier_on(alx->dev);
}

static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_rings(alx);
	if (err)
		return err;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	return err;
}

static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
}

static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}

static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	u8 old_duplex;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	old_duplex = hw->duplex;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}

static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}

static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}

static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	rtnl_lock();
	alx_check_link(alx);
	rtnl_unlock();
}

static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	rtnl_lock();
	alx_reinit(alx);
	rtnl_unlock();
}

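/* Checksum offload: the TPD stores the checksum start and insertion
 * offsets in units of 16-bit words, hence the >> 1 shifts and the
 * requirement that the start offset be even.
 */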
static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 0x1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}

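/* Map the skb head and every page fragment into consecutive TPDs; the
 * first TPD's flags are copied to the rest, EOP marks the last one, and
 * the skb pointer is stored only in the final slot so TX completion
 * knows where the packet ends.  On a mapping failure, everything mapped
 * so far is unwound.
 */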
static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
{
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	maplen = skb_headlen(skb);
	dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&alx->hw.pdev->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == alx->tx_ringsz)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(alx, f);
		if (++f == alx->tx_ringsz)
			f = 0;
	}
	return -ENOMEM;
}

static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *first;
	int tpdreq = skb_shinfo(skb)->nr_frags + 1;

	if (alx_tpd_avail(alx) < tpdreq) {
		netif_stop_queue(alx->dev);
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	if (alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(alx, skb) < 0)
		goto drop;

	netdev_sent_queue(alx->dev, skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);

	if (alx_tpd_avail(alx) < alx->tx_ringsz / 8)
		netif_stop_queue(alx->dev);

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}

static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;
	return val;
}

static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}

static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (alx->msi)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif

static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
	net_stats->multicast  = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors  = hw_stats->rx_frag +
				hw_stats->rx_fcs_err +
				hw_stats->rx_len_err +
				hw_stats->rx_ov_sz +
				hw_stats->rx_ov_rrd +
				hw_stats->rx_align_err +
				hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors   = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
	net_stats->rx_dropped       = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
	net_stats->tx_window_errors  = hw_stats->tx_late_col;

	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);

	return net_stats;
}

static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_get_stats64        = alx_get_stats64,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu         = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features       = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};

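/* Probe prefers a 64-bit DMA mask and falls back to 32-bit; descriptor
 * placement is further constrained to a single 4 GB window because the
 * chip has only one register for the high 32 address bits.
 */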
static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int bars, err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA config, aborting\n");
				goto out_pci_disable;
			}
		}
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	err = pci_request_selected_regions(pdev, bars, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed(bars:%d)\n", bars);
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	netdev = alloc_etherdev(sizeof(*alx));
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
	netdev->irq = pdev->irq;
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_selected_regions(pdev, bars);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}

static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}

#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);
	return __alx_open(alx, true);
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS      (&alx_pm_ops)
#else
#define ALX_PM_OPS      NULL
#endif

static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}

static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
	pci_cleanup_aer_uncorrect_error_status(pdev);

	rtnl_unlock();

	return rc;
}

static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};

static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};

static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};

module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");