1 // SPDX-License-Identifier: GPL-2.0-only
3 * Driver for Gigabit Ethernet adapters based on the Session Layer
4 * Interface (SLIC) technology by Alacritech. The driver does not
5 * support the hardware acceleration features provided by these cards.
7 * Copyright (C) 2016 Lino Sanfilippo <LinoSanfilippo@gmx.de>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/if_ether.h>
16 #include <linux/crc32.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/ethtool.h>
19 #include <linux/mii.h>
20 #include <linux/interrupt.h>
21 #include <linux/delay.h>
22 #include <linux/firmware.h>
23 #include <linux/list.h>
24 #include <linux/u64_stats_sync.h>
28 #define DRV_NAME "slicoss"
30 static const struct pci_device_id slic_id_tbl
[] = {
31 { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH
,
32 PCI_DEVICE_ID_ALACRITECH_MOJAVE
) },
33 { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH
,
34 PCI_DEVICE_ID_ALACRITECH_OASIS
) },
38 static const char slic_stats_strings
[][ETH_GSTRING_LEN
] = {
/* Advance a ring index by one, wrapping at @qlen.
 * @qlen must be a power of two (wrap is done with a bit mask).
 */
static inline int slic_next_queue_idx(unsigned int idx, unsigned int qlen)
{
	unsigned int next = idx + 1;

	return next & (qlen - 1);
}
/* Return the number of free descriptors in a ring of length @qlen.
 * One slot is always kept unused so that put_idx == done_idx
 * unambiguously means "empty".
 * (Fix: the extraction dropped the @qlen parameter declaration; restored.)
 */
static inline int slic_get_free_queue_descs(unsigned int put_idx,
					    unsigned int done_idx,
					    unsigned int qlen)
{
	if (put_idx >= done_idx)
		return (qlen - (put_idx - done_idx) - 1);
	return (done_idx - put_idx - 1);
}
78 static unsigned int slic_next_compl_idx(struct slic_device
*sdev
)
80 struct slic_stat_queue
*stq
= &sdev
->stq
;
81 unsigned int active
= stq
->active_array
;
82 struct slic_stat_desc
*descs
;
83 struct slic_stat_desc
*stat
;
86 descs
= stq
->descs
[active
];
87 stat
= &descs
[stq
->done_idx
];
90 return SLIC_INVALID_STAT_DESC_IDX
;
92 idx
= (le32_to_cpu(stat
->hnd
) & 0xffff) - 1;
97 stq
->done_idx
= slic_next_queue_idx(stq
->done_idx
, stq
->len
);
98 /* check for wraparound */
100 dma_addr_t paddr
= stq
->paddr
[active
];
102 slic_write(sdev
, SLIC_REG_RBAR
, lower_32_bits(paddr
) |
104 /* make sure new status descriptors are immediately available */
105 slic_flush_write(sdev
);
107 active
&= (SLIC_NUM_STAT_DESC_ARRAYS
- 1);
108 stq
->active_array
= active
;
113 static unsigned int slic_get_free_tx_descs(struct slic_tx_queue
*txq
)
115 /* ensure tail idx is updated */
117 return slic_get_free_queue_descs(txq
->put_idx
, txq
->done_idx
, txq
->len
);
120 static unsigned int slic_get_free_rx_descs(struct slic_rx_queue
*rxq
)
122 return slic_get_free_queue_descs(rxq
->put_idx
, rxq
->done_idx
, rxq
->len
);
125 static void slic_clear_upr_list(struct slic_upr_list
*upr_list
)
127 struct slic_upr
*upr
;
128 struct slic_upr
*tmp
;
130 spin_lock_bh(&upr_list
->lock
);
131 list_for_each_entry_safe(upr
, tmp
, &upr_list
->list
, list
) {
132 list_del(&upr
->list
);
135 upr_list
->pending
= false;
136 spin_unlock_bh(&upr_list
->lock
);
139 static void slic_start_upr(struct slic_device
*sdev
, struct slic_upr
*upr
)
143 reg
= (upr
->type
== SLIC_UPR_CONFIG
) ? SLIC_REG_RCONFIG
:
145 slic_write(sdev
, reg
, lower_32_bits(upr
->paddr
));
146 slic_flush_write(sdev
);
149 static void slic_queue_upr(struct slic_device
*sdev
, struct slic_upr
*upr
)
151 struct slic_upr_list
*upr_list
= &sdev
->upr_list
;
154 spin_lock_bh(&upr_list
->lock
);
155 pending
= upr_list
->pending
;
156 INIT_LIST_HEAD(&upr
->list
);
157 list_add_tail(&upr
->list
, &upr_list
->list
);
158 upr_list
->pending
= true;
159 spin_unlock_bh(&upr_list
->lock
);
162 slic_start_upr(sdev
, upr
);
165 static struct slic_upr
*slic_dequeue_upr(struct slic_device
*sdev
)
167 struct slic_upr_list
*upr_list
= &sdev
->upr_list
;
168 struct slic_upr
*next_upr
= NULL
;
169 struct slic_upr
*upr
= NULL
;
171 spin_lock_bh(&upr_list
->lock
);
172 if (!list_empty(&upr_list
->list
)) {
173 upr
= list_first_entry(&upr_list
->list
, struct slic_upr
, list
);
174 list_del(&upr
->list
);
176 if (list_empty(&upr_list
->list
))
177 upr_list
->pending
= false;
179 next_upr
= list_first_entry(&upr_list
->list
,
180 struct slic_upr
, list
);
182 spin_unlock_bh(&upr_list
->lock
);
183 /* trigger processing of the next upr in list */
185 slic_start_upr(sdev
, next_upr
);
190 static int slic_new_upr(struct slic_device
*sdev
, unsigned int type
,
193 struct slic_upr
*upr
;
195 upr
= kmalloc(sizeof(*upr
), GFP_ATOMIC
);
201 slic_queue_upr(sdev
, upr
);
206 static void slic_set_mcast_bit(u64
*mcmask
, unsigned char const *addr
)
210 /* Get the CRC polynomial for the mac address: we use bits 1-8 (lsb),
211 * bitwise reversed, msb (= lsb bit 0 before bitrev) is automatically
214 crc
= ether_crc(ETH_ALEN
, addr
) >> 23;
215 /* we only have space on the SLIC for 64 entries */
217 mask
|= (u64
)1 << crc
;
221 /* must be called with link_lock held */
222 static void slic_configure_rcv(struct slic_device
*sdev
)
226 val
= SLIC_GRCR_RESET
| SLIC_GRCR_ADDRAEN
| SLIC_GRCR_RCVEN
|
227 SLIC_GRCR_HASHSIZE
<< SLIC_GRCR_HASHSIZE_SHIFT
| SLIC_GRCR_RCVBAD
;
229 if (sdev
->duplex
== DUPLEX_FULL
)
230 val
|= SLIC_GRCR_CTLEN
;
233 val
|= SLIC_GRCR_RCVALL
;
235 slic_write(sdev
, SLIC_REG_WRCFG
, val
);
238 /* must be called with link_lock held */
239 static void slic_configure_xmt(struct slic_device
*sdev
)
243 val
= SLIC_GXCR_RESET
| SLIC_GXCR_XMTEN
;
245 if (sdev
->duplex
== DUPLEX_FULL
)
246 val
|= SLIC_GXCR_PAUSEEN
;
248 slic_write(sdev
, SLIC_REG_WXCFG
, val
);
251 /* must be called with link_lock held */
252 static void slic_configure_mac(struct slic_device
*sdev
)
256 if (sdev
->speed
== SPEED_1000
) {
257 val
= SLIC_GMCR_GAPBB_1000
<< SLIC_GMCR_GAPBB_SHIFT
|
258 SLIC_GMCR_GAPR1_1000
<< SLIC_GMCR_GAPR1_SHIFT
|
259 SLIC_GMCR_GAPR2_1000
<< SLIC_GMCR_GAPR2_SHIFT
|
260 SLIC_GMCR_GBIT
; /* enable GMII */
262 val
= SLIC_GMCR_GAPBB_100
<< SLIC_GMCR_GAPBB_SHIFT
|
263 SLIC_GMCR_GAPR1_100
<< SLIC_GMCR_GAPR1_SHIFT
|
264 SLIC_GMCR_GAPR2_100
<< SLIC_GMCR_GAPR2_SHIFT
;
267 if (sdev
->duplex
== DUPLEX_FULL
)
268 val
|= SLIC_GMCR_FULLD
;
270 slic_write(sdev
, SLIC_REG_WMCFG
, val
);
273 static void slic_configure_link_locked(struct slic_device
*sdev
, int speed
,
276 struct net_device
*dev
= sdev
->netdev
;
278 if (sdev
->speed
== speed
&& sdev
->duplex
== duplex
)
282 sdev
->duplex
= duplex
;
284 if (sdev
->speed
== SPEED_UNKNOWN
) {
285 if (netif_carrier_ok(dev
))
286 netif_carrier_off(dev
);
288 /* (re)configure link settings */
289 slic_configure_mac(sdev
);
290 slic_configure_xmt(sdev
);
291 slic_configure_rcv(sdev
);
292 slic_flush_write(sdev
);
294 if (!netif_carrier_ok(dev
))
295 netif_carrier_on(dev
);
299 static void slic_configure_link(struct slic_device
*sdev
, int speed
,
302 spin_lock_bh(&sdev
->link_lock
);
303 slic_configure_link_locked(sdev
, speed
, duplex
);
304 spin_unlock_bh(&sdev
->link_lock
);
307 static void slic_set_rx_mode(struct net_device
*dev
)
309 struct slic_device
*sdev
= netdev_priv(dev
);
310 struct netdev_hw_addr
*hwaddr
;
314 if (dev
->flags
& (IFF_PROMISC
| IFF_ALLMULTI
)) {
315 /* Turn on all multicast addresses. We have to do this for
316 * promiscuous mode as well as ALLMCAST mode (it saves the
317 * microcode from having to keep state about the MAC
324 netdev_for_each_mc_addr(hwaddr
, dev
) {
325 slic_set_mcast_bit(&mcmask
, hwaddr
->addr
);
329 slic_write(sdev
, SLIC_REG_MCASTLOW
, lower_32_bits(mcmask
));
330 slic_write(sdev
, SLIC_REG_MCASTHIGH
, upper_32_bits(mcmask
));
332 set_promisc
= !!(dev
->flags
& IFF_PROMISC
);
334 spin_lock_bh(&sdev
->link_lock
);
335 if (sdev
->promisc
!= set_promisc
) {
336 sdev
->promisc
= set_promisc
;
337 slic_configure_rcv(sdev
);
339 spin_unlock_bh(&sdev
->link_lock
);
342 static void slic_xmit_complete(struct slic_device
*sdev
)
344 struct slic_tx_queue
*txq
= &sdev
->txq
;
345 struct net_device
*dev
= sdev
->netdev
;
346 struct slic_tx_buffer
*buff
;
347 unsigned int frames
= 0;
348 unsigned int bytes
= 0;
351 /* Limit processing to SLIC_MAX_TX_COMPLETIONS frames to avoid that new
352 * completions during processing keeps the loop running endlessly.
355 idx
= slic_next_compl_idx(sdev
);
356 if (idx
== SLIC_INVALID_STAT_DESC_IDX
)
360 buff
= &txq
->txbuffs
[idx
];
362 if (unlikely(!buff
->skb
)) {
364 "no skb found for desc idx %i\n", idx
);
367 dma_unmap_single(&sdev
->pdev
->dev
,
368 dma_unmap_addr(buff
, map_addr
),
369 dma_unmap_len(buff
, map_len
), DMA_TO_DEVICE
);
371 bytes
+= buff
->skb
->len
;
374 dev_kfree_skb_any(buff
->skb
);
376 } while (frames
< SLIC_MAX_TX_COMPLETIONS
);
377 /* make sure xmit sees the new value for done_idx */
380 u64_stats_update_begin(&sdev
->stats
.syncp
);
381 sdev
->stats
.tx_bytes
+= bytes
;
382 sdev
->stats
.tx_packets
+= frames
;
383 u64_stats_update_end(&sdev
->stats
.syncp
);
386 if (netif_queue_stopped(dev
) &&
387 (slic_get_free_tx_descs(txq
) >= SLIC_MIN_TX_WAKEUP_DESCS
))
388 netif_wake_queue(dev
);
389 netif_tx_unlock(dev
);
392 static void slic_refill_rx_queue(struct slic_device
*sdev
, gfp_t gfp
)
394 const unsigned int ALIGN_MASK
= SLIC_RX_BUFF_ALIGN
- 1;
395 unsigned int maplen
= SLIC_RX_BUFF_SIZE
;
396 struct slic_rx_queue
*rxq
= &sdev
->rxq
;
397 struct net_device
*dev
= sdev
->netdev
;
398 struct slic_rx_buffer
*buff
;
399 struct slic_rx_desc
*desc
;
400 unsigned int misalign
;
405 while (slic_get_free_rx_descs(rxq
) > SLIC_MAX_REQ_RX_DESCS
) {
406 skb
= alloc_skb(maplen
+ ALIGN_MASK
, gfp
);
410 paddr
= dma_map_single(&sdev
->pdev
->dev
, skb
->data
, maplen
,
412 if (dma_mapping_error(&sdev
->pdev
->dev
, paddr
)) {
413 netdev_err(dev
, "mapping rx packet failed\n");
415 dev_kfree_skb_any(skb
);
418 /* ensure head buffer descriptors are 256 byte aligned */
420 misalign
= paddr
& ALIGN_MASK
;
422 offset
= SLIC_RX_BUFF_ALIGN
- misalign
;
423 skb_reserve(skb
, offset
);
425 /* the HW expects dma chunks for descriptor + frame data */
426 desc
= (struct slic_rx_desc
*)skb
->data
;
427 /* temporarily sync descriptor for CPU to clear status */
428 dma_sync_single_for_cpu(&sdev
->pdev
->dev
, paddr
,
429 offset
+ sizeof(*desc
),
432 /* return it to HW again */
433 dma_sync_single_for_device(&sdev
->pdev
->dev
, paddr
,
434 offset
+ sizeof(*desc
),
437 buff
= &rxq
->rxbuffs
[rxq
->put_idx
];
439 dma_unmap_addr_set(buff
, map_addr
, paddr
);
440 dma_unmap_len_set(buff
, map_len
, maplen
);
441 buff
->addr_offset
= offset
;
442 /* complete write to descriptor before it is handed to HW */
444 /* head buffer descriptors are placed immediately before skb */
445 slic_write(sdev
, SLIC_REG_HBAR
, lower_32_bits(paddr
) + offset
);
446 rxq
->put_idx
= slic_next_queue_idx(rxq
->put_idx
, rxq
->len
);
450 static void slic_handle_frame_error(struct slic_device
*sdev
,
453 struct slic_stats
*stats
= &sdev
->stats
;
455 if (sdev
->model
== SLIC_MODEL_OASIS
) {
456 struct slic_rx_info_oasis
*info
;
460 info
= (struct slic_rx_info_oasis
*)skb
->data
;
461 status
= le32_to_cpu(info
->frame_status
);
462 status_b
= le32_to_cpu(info
->frame_status_b
);
463 /* transport layer */
464 if (status_b
& SLIC_VRHSTATB_TPCSUM
)
465 SLIC_INC_STATS_COUNTER(stats
, rx_tpcsum
);
466 if (status
& SLIC_VRHSTAT_TPOFLO
)
467 SLIC_INC_STATS_COUNTER(stats
, rx_tpoflow
);
468 if (status_b
& SLIC_VRHSTATB_TPHLEN
)
469 SLIC_INC_STATS_COUNTER(stats
, rx_tphlen
);
471 if (status_b
& SLIC_VRHSTATB_IPCSUM
)
472 SLIC_INC_STATS_COUNTER(stats
, rx_ipcsum
);
473 if (status_b
& SLIC_VRHSTATB_IPLERR
)
474 SLIC_INC_STATS_COUNTER(stats
, rx_iplen
);
475 if (status_b
& SLIC_VRHSTATB_IPHERR
)
476 SLIC_INC_STATS_COUNTER(stats
, rx_iphlen
);
478 if (status_b
& SLIC_VRHSTATB_RCVE
)
479 SLIC_INC_STATS_COUNTER(stats
, rx_early
);
480 if (status_b
& SLIC_VRHSTATB_BUFF
)
481 SLIC_INC_STATS_COUNTER(stats
, rx_buffoflow
);
482 if (status_b
& SLIC_VRHSTATB_CODE
)
483 SLIC_INC_STATS_COUNTER(stats
, rx_lcode
);
484 if (status_b
& SLIC_VRHSTATB_DRBL
)
485 SLIC_INC_STATS_COUNTER(stats
, rx_drbl
);
486 if (status_b
& SLIC_VRHSTATB_CRC
)
487 SLIC_INC_STATS_COUNTER(stats
, rx_crc
);
488 if (status
& SLIC_VRHSTAT_802OE
)
489 SLIC_INC_STATS_COUNTER(stats
, rx_oflow802
);
490 if (status_b
& SLIC_VRHSTATB_802UE
)
491 SLIC_INC_STATS_COUNTER(stats
, rx_uflow802
);
492 if (status_b
& SLIC_VRHSTATB_CARRE
)
493 SLIC_INC_STATS_COUNTER(stats
, tx_carrier
);
494 } else { /* mojave */
495 struct slic_rx_info_mojave
*info
;
498 info
= (struct slic_rx_info_mojave
*)skb
->data
;
499 status
= le32_to_cpu(info
->frame_status
);
500 /* transport layer */
501 if (status
& SLIC_VGBSTAT_XPERR
) {
502 u32 xerr
= status
>> SLIC_VGBSTAT_XERRSHFT
;
504 if (xerr
== SLIC_VGBSTAT_XCSERR
)
505 SLIC_INC_STATS_COUNTER(stats
, rx_tpcsum
);
506 if (xerr
== SLIC_VGBSTAT_XUFLOW
)
507 SLIC_INC_STATS_COUNTER(stats
, rx_tpoflow
);
508 if (xerr
== SLIC_VGBSTAT_XHLEN
)
509 SLIC_INC_STATS_COUNTER(stats
, rx_tphlen
);
512 if (status
& SLIC_VGBSTAT_NETERR
) {
513 u32 nerr
= status
>> SLIC_VGBSTAT_NERRSHFT
&
514 SLIC_VGBSTAT_NERRMSK
;
516 if (nerr
== SLIC_VGBSTAT_NCSERR
)
517 SLIC_INC_STATS_COUNTER(stats
, rx_ipcsum
);
518 if (nerr
== SLIC_VGBSTAT_NUFLOW
)
519 SLIC_INC_STATS_COUNTER(stats
, rx_iplen
);
520 if (nerr
== SLIC_VGBSTAT_NHLEN
)
521 SLIC_INC_STATS_COUNTER(stats
, rx_iphlen
);
524 if (status
& SLIC_VGBSTAT_LNKERR
) {
525 u32 lerr
= status
& SLIC_VGBSTAT_LERRMSK
;
527 if (lerr
== SLIC_VGBSTAT_LDEARLY
)
528 SLIC_INC_STATS_COUNTER(stats
, rx_early
);
529 if (lerr
== SLIC_VGBSTAT_LBOFLO
)
530 SLIC_INC_STATS_COUNTER(stats
, rx_buffoflow
);
531 if (lerr
== SLIC_VGBSTAT_LCODERR
)
532 SLIC_INC_STATS_COUNTER(stats
, rx_lcode
);
533 if (lerr
== SLIC_VGBSTAT_LDBLNBL
)
534 SLIC_INC_STATS_COUNTER(stats
, rx_drbl
);
535 if (lerr
== SLIC_VGBSTAT_LCRCERR
)
536 SLIC_INC_STATS_COUNTER(stats
, rx_crc
);
537 if (lerr
== SLIC_VGBSTAT_LOFLO
)
538 SLIC_INC_STATS_COUNTER(stats
, rx_oflow802
);
539 if (lerr
== SLIC_VGBSTAT_LUFLO
)
540 SLIC_INC_STATS_COUNTER(stats
, rx_uflow802
);
543 SLIC_INC_STATS_COUNTER(stats
, rx_errors
);
546 static void slic_handle_receive(struct slic_device
*sdev
, unsigned int todo
,
549 struct slic_rx_queue
*rxq
= &sdev
->rxq
;
550 struct net_device
*dev
= sdev
->netdev
;
551 struct slic_rx_buffer
*buff
;
552 struct slic_rx_desc
*desc
;
553 unsigned int frames
= 0;
554 unsigned int bytes
= 0;
559 while (todo
&& (rxq
->done_idx
!= rxq
->put_idx
)) {
560 buff
= &rxq
->rxbuffs
[rxq
->done_idx
];
566 desc
= (struct slic_rx_desc
*)skb
->data
;
568 dma_sync_single_for_cpu(&sdev
->pdev
->dev
,
569 dma_unmap_addr(buff
, map_addr
),
570 buff
->addr_offset
+ sizeof(*desc
),
573 status
= le32_to_cpu(desc
->status
);
574 if (!(status
& SLIC_IRHDDR_SVALID
)) {
575 dma_sync_single_for_device(&sdev
->pdev
->dev
,
586 dma_unmap_single(&sdev
->pdev
->dev
,
587 dma_unmap_addr(buff
, map_addr
),
588 dma_unmap_len(buff
, map_len
),
591 /* skip rx descriptor that is placed before the frame data */
592 skb_reserve(skb
, SLIC_RX_BUFF_HDR_SIZE
);
594 if (unlikely(status
& SLIC_IRHDDR_ERR
)) {
595 slic_handle_frame_error(sdev
, skb
);
596 dev_kfree_skb_any(skb
);
598 struct ethhdr
*eh
= (struct ethhdr
*)skb
->data
;
600 if (is_multicast_ether_addr(eh
->h_dest
))
601 SLIC_INC_STATS_COUNTER(&sdev
->stats
, rx_mcasts
);
603 len
= le32_to_cpu(desc
->length
) & SLIC_IRHDDR_FLEN_MSK
;
605 skb
->protocol
= eth_type_trans(skb
, dev
);
606 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
608 napi_gro_receive(&sdev
->napi
, skb
);
613 rxq
->done_idx
= slic_next_queue_idx(rxq
->done_idx
, rxq
->len
);
617 u64_stats_update_begin(&sdev
->stats
.syncp
);
618 sdev
->stats
.rx_bytes
+= bytes
;
619 sdev
->stats
.rx_packets
+= frames
;
620 u64_stats_update_end(&sdev
->stats
.syncp
);
622 slic_refill_rx_queue(sdev
, GFP_ATOMIC
);
625 static void slic_handle_link_irq(struct slic_device
*sdev
)
627 struct slic_shmem
*sm
= &sdev
->shmem
;
628 struct slic_shmem_data
*sm_data
= sm
->shmem_data
;
633 link
= le32_to_cpu(sm_data
->link
);
635 if (link
& SLIC_GIG_LINKUP
) {
636 if (link
& SLIC_GIG_SPEED_1000
)
638 else if (link
& SLIC_GIG_SPEED_100
)
643 duplex
= (link
& SLIC_GIG_FULLDUPLEX
) ? DUPLEX_FULL
:
646 duplex
= DUPLEX_UNKNOWN
;
647 speed
= SPEED_UNKNOWN
;
649 slic_configure_link(sdev
, speed
, duplex
);
652 static void slic_handle_upr_irq(struct slic_device
*sdev
, u32 irqs
)
654 struct slic_upr
*upr
;
656 /* remove upr that caused this irq (always the first entry in list) */
657 upr
= slic_dequeue_upr(sdev
);
659 netdev_warn(sdev
->netdev
, "no upr found on list\n");
663 if (upr
->type
== SLIC_UPR_LSTAT
) {
664 if (unlikely(irqs
& SLIC_ISR_UPCERR_MASK
)) {
666 slic_queue_upr(sdev
, upr
);
669 slic_handle_link_irq(sdev
);
674 static int slic_handle_link_change(struct slic_device
*sdev
)
676 return slic_new_upr(sdev
, SLIC_UPR_LSTAT
, sdev
->shmem
.link_paddr
);
679 static void slic_handle_err_irq(struct slic_device
*sdev
, u32 isr
)
681 struct slic_stats
*stats
= &sdev
->stats
;
683 if (isr
& SLIC_ISR_RMISS
)
684 SLIC_INC_STATS_COUNTER(stats
, rx_buff_miss
);
685 if (isr
& SLIC_ISR_XDROP
)
686 SLIC_INC_STATS_COUNTER(stats
, tx_dropped
);
687 if (!(isr
& (SLIC_ISR_RMISS
| SLIC_ISR_XDROP
)))
688 SLIC_INC_STATS_COUNTER(stats
, irq_errs
);
691 static void slic_handle_irq(struct slic_device
*sdev
, u32 isr
,
692 unsigned int todo
, unsigned int *done
)
694 if (isr
& SLIC_ISR_ERR
)
695 slic_handle_err_irq(sdev
, isr
);
697 if (isr
& SLIC_ISR_LEVENT
)
698 slic_handle_link_change(sdev
);
700 if (isr
& SLIC_ISR_UPC_MASK
)
701 slic_handle_upr_irq(sdev
, isr
);
703 if (isr
& SLIC_ISR_RCV
)
704 slic_handle_receive(sdev
, todo
, done
);
706 if (isr
& SLIC_ISR_CMD
)
707 slic_xmit_complete(sdev
);
710 static int slic_poll(struct napi_struct
*napi
, int todo
)
712 struct slic_device
*sdev
= container_of(napi
, struct slic_device
, napi
);
713 struct slic_shmem
*sm
= &sdev
->shmem
;
714 struct slic_shmem_data
*sm_data
= sm
->shmem_data
;
715 u32 isr
= le32_to_cpu(sm_data
->isr
);
718 slic_handle_irq(sdev
, isr
, todo
, &done
);
721 napi_complete_done(napi
, done
);
724 /* make sure sm_data->isr is cleard before irqs are reenabled */
726 slic_write(sdev
, SLIC_REG_ISR
, 0);
727 slic_flush_write(sdev
);
733 static irqreturn_t
slic_irq(int irq
, void *dev_id
)
735 struct slic_device
*sdev
= dev_id
;
736 struct slic_shmem
*sm
= &sdev
->shmem
;
737 struct slic_shmem_data
*sm_data
= sm
->shmem_data
;
739 slic_write(sdev
, SLIC_REG_ICR
, SLIC_ICR_INT_MASK
);
740 slic_flush_write(sdev
);
741 /* make sure sm_data->isr is read after ICR_INT_MASK is set */
746 /* spurious interrupt */
747 slic_write(sdev
, SLIC_REG_ISR
, 0);
748 slic_flush_write(sdev
);
752 napi_schedule_irqoff(&sdev
->napi
);
757 static void slic_card_reset(struct slic_device
*sdev
)
761 slic_write(sdev
, SLIC_REG_RESET
, SLIC_RESET_MAGIC
);
762 /* flush write by means of config space */
763 pci_read_config_word(sdev
->pdev
, PCI_COMMAND
, &cmd
);
767 static int slic_init_stat_queue(struct slic_device
*sdev
)
769 const unsigned int DESC_ALIGN_MASK
= SLIC_STATS_DESC_ALIGN
- 1;
770 struct slic_stat_queue
*stq
= &sdev
->stq
;
771 struct slic_stat_desc
*descs
;
772 unsigned int misalign
;
779 stq
->len
= SLIC_NUM_STAT_DESCS
;
780 stq
->active_array
= 0;
783 size
= stq
->len
* sizeof(*descs
) + DESC_ALIGN_MASK
;
785 for (i
= 0; i
< SLIC_NUM_STAT_DESC_ARRAYS
; i
++) {
786 descs
= dma_alloc_coherent(&sdev
->pdev
->dev
, size
, &paddr
,
789 netdev_err(sdev
->netdev
,
790 "failed to allocate status descriptors\n");
794 /* ensure correct alignment */
796 misalign
= paddr
& DESC_ALIGN_MASK
;
798 offset
= SLIC_STATS_DESC_ALIGN
- misalign
;
803 slic_write(sdev
, SLIC_REG_RBAR
, lower_32_bits(paddr
) |
805 stq
->descs
[i
] = descs
;
806 stq
->paddr
[i
] = paddr
;
807 stq
->addr_offset
[i
] = offset
;
810 stq
->mem_size
= size
;
816 dma_free_coherent(&sdev
->pdev
->dev
, stq
->mem_size
,
817 stq
->descs
[i
] - stq
->addr_offset
[i
],
818 stq
->paddr
[i
] - stq
->addr_offset
[i
]);
824 static void slic_free_stat_queue(struct slic_device
*sdev
)
826 struct slic_stat_queue
*stq
= &sdev
->stq
;
829 for (i
= 0; i
< SLIC_NUM_STAT_DESC_ARRAYS
; i
++) {
830 dma_free_coherent(&sdev
->pdev
->dev
, stq
->mem_size
,
831 stq
->descs
[i
] - stq
->addr_offset
[i
],
832 stq
->paddr
[i
] - stq
->addr_offset
[i
]);
836 static int slic_init_tx_queue(struct slic_device
*sdev
)
838 struct slic_tx_queue
*txq
= &sdev
->txq
;
839 struct slic_tx_buffer
*buff
;
840 struct slic_tx_desc
*desc
;
844 txq
->len
= SLIC_NUM_TX_DESCS
;
848 txq
->txbuffs
= kcalloc(txq
->len
, sizeof(*buff
), GFP_KERNEL
);
852 txq
->dma_pool
= dma_pool_create("slic_pool", &sdev
->pdev
->dev
,
853 sizeof(*desc
), SLIC_TX_DESC_ALIGN
,
855 if (!txq
->dma_pool
) {
857 netdev_err(sdev
->netdev
, "failed to create dma pool\n");
861 for (i
= 0; i
< txq
->len
; i
++) {
862 buff
= &txq
->txbuffs
[i
];
863 desc
= dma_pool_zalloc(txq
->dma_pool
, GFP_KERNEL
,
866 netdev_err(sdev
->netdev
,
867 "failed to alloc pool chunk (%i)\n", i
);
872 desc
->hnd
= cpu_to_le32((u32
)(i
+ 1));
873 desc
->cmd
= SLIC_CMD_XMT_REQ
;
875 desc
->type
= cpu_to_le32(SLIC_CMD_TYPE_DUMB
);
883 buff
= &txq
->txbuffs
[i
];
884 dma_pool_free(txq
->dma_pool
, buff
->desc
, buff
->desc_paddr
);
886 dma_pool_destroy(txq
->dma_pool
);
894 static void slic_free_tx_queue(struct slic_device
*sdev
)
896 struct slic_tx_queue
*txq
= &sdev
->txq
;
897 struct slic_tx_buffer
*buff
;
900 for (i
= 0; i
< txq
->len
; i
++) {
901 buff
= &txq
->txbuffs
[i
];
902 dma_pool_free(txq
->dma_pool
, buff
->desc
, buff
->desc_paddr
);
906 dma_unmap_single(&sdev
->pdev
->dev
,
907 dma_unmap_addr(buff
, map_addr
),
908 dma_unmap_len(buff
, map_len
), DMA_TO_DEVICE
);
909 consume_skb(buff
->skb
);
911 dma_pool_destroy(txq
->dma_pool
);
916 static int slic_init_rx_queue(struct slic_device
*sdev
)
918 struct slic_rx_queue
*rxq
= &sdev
->rxq
;
919 struct slic_rx_buffer
*buff
;
921 rxq
->len
= SLIC_NUM_RX_LES
;
925 buff
= kcalloc(rxq
->len
, sizeof(*buff
), GFP_KERNEL
);
930 slic_refill_rx_queue(sdev
, GFP_KERNEL
);
935 static void slic_free_rx_queue(struct slic_device
*sdev
)
937 struct slic_rx_queue
*rxq
= &sdev
->rxq
;
938 struct slic_rx_buffer
*buff
;
941 /* free rx buffers */
942 for (i
= 0; i
< rxq
->len
; i
++) {
943 buff
= &rxq
->rxbuffs
[i
];
948 dma_unmap_single(&sdev
->pdev
->dev
,
949 dma_unmap_addr(buff
, map_addr
),
950 dma_unmap_len(buff
, map_len
),
952 consume_skb(buff
->skb
);
957 static void slic_set_link_autoneg(struct slic_device
*sdev
)
959 unsigned int subid
= sdev
->pdev
->subsystem_device
;
962 if (sdev
->is_fiber
) {
963 /* We've got a fiber gigabit interface, and register 4 is
964 * different in fiber mode than in copper mode.
966 /* advertise FD only @1000 Mb */
967 val
= MII_ADVERTISE
<< 16 | ADVERTISE_1000XFULL
|
968 ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
969 /* enable PAUSE frames */
970 slic_write(sdev
, SLIC_REG_WPHY
, val
);
971 /* reset phy, enable auto-neg */
972 val
= MII_BMCR
<< 16 | BMCR_RESET
| BMCR_ANENABLE
|
974 slic_write(sdev
, SLIC_REG_WPHY
, val
);
975 } else { /* copper gigabit */
976 /* We've got a copper gigabit interface, and register 4 is
977 * different in copper mode than in fiber mode.
979 /* advertise 10/100 Mb modes */
980 val
= MII_ADVERTISE
<< 16 | ADVERTISE_100FULL
|
981 ADVERTISE_100HALF
| ADVERTISE_10FULL
| ADVERTISE_10HALF
;
982 /* enable PAUSE frames */
983 val
|= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
984 /* required by the Cicada PHY */
985 val
|= ADVERTISE_CSMA
;
986 slic_write(sdev
, SLIC_REG_WPHY
, val
);
988 /* advertise FD only @1000 Mb */
989 val
= MII_CTRL1000
<< 16 | ADVERTISE_1000FULL
;
990 slic_write(sdev
, SLIC_REG_WPHY
, val
);
992 if (subid
!= PCI_SUBDEVICE_ID_ALACRITECH_CICADA
) {
993 /* if a Marvell PHY enable auto crossover */
994 val
= SLIC_MIICR_REG_16
| SLIC_MRV_REG16_XOVERON
;
995 slic_write(sdev
, SLIC_REG_WPHY
, val
);
997 /* reset phy, enable auto-neg */
998 val
= MII_BMCR
<< 16 | BMCR_RESET
| BMCR_ANENABLE
|
1000 slic_write(sdev
, SLIC_REG_WPHY
, val
);
1002 /* enable and restart auto-neg (don't reset) */
1003 val
= MII_BMCR
<< 16 | BMCR_ANENABLE
| BMCR_ANRESTART
;
1004 slic_write(sdev
, SLIC_REG_WPHY
, val
);
1009 static void slic_set_mac_address(struct slic_device
*sdev
)
1011 u8
*addr
= sdev
->netdev
->dev_addr
;
1014 val
= addr
[5] | addr
[4] << 8 | addr
[3] << 16 | addr
[2] << 24;
1016 slic_write(sdev
, SLIC_REG_WRADDRAL
, val
);
1017 slic_write(sdev
, SLIC_REG_WRADDRBL
, val
);
1019 val
= addr
[0] << 8 | addr
[1];
1021 slic_write(sdev
, SLIC_REG_WRADDRAH
, val
);
1022 slic_write(sdev
, SLIC_REG_WRADDRBH
, val
);
1023 slic_flush_write(sdev
);
1026 static u32
slic_read_dword_from_firmware(const struct firmware
*fw
, int *offset
)
1031 memcpy(&val
, fw
->data
+ *offset
, sizeof(val
));
1035 return le32_to_cpu(val
);
1038 MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_MOJAVE
);
1039 MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_OASIS
);
1041 static int slic_load_rcvseq_firmware(struct slic_device
*sdev
)
1043 const struct firmware
*fw
;
1051 file
= (sdev
->model
== SLIC_MODEL_OASIS
) ? SLIC_RCV_FIRMWARE_OASIS
:
1052 SLIC_RCV_FIRMWARE_MOJAVE
;
1053 err
= request_firmware(&fw
, file
, &sdev
->pdev
->dev
);
1055 dev_err(&sdev
->pdev
->dev
,
1056 "failed to load receive sequencer firmware %s\n", file
);
1059 /* Do an initial sanity check concerning firmware size now. A further
1060 * check follows below.
1062 if (fw
->size
< SLIC_FIRMWARE_MIN_SIZE
) {
1063 dev_err(&sdev
->pdev
->dev
,
1064 "invalid firmware size %zu (min %u expected)\n",
1065 fw
->size
, SLIC_FIRMWARE_MIN_SIZE
);
1070 codelen
= slic_read_dword_from_firmware(fw
, &idx
);
1072 /* do another sanity check against firmware size */
1073 if ((codelen
+ 4) > fw
->size
) {
1074 dev_err(&sdev
->pdev
->dev
,
1075 "invalid rcv-sequencer firmware size %zu\n", fw
->size
);
1080 /* download sequencer code to card */
1081 slic_write(sdev
, SLIC_REG_RCV_WCS
, SLIC_RCVWCS_BEGIN
);
1082 for (addr
= 0; addr
< codelen
; addr
++) {
1084 /* write out instruction address */
1085 slic_write(sdev
, SLIC_REG_RCV_WCS
, addr
);
1087 instr
= slic_read_dword_from_firmware(fw
, &idx
);
1088 /* write out the instruction data low addr */
1089 slic_write(sdev
, SLIC_REG_RCV_WCS
, instr
);
1091 val
= (__le32
)fw
->data
[idx
];
1092 instr
= le32_to_cpu(val
);
1094 /* write out the instruction data high addr */
1095 slic_write(sdev
, SLIC_REG_RCV_WCS
, instr
);
1097 /* finish download */
1098 slic_write(sdev
, SLIC_REG_RCV_WCS
, SLIC_RCVWCS_FINISH
);
1099 slic_flush_write(sdev
);
1101 release_firmware(fw
);
1106 MODULE_FIRMWARE(SLIC_FIRMWARE_MOJAVE
);
1107 MODULE_FIRMWARE(SLIC_FIRMWARE_OASIS
);
1109 static int slic_load_firmware(struct slic_device
*sdev
)
1111 u32 sectstart
[SLIC_FIRMWARE_MAX_SECTIONS
];
1112 u32 sectsize
[SLIC_FIRMWARE_MAX_SECTIONS
];
1113 const struct firmware
*fw
;
1114 unsigned int datalen
;
1126 file
= (sdev
->model
== SLIC_MODEL_OASIS
) ? SLIC_FIRMWARE_OASIS
:
1127 SLIC_FIRMWARE_MOJAVE
;
1128 err
= request_firmware(&fw
, file
, &sdev
->pdev
->dev
);
1130 dev_err(&sdev
->pdev
->dev
, "failed to load firmware %s\n", file
);
1133 /* Do an initial sanity check concerning firmware size now. A further
1134 * check follows below.
1136 if (fw
->size
< SLIC_FIRMWARE_MIN_SIZE
) {
1137 dev_err(&sdev
->pdev
->dev
,
1138 "invalid firmware size %zu (min is %u)\n", fw
->size
,
1139 SLIC_FIRMWARE_MIN_SIZE
);
1144 numsects
= slic_read_dword_from_firmware(fw
, &idx
);
1145 if (numsects
== 0 || numsects
> SLIC_FIRMWARE_MAX_SECTIONS
) {
1146 dev_err(&sdev
->pdev
->dev
,
1147 "invalid number of sections in firmware: %u", numsects
);
1152 datalen
= numsects
* 8 + 4;
1153 for (i
= 0; i
< numsects
; i
++) {
1154 sectsize
[i
] = slic_read_dword_from_firmware(fw
, &idx
);
1155 datalen
+= sectsize
[i
];
1158 /* do another sanity check against firmware size */
1159 if (datalen
> fw
->size
) {
1160 dev_err(&sdev
->pdev
->dev
,
1161 "invalid firmware size %zu (expected >= %u)\n",
1167 for (i
= 0; i
< numsects
; i
++)
1168 sectstart
[i
] = slic_read_dword_from_firmware(fw
, &idx
);
1171 instr
= slic_read_dword_from_firmware(fw
, &idx
);
1173 for (sect
= 0; sect
< numsects
; sect
++) {
1174 unsigned int ssize
= sectsize
[sect
] >> 3;
1176 base
= sectstart
[sect
];
1178 for (addr
= 0; addr
< ssize
; addr
++) {
1179 /* write out instruction address */
1180 slic_write(sdev
, SLIC_REG_WCS
, base
+ addr
);
1181 /* write out instruction to low addr */
1182 slic_write(sdev
, SLIC_REG_WCS
, instr
);
1183 instr
= slic_read_dword_from_firmware(fw
, &idx
);
1184 /* write out instruction to high addr */
1185 slic_write(sdev
, SLIC_REG_WCS
, instr
);
1186 instr
= slic_read_dword_from_firmware(fw
, &idx
);
1192 for (sect
= 0; sect
< numsects
; sect
++) {
1193 unsigned int ssize
= sectsize
[sect
] >> 3;
1195 instr
= slic_read_dword_from_firmware(fw
, &idx
);
1196 base
= sectstart
[sect
];
1200 for (addr
= 0; addr
< ssize
; addr
++) {
1201 /* write out instruction address */
1202 slic_write(sdev
, SLIC_REG_WCS
,
1203 SLIC_WCS_COMPARE
| (base
+ addr
));
1204 /* write out instruction to low addr */
1205 slic_write(sdev
, SLIC_REG_WCS
, instr
);
1206 instr
= slic_read_dword_from_firmware(fw
, &idx
);
1207 /* write out instruction to high addr */
1208 slic_write(sdev
, SLIC_REG_WCS
, instr
);
1209 instr
= slic_read_dword_from_firmware(fw
, &idx
);
1212 slic_flush_write(sdev
);
1214 /* everything OK, kick off the card */
1215 slic_write(sdev
, SLIC_REG_WCS
, SLIC_WCS_START
);
1216 slic_flush_write(sdev
);
1217 /* wait long enough for ucode to init card and reach the mainloop */
1220 release_firmware(fw
);
1225 static int slic_init_shmem(struct slic_device
*sdev
)
1227 struct slic_shmem
*sm
= &sdev
->shmem
;
1228 struct slic_shmem_data
*sm_data
;
1231 sm_data
= dma_alloc_coherent(&sdev
->pdev
->dev
, sizeof(*sm_data
),
1232 &paddr
, GFP_KERNEL
);
1234 dev_err(&sdev
->pdev
->dev
, "failed to allocate shared memory\n");
1238 sm
->shmem_data
= sm_data
;
1239 sm
->isr_paddr
= paddr
;
1240 sm
->link_paddr
= paddr
+ offsetof(struct slic_shmem_data
, link
);
1245 static void slic_free_shmem(struct slic_device
*sdev
)
1247 struct slic_shmem
*sm
= &sdev
->shmem
;
1248 struct slic_shmem_data
*sm_data
= sm
->shmem_data
;
1250 dma_free_coherent(&sdev
->pdev
->dev
, sizeof(*sm_data
), sm_data
,
1254 static int slic_init_iface(struct slic_device
*sdev
)
1256 struct slic_shmem
*sm
= &sdev
->shmem
;
1259 sdev
->upr_list
.pending
= false;
1261 err
= slic_init_shmem(sdev
);
1263 netdev_err(sdev
->netdev
, "failed to init shared memory\n");
1267 err
= slic_load_firmware(sdev
);
1269 netdev_err(sdev
->netdev
, "failed to load firmware\n");
1273 err
= slic_load_rcvseq_firmware(sdev
);
1275 netdev_err(sdev
->netdev
,
1276 "failed to load firmware for receive sequencer\n");
1280 slic_write(sdev
, SLIC_REG_ICR
, SLIC_ICR_INT_OFF
);
1281 slic_flush_write(sdev
);
1284 err
= slic_init_rx_queue(sdev
);
1286 netdev_err(sdev
->netdev
, "failed to init rx queue: %u\n", err
);
1290 err
= slic_init_tx_queue(sdev
);
1292 netdev_err(sdev
->netdev
, "failed to init tx queue: %u\n", err
);
1296 err
= slic_init_stat_queue(sdev
);
1298 netdev_err(sdev
->netdev
, "failed to init status queue: %u\n",
1303 slic_write(sdev
, SLIC_REG_ISP
, lower_32_bits(sm
->isr_paddr
));
1304 napi_enable(&sdev
->napi
);
1305 /* disable irq mitigation */
1306 slic_write(sdev
, SLIC_REG_INTAGG
, 0);
1307 slic_write(sdev
, SLIC_REG_ISR
, 0);
1308 slic_flush_write(sdev
);
1310 slic_set_mac_address(sdev
);
1312 spin_lock_bh(&sdev
->link_lock
);
1313 sdev
->duplex
= DUPLEX_UNKNOWN
;
1314 sdev
->speed
= SPEED_UNKNOWN
;
1315 spin_unlock_bh(&sdev
->link_lock
);
1317 slic_set_link_autoneg(sdev
);
1319 err
= request_irq(sdev
->pdev
->irq
, slic_irq
, IRQF_SHARED
, DRV_NAME
,
1322 netdev_err(sdev
->netdev
, "failed to request irq: %u\n", err
);
1326 slic_write(sdev
, SLIC_REG_ICR
, SLIC_ICR_INT_ON
);
1327 slic_flush_write(sdev
);
1328 /* request initial link status */
1329 err
= slic_handle_link_change(sdev
);
1331 netdev_warn(sdev
->netdev
,
1332 "failed to set initial link state: %u\n", err
);
1336 napi_disable(&sdev
->napi
);
1337 slic_free_stat_queue(sdev
);
1339 slic_free_tx_queue(sdev
);
1341 slic_free_rx_queue(sdev
);
1343 slic_free_shmem(sdev
);
1344 slic_card_reset(sdev
);
/* ndo_open: bring the interface up.
 * Carrier is forced off until the first link-change UPR completes.
 */
static int slic_open(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = slic_init_iface(sdev);
	if (err) {
		netdev_err(dev, "failed to initialize interface: %i\n", err);
		return err;
	}

	netif_start_queue(dev);

	return 0;
}
1367 static int slic_close(struct net_device
*dev
)
1369 struct slic_device
*sdev
= netdev_priv(dev
);
1372 netif_stop_queue(dev
);
1374 /* stop irq handling */
1375 napi_disable(&sdev
->napi
);
1376 slic_write(sdev
, SLIC_REG_ICR
, SLIC_ICR_INT_OFF
);
1377 slic_write(sdev
, SLIC_REG_ISR
, 0);
1378 slic_flush_write(sdev
);
1380 free_irq(sdev
->pdev
->irq
, sdev
);
1381 /* turn off RCV and XMT and power down PHY */
1382 val
= SLIC_GXCR_RESET
| SLIC_GXCR_PAUSEEN
;
1383 slic_write(sdev
, SLIC_REG_WXCFG
, val
);
1385 val
= SLIC_GRCR_RESET
| SLIC_GRCR_CTLEN
| SLIC_GRCR_ADDRAEN
|
1386 SLIC_GRCR_HASHSIZE
<< SLIC_GRCR_HASHSIZE_SHIFT
;
1387 slic_write(sdev
, SLIC_REG_WRCFG
, val
);
1389 val
= MII_BMCR
<< 16 | BMCR_PDOWN
;
1390 slic_write(sdev
, SLIC_REG_WPHY
, val
);
1391 slic_flush_write(sdev
);
1393 slic_clear_upr_list(&sdev
->upr_list
);
1394 slic_write(sdev
, SLIC_REG_QUIESCE
, 0);
1396 slic_free_stat_queue(sdev
);
1397 slic_free_tx_queue(sdev
);
1398 slic_free_rx_queue(sdev
);
1399 slic_free_shmem(sdev
);
1401 slic_card_reset(sdev
);
1402 netif_carrier_off(dev
);
1407 static netdev_tx_t
slic_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1409 struct slic_device
*sdev
= netdev_priv(dev
);
1410 struct slic_tx_queue
*txq
= &sdev
->txq
;
1411 struct slic_tx_buffer
*buff
;
1412 struct slic_tx_desc
*desc
;
1417 if (unlikely(slic_get_free_tx_descs(txq
) < SLIC_MAX_REQ_TX_DESCS
)) {
1418 netdev_err(dev
, "BUG! not enough tx LEs left: %u\n",
1419 slic_get_free_tx_descs(txq
));
1420 return NETDEV_TX_BUSY
;
1423 maplen
= skb_headlen(skb
);
1424 paddr
= dma_map_single(&sdev
->pdev
->dev
, skb
->data
, maplen
,
1426 if (dma_mapping_error(&sdev
->pdev
->dev
, paddr
)) {
1427 netdev_err(dev
, "failed to map tx buffer\n");
1431 buff
= &txq
->txbuffs
[txq
->put_idx
];
1433 dma_unmap_addr_set(buff
, map_addr
, paddr
);
1434 dma_unmap_len_set(buff
, map_len
, maplen
);
1437 desc
->totlen
= cpu_to_le32(maplen
);
1438 desc
->paddrl
= cpu_to_le32(lower_32_bits(paddr
));
1439 desc
->paddrh
= cpu_to_le32(upper_32_bits(paddr
));
1440 desc
->len
= cpu_to_le32(maplen
);
1442 txq
->put_idx
= slic_next_queue_idx(txq
->put_idx
, txq
->len
);
1444 cbar_val
= lower_32_bits(buff
->desc_paddr
) | 1;
1445 /* complete writes to RAM and DMA before hardware is informed */
1448 slic_write(sdev
, SLIC_REG_CBAR
, cbar_val
);
1450 if (slic_get_free_tx_descs(txq
) < SLIC_MAX_REQ_TX_DESCS
)
1451 netif_stop_queue(dev
);
1453 return NETDEV_TX_OK
;
1455 dev_kfree_skb_any(skb
);
1457 return NETDEV_TX_OK
;
1460 static void slic_get_stats(struct net_device
*dev
,
1461 struct rtnl_link_stats64
*lst
)
1463 struct slic_device
*sdev
= netdev_priv(dev
);
1464 struct slic_stats
*stats
= &sdev
->stats
;
1466 SLIC_GET_STATS_COUNTER(lst
->rx_packets
, stats
, rx_packets
);
1467 SLIC_GET_STATS_COUNTER(lst
->tx_packets
, stats
, tx_packets
);
1468 SLIC_GET_STATS_COUNTER(lst
->rx_bytes
, stats
, rx_bytes
);
1469 SLIC_GET_STATS_COUNTER(lst
->tx_bytes
, stats
, tx_bytes
);
1470 SLIC_GET_STATS_COUNTER(lst
->rx_errors
, stats
, rx_errors
);
1471 SLIC_GET_STATS_COUNTER(lst
->rx_dropped
, stats
, rx_buff_miss
);
1472 SLIC_GET_STATS_COUNTER(lst
->tx_dropped
, stats
, tx_dropped
);
1473 SLIC_GET_STATS_COUNTER(lst
->multicast
, stats
, rx_mcasts
);
1474 SLIC_GET_STATS_COUNTER(lst
->rx_over_errors
, stats
, rx_buffoflow
);
1475 SLIC_GET_STATS_COUNTER(lst
->rx_crc_errors
, stats
, rx_crc
);
1476 SLIC_GET_STATS_COUNTER(lst
->rx_fifo_errors
, stats
, rx_oflow802
);
1477 SLIC_GET_STATS_COUNTER(lst
->tx_carrier_errors
, stats
, tx_carrier
);
1480 static int slic_get_sset_count(struct net_device
*dev
, int sset
)
1484 return ARRAY_SIZE(slic_stats_strings
);
1490 static void slic_get_ethtool_stats(struct net_device
*dev
,
1491 struct ethtool_stats
*eth_stats
, u64
*data
)
1493 struct slic_device
*sdev
= netdev_priv(dev
);
1494 struct slic_stats
*stats
= &sdev
->stats
;
1496 SLIC_GET_STATS_COUNTER(data
[0], stats
, rx_packets
);
1497 SLIC_GET_STATS_COUNTER(data
[1], stats
, rx_bytes
);
1498 SLIC_GET_STATS_COUNTER(data
[2], stats
, rx_mcasts
);
1499 SLIC_GET_STATS_COUNTER(data
[3], stats
, rx_errors
);
1500 SLIC_GET_STATS_COUNTER(data
[4], stats
, rx_buff_miss
);
1501 SLIC_GET_STATS_COUNTER(data
[5], stats
, rx_tpcsum
);
1502 SLIC_GET_STATS_COUNTER(data
[6], stats
, rx_tpoflow
);
1503 SLIC_GET_STATS_COUNTER(data
[7], stats
, rx_tphlen
);
1504 SLIC_GET_STATS_COUNTER(data
[8], stats
, rx_ipcsum
);
1505 SLIC_GET_STATS_COUNTER(data
[9], stats
, rx_iplen
);
1506 SLIC_GET_STATS_COUNTER(data
[10], stats
, rx_iphlen
);
1507 SLIC_GET_STATS_COUNTER(data
[11], stats
, rx_early
);
1508 SLIC_GET_STATS_COUNTER(data
[12], stats
, rx_buffoflow
);
1509 SLIC_GET_STATS_COUNTER(data
[13], stats
, rx_lcode
);
1510 SLIC_GET_STATS_COUNTER(data
[14], stats
, rx_drbl
);
1511 SLIC_GET_STATS_COUNTER(data
[15], stats
, rx_crc
);
1512 SLIC_GET_STATS_COUNTER(data
[16], stats
, rx_oflow802
);
1513 SLIC_GET_STATS_COUNTER(data
[17], stats
, rx_uflow802
);
1514 SLIC_GET_STATS_COUNTER(data
[18], stats
, tx_packets
);
1515 SLIC_GET_STATS_COUNTER(data
[19], stats
, tx_bytes
);
1516 SLIC_GET_STATS_COUNTER(data
[20], stats
, tx_carrier
);
1517 SLIC_GET_STATS_COUNTER(data
[21], stats
, tx_dropped
);
1518 SLIC_GET_STATS_COUNTER(data
[22], stats
, irq_errs
);
1521 static void slic_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
1523 if (stringset
== ETH_SS_STATS
) {
1524 memcpy(data
, slic_stats_strings
, sizeof(slic_stats_strings
));
1525 data
+= sizeof(slic_stats_strings
);
1529 static void slic_get_drvinfo(struct net_device
*dev
,
1530 struct ethtool_drvinfo
*info
)
1532 struct slic_device
*sdev
= netdev_priv(dev
);
1534 strlcpy(info
->driver
, DRV_NAME
, sizeof(info
->driver
));
1535 strlcpy(info
->bus_info
, pci_name(sdev
->pdev
), sizeof(info
->bus_info
));
1538 static const struct ethtool_ops slic_ethtool_ops
= {
1539 .get_drvinfo
= slic_get_drvinfo
,
1540 .get_link
= ethtool_op_get_link
,
1541 .get_strings
= slic_get_strings
,
1542 .get_ethtool_stats
= slic_get_ethtool_stats
,
1543 .get_sset_count
= slic_get_sset_count
,
1546 static const struct net_device_ops slic_netdev_ops
= {
1547 .ndo_open
= slic_open
,
1548 .ndo_stop
= slic_close
,
1549 .ndo_start_xmit
= slic_xmit
,
1550 .ndo_set_mac_address
= eth_mac_addr
,
1551 .ndo_get_stats64
= slic_get_stats
,
1552 .ndo_set_rx_mode
= slic_set_rx_mode
,
1553 .ndo_validate_addr
= eth_validate_addr
,
1556 static u16
slic_eeprom_csum(unsigned char *eeprom
, unsigned int len
)
1558 unsigned char *ptr
= eeprom
;
1563 memcpy(&data
, ptr
, sizeof(data
));
1564 csum
+= le16_to_cpu(data
);
1571 csum
= (csum
& 0xFFFF) + ((csum
>> 16) & 0xFFFF);
1575 /* check eeprom size, magic and checksum */
1576 static bool slic_eeprom_valid(unsigned char *eeprom
, unsigned int size
)
1578 const unsigned int MAX_SIZE
= 128;
1579 const unsigned int MIN_SIZE
= 98;
1583 if (size
< MIN_SIZE
|| size
> MAX_SIZE
)
1585 memcpy(&magic
, eeprom
, sizeof(magic
));
1586 if (le16_to_cpu(magic
) != SLIC_EEPROM_MAGIC
)
1588 /* cut checksum bytes */
1590 memcpy(&csum
, eeprom
+ size
, sizeof(csum
));
1592 return (le16_to_cpu(csum
) == slic_eeprom_csum(eeprom
, size
));
1595 static int slic_read_eeprom(struct slic_device
*sdev
)
1597 unsigned int devfn
= PCI_FUNC(sdev
->pdev
->devfn
);
1598 struct slic_shmem
*sm
= &sdev
->shmem
;
1599 struct slic_shmem_data
*sm_data
= sm
->shmem_data
;
1600 const unsigned int MAX_LOOPS
= 5000;
1601 unsigned int codesize
;
1602 unsigned char *eeprom
;
1603 struct slic_upr
*upr
;
1609 eeprom
= dma_alloc_coherent(&sdev
->pdev
->dev
, SLIC_EEPROM_SIZE
,
1610 &paddr
, GFP_KERNEL
);
1614 slic_write(sdev
, SLIC_REG_ICR
, SLIC_ICR_INT_OFF
);
1615 /* setup ISP temporarily */
1616 slic_write(sdev
, SLIC_REG_ISP
, lower_32_bits(sm
->isr_paddr
));
1618 err
= slic_new_upr(sdev
, SLIC_UPR_CONFIG
, paddr
);
1620 for (i
= 0; i
< MAX_LOOPS
; i
++) {
1621 if (le32_to_cpu(sm_data
->isr
) & SLIC_ISR_UPC
)
1625 if (i
== MAX_LOOPS
) {
1626 dev_err(&sdev
->pdev
->dev
,
1627 "timed out while waiting for eeprom data\n");
1630 upr
= slic_dequeue_upr(sdev
);
1634 slic_write(sdev
, SLIC_REG_ISP
, 0);
1635 slic_write(sdev
, SLIC_REG_ISR
, 0);
1636 slic_flush_write(sdev
);
1641 if (sdev
->model
== SLIC_MODEL_OASIS
) {
1642 struct slic_oasis_eeprom
*oee
;
1644 oee
= (struct slic_oasis_eeprom
*)eeprom
;
1647 codesize
= le16_to_cpu(oee
->eeprom_code_size
);
1649 struct slic_mojave_eeprom
*mee
;
1651 mee
= (struct slic_mojave_eeprom
*)eeprom
;
1654 codesize
= le16_to_cpu(mee
->eeprom_code_size
);
1657 if (!slic_eeprom_valid(eeprom
, codesize
)) {
1658 dev_err(&sdev
->pdev
->dev
, "invalid checksum in eeprom\n");
1662 /* set mac address */
1663 ether_addr_copy(sdev
->netdev
->dev_addr
, mac
[devfn
]);
1665 dma_free_coherent(&sdev
->pdev
->dev
, SLIC_EEPROM_SIZE
, eeprom
, paddr
);
1670 static int slic_init(struct slic_device
*sdev
)
1674 spin_lock_init(&sdev
->upper_lock
);
1675 spin_lock_init(&sdev
->link_lock
);
1676 INIT_LIST_HEAD(&sdev
->upr_list
.list
);
1677 spin_lock_init(&sdev
->upr_list
.lock
);
1678 u64_stats_init(&sdev
->stats
.syncp
);
1680 slic_card_reset(sdev
);
1682 err
= slic_load_firmware(sdev
);
1684 dev_err(&sdev
->pdev
->dev
, "failed to load firmware\n");
1688 /* we need the shared memory to read EEPROM so set it up temporarily */
1689 err
= slic_init_shmem(sdev
);
1691 dev_err(&sdev
->pdev
->dev
, "failed to init shared memory\n");
1695 err
= slic_read_eeprom(sdev
);
1697 dev_err(&sdev
->pdev
->dev
, "failed to read eeprom\n");
1701 slic_card_reset(sdev
);
1702 slic_free_shmem(sdev
);
1706 slic_free_shmem(sdev
);
1711 static bool slic_is_fiber(unsigned short subdev
)
1715 case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F
:
1716 case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F
: fallthrough
;
1718 case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF
:
1719 case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF
:
1720 case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF
:
1721 case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF
:
1727 static void slic_configure_pci(struct pci_dev
*pdev
)
1732 pci_read_config_word(pdev
, PCI_COMMAND
, &old
);
1734 cmd
= old
| PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
1736 pci_write_config_word(pdev
, PCI_COMMAND
, cmd
);
1739 static int slic_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1741 struct slic_device
*sdev
;
1742 struct net_device
*dev
;
1745 err
= pci_enable_device(pdev
);
1747 dev_err(&pdev
->dev
, "failed to enable PCI device\n");
1751 pci_set_master(pdev
);
1752 pci_try_set_mwi(pdev
);
1754 slic_configure_pci(pdev
);
1756 err
= dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(32));
1758 dev_err(&pdev
->dev
, "failed to setup DMA\n");
1762 dma_set_coherent_mask(&pdev
->dev
, DMA_BIT_MASK(32));
1764 err
= pci_request_regions(pdev
, DRV_NAME
);
1766 dev_err(&pdev
->dev
, "failed to obtain PCI regions\n");
1770 dev
= alloc_etherdev(sizeof(*sdev
));
1772 dev_err(&pdev
->dev
, "failed to alloc ethernet device\n");
1777 SET_NETDEV_DEV(dev
, &pdev
->dev
);
1778 pci_set_drvdata(pdev
, dev
);
1779 dev
->irq
= pdev
->irq
;
1780 dev
->netdev_ops
= &slic_netdev_ops
;
1781 dev
->hw_features
= NETIF_F_RXCSUM
;
1782 dev
->features
|= dev
->hw_features
;
1784 dev
->ethtool_ops
= &slic_ethtool_ops
;
1786 sdev
= netdev_priv(dev
);
1787 sdev
->model
= (pdev
->device
== PCI_DEVICE_ID_ALACRITECH_OASIS
) ?
1788 SLIC_MODEL_OASIS
: SLIC_MODEL_MOJAVE
;
1789 sdev
->is_fiber
= slic_is_fiber(pdev
->subsystem_device
);
1792 sdev
->regs
= ioremap(pci_resource_start(pdev
, 0),
1793 pci_resource_len(pdev
, 0));
1795 dev_err(&pdev
->dev
, "failed to map registers\n");
1800 err
= slic_init(sdev
);
1802 dev_err(&pdev
->dev
, "failed to initialize driver\n");
1806 netif_napi_add(dev
, &sdev
->napi
, slic_poll
, SLIC_NAPI_WEIGHT
);
1807 netif_carrier_off(dev
);
1809 err
= register_netdev(dev
);
1811 dev_err(&pdev
->dev
, "failed to register net device: %i\n", err
);
1818 iounmap(sdev
->regs
);
1822 pci_release_regions(pdev
);
1824 pci_disable_device(pdev
);
1829 static void slic_remove(struct pci_dev
*pdev
)
1831 struct net_device
*dev
= pci_get_drvdata(pdev
);
1832 struct slic_device
*sdev
= netdev_priv(dev
);
1834 unregister_netdev(dev
);
1835 iounmap(sdev
->regs
);
1837 pci_release_regions(pdev
);
1838 pci_disable_device(pdev
);
1841 static struct pci_driver slic_driver
= {
1843 .id_table
= slic_id_tbl
,
1844 .probe
= slic_probe
,
1845 .remove
= slic_remove
,
1848 module_pci_driver(slic_driver
);
1850 MODULE_DESCRIPTION("Alacritech non-accelerated SLIC driver");
1851 MODULE_AUTHOR("Lino Sanfilippo <LinoSanfilippo@gmx.de>");
1852 MODULE_LICENSE("GPL");