// SPDX-License-Identifier: GPL-2.0-only
/**
 * drivers/net/ethernet/micrel/ks8851_mll.c
 * Copyright (c) 2009 Micrel Inc.
 */

/* Supports:
 * KS8851 16bit MLL chip from Micrel Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/ks8851_mll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>

#include "ks8851.h"
#define DRV_NAME	"ks8851_mll"

static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define MAX_RECV_FRAMES			255
#define MAX_BUF_SIZE			2048
#define TX_BUF_SIZE			2000
#define RX_BUF_SIZE			2000

#define RXCR1_FILTER_MASK		(RXCR1_RXINVF | RXCR1_RXAE | \
					 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
#define RXQCR_CMD_CNTL			(RXQCR_RXFCTE | RXQCR_ADRFE)

#define ENUM_BUS_NONE			0
#define ENUM_BUS_8BIT			1
#define ENUM_BUS_16BIT			2
#define ENUM_BUS_32BIT			3

#define MAX_MCAST_LST			32
#define HW_MCAST_SIZE			8
/**
 * union ks_tx_hdr - tx header data
 * @txb: The header as bytes
 * @txw: The header as 16bit, little-endian words
 *
 * A dual representation of the tx header data to allow
 * access to individual bytes, and to allow 16bit accesses
 * with 16bit alignment.
 */
union ks_tx_hdr {
	u8	txb[4];
	__le16	txw[2];
};
/**
 * struct ks_net - KS8851 driver private data
 * @netdev		: The network device we're bound to.
 * @hw_addr		: start address of data register.
 * @hw_addr_cmd		: start address of command register.
 * @txh			: temporary buffer to save status/length.
 * @lock		: Lock to ensure that the device is not accessed when busy.
 * @pdev		: Pointer to platform device.
 * @mii			: The MII state information for the mii calls.
 * @frame_head_info	: frame header information for multi-pkt rx.
 * @statelock		: Lock on this structure for tx list.
 * @msg_enable		: The message flags controlling driver output (see ethtool).
 * @frame_cnt		: number of frames received.
 * @bus_width		: i/o bus width.
 * @rc_rxqcr		: Cached copy of KS_RXQCR.
 * @rc_txcr		: Cached copy of KS_TXCR.
 * @rc_ier		: Cached copy of KS_IER.
 * @sharedbus		: Multiplexed (addr and data bus) mode indicator.
 * @cmd_reg_cache	: command register cached.
 * @cmd_reg_cache_int	: command register cached. Used in the irq handler.
 * @promiscuous		: promiscuous mode indicator.
 * @all_mcast		: all-multicast mode indicator.
 * @mcast_lst_size	: size of multicast list.
 * @mcast_lst		: multicast list.
 * @mcast_bits		: multicast hash-table enable bits.
 * @mac_addr		: MAC address assigned to this device.
 * @fid			: frame id.
 * @extra_byte		: number of extra bytes prepended to an rx packet.
 * @enabled		: indicator that the device is enabled.
 *
 * The @lock ensures that the chip is protected when certain operations are
 * in progress. When the read or write packet transfer is in progress, most
 * of the chip registers are not accessible until the transfer is finished and
 * the DMA has been de-asserted.
 *
 * The @statelock is used to protect information in the structure which may
 * need to be accessed via several sources, such as the network driver layer
 * or one of the work queues.
 */
/* Receive multiplex framer header info */
struct type_frame_head {
	u16	sts;	/* Frame status */
	u16	len;	/* Byte count */
};
struct ks_net {
	struct net_device	*netdev;
	void __iomem		*hw_addr;
	void __iomem		*hw_addr_cmd;
	union ks_tx_hdr		txh ____cacheline_aligned;
	struct mutex		lock;	/* protects register access */
	struct platform_device	*pdev;
	struct mii_if_info	mii;
	struct type_frame_head	*frame_head_info;
	spinlock_t		statelock;
	u32			msg_enable;
	u32			frame_cnt;
	int			bus_width;

	u16			rc_rxqcr;
	u16			rc_txcr;
	u16			rc_ier;
	u16			sharedbus;
	u16			cmd_reg_cache;
	u16			cmd_reg_cache_int;
	u16			promiscuous;
	u16			all_mcast;
	u16			mcast_lst_size;
	u8			mcast_lst[MAX_MCAST_LST][ETH_ALEN];
	u8			mcast_bits[HW_MCAST_SIZE];
	u8			mac_addr[6];
	u8			fid;
	u8			extra_byte;
	u8			enabled;
};

static int msg_enable;
#define BE3	0x8000	/* Byte Enable 3 */
#define BE2	0x4000	/* Byte Enable 2 */
#define BE1	0x2000	/* Byte Enable 1 */
#define BE0	0x1000	/* Byte Enable 0 */

/* register read/write calls.
 *
 * All these calls issue transactions to access the chip's registers. They
 * all require that the necessary lock is held to prevent accesses when the
 * chip is busy transferring packet data (RX/TX FIFO accesses).
 */
/**
 * ks_check_endian - Check whether endianness of the bus is correct
 * @ks	  : The chip information
 *
 * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit
 * bus. To maintain optimum performance, the bus endianness should be set
 * such that it matches the endianness of the CPU.
 */
static int ks_check_endian(struct ks_net *ks)
{
	u16 cider;

	/*
	 * Read the CIDER register first, however read it the "wrong" way
	 * around. If the endian strap on the KS8851-16MLL is incorrect and
	 * the chip is operating in a different endianness than the CPU,
	 * then the meaning of the BE[3:0] byte-enable bits is also swapped
	 * such that:
	 *    BE[3,2,1,0] becomes BE[1,0,3,2]
	 *
	 * Luckily for us, the byte-enable bits are the top four MSbits of
	 * the address register and the CIDER register is at offset 0xc0.
	 * Hence, by reading address 0xc0c0, which is not impacted by endian
	 * swapping, we assert either BE[3:2] or BE[1:0] while reading the
	 * CIDER register.
	 *
	 * If the bus configuration is correct, reading 0xc0c0 asserts
	 * BE[3:2] and this read returns 0x0000, because to read a register
	 * with the bottom two LSbits of the address set to 0, BE[1:0] must
	 * be asserted.
	 *
	 * If the bus configuration is NOT correct, reading 0xc0c0 asserts
	 * BE[1:0] and this read returns the non-zero value 0x8872.
	 */
	iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd);
	cider = ioread16(ks->hw_addr);
	if (!cider)
		return 0;

	netdev_err(ks->netdev, "incorrect EESK endian strap setting\n");

	return -EINVAL;
}
/**
 * ks_rdreg16 - read 16 bit register from device
 * @ks	  : The chip information
 * @offset: The register address
 *
 * Read a 16bit register from the chip, returning the result.
 */
static u16 ks_rdreg16(struct ks_net *ks, int offset)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	return ioread16(ks->hw_addr);
}

/**
 * ks_wrreg16 - write 16bit register value to chip
 * @ks: The chip information
 * @offset: The register address
 * @value: The value to write
 */
static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value, ks->hw_addr);
}
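
/*
 * Illustrative sketch (not part of the driver): how the command word above
 * encodes both the register offset and the byte-enable lane. The offsets
 * 0x90 and 0x92 below are arbitrary examples, not registers this driver
 * necessarily uses.
 */
#if 0
static void ks_cmd_word_example(struct ks_net *ks)
{
	/* Even word offset: (0x90 & 0x02) == 0, so BE1|BE0 stay at bits
	 * 13:12 and the low 16-bit lane is selected -> command word 0x3090.
	 */
	u16 cmd_low  = 0x90 | ((BE1 | BE0) << (0x90 & 0x02));

	/* Odd word offset: (0x92 & 0x02) == 2, so BE1|BE0 shift up to
	 * BE3|BE2 and the high lane is selected -> command word 0xC092.
	 */
	u16 cmd_high = 0x92 | ((BE1 | BE0) << (0x92 & 0x02));

	iowrite16(cmd_low, ks->hw_addr_cmd);	/* then access ks->hw_addr */
	iowrite16(cmd_high, ks->hw_addr_cmd);
}
#endif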
/**
 * ks_inblk - read a block of data from the QMU after pseudo-DMA mode is enabled.
 * @ks: The chip state
 * @wptr: buffer address to save data
 * @len: length in bytes to read
 */
static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
{
	len >>= 1;
	while (len--)
		*wptr++ = (u16)ioread16(ks->hw_addr);
}

/**
 * ks_outblk - write data to the QMU after pseudo-DMA mode is enabled.
 * @ks: The chip information
 * @wptr: buffer address
 * @len: length in bytes to write
 */
static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
{
	len >>= 1;
	while (len--)
		iowrite16(*wptr++, ks->hw_addr);
}

static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
}  /* ks_disable_int */

static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
}  /* ks_enable_int */
/**
 * ks_tx_fifo_space - return the available hardware buffer size.
 * @ks: The chip information
 */
static inline u16 ks_tx_fifo_space(struct ks_net *ks)
{
	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
}

/**
 * ks_save_cmd_reg - save the command register from the cache.
 * @ks: The chip information
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	/* The KS8851 MLL cannot reliably read back the command register,
	 * so rely on software to cache its last written content.
	 */
	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}

/**
 * ks_restore_cmd_reg - restore the command register from the cache and
 *	write it to the hardware register.
 * @ks: The chip information
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}
/**
 * ks_set_powermode - set power mode of the device
 * @ks: The chip information
 * @pwrmode: The power mode value to write to KS_PMECR.
 *
 * Change the power mode of the chip.
 */
static void ks_set_powermode(struct ks_net *ks, unsigned int pwrmode)
{
	unsigned int pmecr;

	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);

	ks_rdreg16(ks, KS_GRR);
	pmecr = ks_rdreg16(ks, KS_PMECR);
	pmecr &= ~PMECR_PM_MASK;
	pmecr |= pwrmode;

	ks_wrreg16(ks, KS_PMECR, pmecr);
}
/**
 * ks_read_config - read chip configuration of bus width.
 * @ks: The chip information
 */
static void ks_read_config(struct ks_net *ks)
{
	u16 reg_data = 0;

	/* Regardless of bus width, an 8 bit read should always work. */
	reg_data = ks_rdreg16(ks, KS_CCR);

	/* addr/data bus are multiplexed */
	ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;

	/* The amount of garbage data prepended when reading from the QMU
	 * depends on the bus width.
	 */
	if (reg_data & CCR_8BIT) {
		ks->bus_width = ENUM_BUS_8BIT;
		ks->extra_byte = 1;
	} else if (reg_data & CCR_16BIT) {
		ks->bus_width = ENUM_BUS_16BIT;
		ks->extra_byte = 2;
	} else {
		ks->bus_width = ENUM_BUS_32BIT;
		ks->extra_byte = 4;
	}
}
/**
 * ks_soft_reset - issue a soft reset to the device
 * @ks: The device state.
 * @op: The bit(s) to set in the GRR
 *
 * Issue the relevant soft-reset command to the device's GRR register
 * specified by @op.
 *
 * Note, the delays are in there as a caution to ensure that the reset
 * has time to take effect and then complete. Since the datasheet does
 * not currently specify the exact sequence, we have chosen something
 * that seems to work with our device.
 */
static void ks_soft_reset(struct ks_net *ks, unsigned int op)
{
	/* Disable interrupt first */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time to effect reset */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for condition to clear */
}
static void ks_enable_qmu(struct ks_net *ks)
{
	u16 w;

	w = ks_rdreg16(ks, KS_TXCR);
	/* Enables QMU Transmit (TXCR). */
	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);

	/* RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
	 * Enable
	 */
	w = ks_rdreg16(ks, KS_RXQCR);
	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);

	/* Enables QMU Receive (RXCR1). */
	w = ks_rdreg16(ks, KS_RXCR1);
	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
	ks->enabled = true;
}  /* ks_enable_qmu */

static void ks_disable_qmu(struct ks_net *ks)
{
	u16 w;

	w = ks_rdreg16(ks, KS_TXCR);

	/* Disables QMU Transmit (TXCR). */
	w &= ~TXCR_TXE;
	ks_wrreg16(ks, KS_TXCR, w);

	/* Disables QMU Receive (RXCR1). */
	w = ks_rdreg16(ks, KS_RXCR1);
	w &= ~RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, w);

	ks->enabled = false;
}  /* ks_disable_qmu */
/**
 * ks_read_qmu - read 1 pkt data from the QMU.
 * @ks: The chip information
 * @buf: buffer address to save 1 pkt
 * @len: Pkt length
 *
 * Here is the sequence to read 1 pkt:
 *	1. set pseudo-DMA mode
 *	2. read prepend data
 *	3. read pkt data
 *	4. reset pseudo-DMA mode
 */
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	u32 r = ks->extra_byte & 0x1;
	u32 w = ks->extra_byte - r;

	/* 1. set pseudo-DMA mode */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);

	/* 2. read prepend data */
	/*
	 * read 4 + extra bytes and discard them.
	 * extra bytes for dummy, 2 for status, 2 for len
	 */

	/* the single extra dummy byte is only present on an 8-bit bus */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read pkt data */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. reset pseudo-DMA mode */
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
}
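
/*
 * For reference, on the 16-bit bus (extra_byte == 2) the prepended data
 * discarded above is: 2 dummy bytes, 2 status bytes and 2 length bytes,
 * i.e. w + 2 + 2 = 6 bytes, after which ks_inblk() reads the frame itself
 * rounded up to a 4-byte boundary by ALIGN(len, 4).
 */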
/**
 * ks_rcv - read multiple pkts data from the QMU.
 * @ks: The chip information
 * @netdev: The network device being opened.
 *
 * Read all of the header information before reading the pkt content.
 * It is not allowed to process only part of the pkts left in the QMU
 * after issuing the interrupt ack.
 */
static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
{
	u32	i;
	struct type_frame_head *frame_hdr = ks->frame_head_info;
	struct sk_buff *skb;

	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;

	/* read all header information */
	for (i = 0; i < ks->frame_cnt; i++) {
		/* Checking Received packet status */
		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
		/* Get packet len from hardware */
		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
		frame_hdr++;
	}

	frame_hdr = ks->frame_head_info;
	while (ks->frame_cnt--) {
		if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
			     frame_hdr->len >= RX_BUF_SIZE ||
			     frame_hdr->len <= 0)) {
			/* discard an invalid packet */
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			netdev->stats.rx_dropped++;
			if (!(frame_hdr->sts & RXFSHR_RXFV))
				netdev->stats.rx_frame_errors++;
			else
				netdev->stats.rx_length_errors++;
			frame_hdr++;
			continue;
		}

		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
		if (likely(skb)) {
			skb_reserve(skb, 2);
			/* read data block including CRC 4 bytes */
			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
			skb_put(skb, frame_hdr->len - 4);
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
			/* exclude CRC size */
			netdev->stats.rx_bytes += frame_hdr->len - 4;
			netdev->stats.rx_packets++;
		} else {
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			netdev->stats.rx_dropped++;
		}
		frame_hdr++;
	}
}
/**
 * ks_update_link_status - link status update.
 * @netdev: The network device being opened.
 * @ks: The chip information
 */
static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
{
	/* check the status of the link */
	u32 link_up_status;

	if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
		netif_carrier_on(netdev);
		link_up_status = true;
	} else {
		netif_carrier_off(netdev);
		link_up_status = false;
	}

	netif_dbg(ks, link, ks->netdev,
		  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
}
/**
 * ks_irq - device interrupt handler
 * @irq: Interrupt number passed from the IRQ handler.
 * @pw: The private word passed to register_irq(), our struct ks_net.
 *
 * This is the handler invoked when the chip raises an interrupt.
 *
 * Read the interrupt status, work out what needs to be done and then clear
 * any of the interrupts that are not needed.
 */
static irqreturn_t ks_irq(int irq, void *pw)
{
	struct net_device *netdev = pw;
	struct ks_net *ks = netdev_priv(netdev);
	unsigned long flags;
	u16 status;

	spin_lock_irqsave(&ks->statelock, flags);
	/* this should be the first in the IRQ handler */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		ks_restore_cmd_reg(ks);
		spin_unlock_irqrestore(&ks->statelock, flags);
		return IRQ_NONE;
	}

	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);

		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	if (unlikely(status & IRQ_RXOI))
		ks->netdev->stats.rx_over_errors++;

	/* this should be the last in the IRQ handler */
	ks_restore_cmd_reg(ks);
	spin_unlock_irqrestore(&ks->statelock, flags);
	return IRQ_HANDLED;
}
/**
 * ks_net_open - open network device
 * @netdev: The network device being opened.
 *
 * Called when the network device is marked active, such as a user executing
 * 'ifconfig up' on the device.
 */
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

#define	KS_INT_FLAGS	IRQF_TRIGGER_LOW
	/* lock the card, even if we may not actually do anything
	 * else at the moment.
	 */

	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);

	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
		return err;
	}

	/* wake up powermode to normal mode */
	ks_set_powermode(ks, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for normal mode to take effect */

	ks_wrreg16(ks, KS_ISR, 0xffff);
	ks_enable_int(ks);
	ks_enable_qmu(ks);
	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	return 0;
}
/**
 * ks_net_stop - close network device
 * @netdev: The device being closed.
 *
 * Called to close down a network device which has been active. Cancel any
 * work, shut down the RX and TX process and then place the chip into a low
 * power state while it is not being used.
 */
static int ks_net_stop(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	netif_info(ks, ifdown, netdev, "shutting down\n");

	netif_stop_queue(netdev);

	mutex_lock(&ks->lock);

	/* turn off the IRQs and ack any outstanding */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* shutdown RX/TX QMU */
	ks_disable_qmu(ks);
	ks_disable_int(ks);

	/* set powermode to soft power down to save power */
	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
	free_irq(netdev->irq, netdev);
	mutex_unlock(&ks->lock);
	return 0;
}
/**
 * ks_write_qmu - write 1 pkt data to the QMU.
 * @ks: The chip information
 * @pdata: buffer address of the pkt to write
 * @len: Pkt length in bytes
 *
 * Here is the sequence to write 1 pkt:
 *	1. set pseudo-DMA mode
 *	2. write status/length
 *	3. write pkt data
 *	4. reset pseudo-DMA mode
 *	5. enqueue the pkt (set TXQCR_METFE)
 *	6. wait until the pkt is out
 */
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
	/* start header at txb[0] to align txw entries */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* 1. set pseudo-DMA mode */
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
	/* 2. write status/length info */
	ks_outblk(ks, ks->txh.txw, 4);
	/* 3. write pkt data */
	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
	/* 4. reset pseudo-DMA mode */
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
	/* 5. Enqueue Tx (move the pkt from TX buffer into TXQ) */
	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
	/* 6. wait until TXQCR_METFE is auto-cleared */
	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
		;
}
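
/*
 * Illustrative sketch (not part of the driver): the 4-byte header written
 * in step 2 above for a 60-byte frame. txw[0] is the control word (left as
 * zero here), txw[1] is the byte count; both are little-endian on the wire.
 */
#if 0
static void ks_tx_hdr_example(void)
{
	union ks_tx_hdr hdr;

	hdr.txw[0] = 0;			/* control word: no extra flags */
	hdr.txw[1] = cpu_to_le16(60);	/* frame length in bytes */

	/* hdr.txb[] now holds { 0x00, 0x00, 0x3c, 0x00 } on any CPU,
	 * which is what ks_outblk() pushes into the TXQ before the data.
	 */
}
#endif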
/**
 * ks_start_xmit - transmit packet
 * @skb		: The buffer to transmit
 * @netdev	: The device used to transmit the packet.
 *
 * Called by the network layer to transmit the @skb.
 * spin_lock_irqsave is required because tx and rx must be mutually
 * exclusive, so while tx is in progress, prevent the IRQ from firing.
 */
static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	netdev_tx_t retv = NETDEV_TX_OK;
	struct ks_net *ks = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&ks->statelock, flags);

	/* Extra space is required:
	 * 4 bytes for alignment, 4 for status/length, 4 for CRC
	 */
	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
		ks_write_qmu(ks, skb->data, skb->len);
		/* add tx statistics */
		netdev->stats.tx_bytes += skb->len;
		netdev->stats.tx_packets++;
		dev_kfree_skb(skb);
	} else {
		retv = NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&ks->statelock, flags);
	return retv;
}
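
/*
 * For reference: for a full-sized 1514-byte frame the check above requires
 * 1526 bytes of free TX FIFO space - 4 bytes for the status/length header
 * written by ks_write_qmu(), 4 for DWORD alignment padding and 4 for the
 * CRC appended by the MAC.
 */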
/**
 * ks_start_rx - ready to serve pkts
 * @ks		: The chip information
 */
static void ks_start_rx(struct ks_net *ks)
{
	u16 cntl;

	/* Enables QMU Receive (RXCR1). */
	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl |= RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, cntl);
}  /* ks_start_rx */

/**
 * ks_stop_rx - stop serving pkts
 * @ks		: The chip information
 */
static void ks_stop_rx(struct ks_net *ks)
{
	u16 cntl;

	/* Disables QMU Receive (RXCR1). */
	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl &= ~RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, cntl);
}  /* ks_stop_rx */

static unsigned long const ethernet_polynomial = CRC32_POLY_BE;

static unsigned long ether_gen_crc(int length, u8 *data)
{
	long crc = -1;

	while (--length >= 0) {
		u8 current_octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
			crc = (crc << 1) ^
				((crc < 0) ^ (current_octet & 1) ?
				 ethernet_polynomial : 0);
		}
	}
	return (unsigned long)crc;
}  /* ether_gen_crc */
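
/*
 * Illustrative sketch (not part of the driver): how ks_set_grpaddr() below
 * turns a multicast address into a hash-table bit. The address used is the
 * all-hosts group 01:00:5e:00:00:01 and is only an example.
 */
#if 0
static void ks_mcast_hash_example(void)
{
	u8 addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u32 position = (ether_gen_crc(6, addr) >> 26) & 0x3f;	/* bucket 0..63 */
	u32 index = position >> 3;		/* byte in mcast_bits[] */
	u32 value = 1 << (position & 7);	/* bit within that byte */

	pr_info("bucket %u -> mcast_bits[%u] |= 0x%02x\n",
		position, index, value);
}
#endif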
/**
 * ks_set_grpaddr - set multicast information
 * @ks : The chip information
 */
static void ks_set_grpaddr(struct ks_net *ks)
{
	u8	i;
	u32	index, position, value;

	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);

	for (i = 0; i < ks->mcast_lst_size; i++) {
		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
		index = position >> 3;
		value = 1 << (position & 7);
		ks->mcast_bits[index] |= (u8)value;
	}

	for (i = 0; i < HW_MCAST_SIZE; i++) {
		if (i & 1) {
			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
				   (ks->mcast_bits[i] << 8) |
				   ks->mcast_bits[i - 1]);
		}
	}
}  /* ks_set_grpaddr */
/**
 * ks_clear_mcast - clear multicast information
 * @ks : The chip information
 *
 * This routine removes all mcast addresses set in the hardware.
 */
static void ks_clear_mcast(struct ks_net *ks)
{
	u16	i, mcast_size;

	for (i = 0; i < HW_MCAST_SIZE; i++)
		ks->mcast_bits[i] = 0;

	mcast_size = HW_MCAST_SIZE >> 2;
	for (i = 0; i < mcast_size; i++)
		ks_wrreg16(ks, KS_MAHTR0 + (2 * i), 0);
}

static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
{
	u16 cntl;

	ks->promiscuous = promiscuous_mode;
	ks_stop_rx(ks);	/* Stop receiving for reconfiguration */
	cntl = ks_rdreg16(ks, KS_RXCR1);

	cntl &= ~RXCR1_FILTER_MASK;
	if (promiscuous_mode)
		/* Enable Promiscuous mode */
		cntl |= RXCR1_RXAE | RXCR1_RXINVF;
	else
		/* Disable Promiscuous mode (default normal mode) */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);
}  /* ks_set_promis */
static void ks_set_mcast(struct ks_net *ks, u16 mcast)
{
	u16 cntl;

	ks->all_mcast = mcast;
	ks_stop_rx(ks);	/* Stop receiving for reconfiguration */
	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl &= ~RXCR1_FILTER_MASK;
	if (mcast)
		/* Enable "Perfect with Multicast address passed mode" */
		cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else
		/*
		 * Disable "Perfect with Multicast address passed
		 * mode" (normal mode).
		 */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);
}  /* ks_set_mcast */
static void ks_set_rx_mode(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	/* Turn on/off promiscuous mode. */
	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
		ks_set_promis(ks,
			      (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
	/* Turn on/off all mcast mode. */
	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
		ks_set_mcast(ks,
			     (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
	else
		ks_set_promis(ks, false);

	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev) {
				if (i >= MAX_MCAST_LST)
					break;
				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
			}
			ks->mcast_lst_size = (u8)i;
			ks_set_grpaddr(ks);
		} else {
			/*
			 * List too big to support, so
			 * turn on all mcast mode.
			 */
			ks->mcast_lst_size = MAX_MCAST_LST;
			ks_set_mcast(ks, true);
		}
	} else {
		ks->mcast_lst_size = 0;
		ks_clear_mcast(ks);
	}
}  /* ks_set_rx_mode */
static void ks_set_mac(struct ks_net *ks, u8 *data)
{
	u16 *pw = (u16 *)data;
	u16 w, u;

	ks_stop_rx(ks);	/* Stop receiving for reconfiguration */

	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARH, w);

	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARM, w);

	u = *pw;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARL, w);

	memcpy(ks->mac_addr, data, ETH_ALEN);

	if (ks->enabled)
		ks_start_rx(ks);
}
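
/*
 * For reference: on a little-endian CPU the byte swapping above maps the
 * MAC address onto the big-endian register layout, e.g. for
 * 00:10:A1:86:95:11 the writes are MARH = 0x0010, MARM = 0xA186 and
 * MARL = 0x9511.
 */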
static int ks_set_mac_address(struct net_device *netdev, void *paddr)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct sockaddr *addr = paddr;
	u8 *da;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	da = (u8 *)netdev->dev_addr;

	ks_set_mac(ks, da);
	return 0;
}

static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
}

static const struct net_device_ops ks_netdev_ops = {
	.ndo_open		= ks_net_open,
	.ndo_stop		= ks_net_stop,
	.ndo_do_ioctl		= ks_net_ioctl,
	.ndo_start_xmit		= ks_start_xmit,
	.ndo_set_mac_address	= ks_set_mac_address,
	.ndo_set_rx_mode	= ks_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
};
/* ethtool support */

static void ks_get_drvinfo(struct net_device *netdev,
			   struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}

static u32 ks_get_msglevel(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	return ks->msg_enable;
}

static void ks_set_msglevel(struct net_device *netdev, u32 to)
{
	struct ks_net *ks = netdev_priv(netdev);

	ks->msg_enable = to;
}

static int ks_get_link_ksettings(struct net_device *netdev,
				 struct ethtool_link_ksettings *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	mii_ethtool_get_link_ksettings(&ks->mii, cmd);

	return 0;
}

static int ks_set_link_ksettings(struct net_device *netdev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
}

static u32 ks_get_link(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	return mii_link_ok(&ks->mii);
}

static int ks_nway_reset(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	return mii_nway_restart(&ks->mii);
}

static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo	= ks_get_drvinfo,
	.get_msglevel	= ks_get_msglevel,
	.set_msglevel	= ks_set_msglevel,
	.get_link	= ks_get_link,
	.nway_reset	= ks_nway_reset,
	.get_link_ksettings = ks_get_link_ksettings,
	.set_link_ksettings = ks_set_link_ksettings,
};
/* MII interface controls */

/**
 * ks_phy_reg - convert MII register into a KS8851 register
 * @reg: MII register number.
 *
 * Return the KS8851 register number for the corresponding MII PHY register
 * if possible. Return zero if the MII register has no direct mapping to the
 * KS8851 register set.
 */
static int ks_phy_reg(int reg)
{
	switch (reg) {
	case MII_BMCR:
		return KS_P1MBCR;
	case MII_BMSR:
		return KS_P1MBSR;
	case MII_PHYSID1:
		return KS_PHY1ILR;
	case MII_PHYSID2:
		return KS_PHY1IHR;
	case MII_ADVERTISE:
		return KS_P1ANAR;
	case MII_LPA:
		return KS_P1ANLPR;
	}

	return 0x0;
}
/**
 * ks_phy_read - MII interface PHY register read.
 * @netdev: The network device the PHY is on.
 * @phy_addr: Address of PHY (ignored as we only have one)
 * @reg: The register to read.
 *
 * This call reads data from the PHY register specified in @reg. Since the
 * device does not support all the MII registers, the non-existent values
 * are always returned as zero.
 *
 * We return zero for unsupported registers as the MII code does not check
 * the value returned for any error status, and simply returns it to the
 * caller. The mii-tool that the driver was tested with takes any negative
 * error as real PHY capabilities, thus displaying incorrect data to the
 * user.
 */
static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
{
	struct ks_net *ks = netdev_priv(netdev);
	int ksreg;
	int result;

	ksreg = ks_phy_reg(reg);
	if (!ksreg)
		return 0x0;	/* no error return allowed, so use zero */

	mutex_lock(&ks->lock);
	result = ks_rdreg16(ks, ksreg);
	mutex_unlock(&ks->lock);

	return result;
}

static void ks_phy_write(struct net_device *netdev,
			 int phy, int reg, int value)
{
	struct ks_net *ks = netdev_priv(netdev);
	int ksreg;

	ksreg = ks_phy_reg(reg);
	if (ksreg) {
		mutex_lock(&ks->lock);
		ks_wrreg16(ks, ksreg, value);
		mutex_unlock(&ks->lock);
	}
}
/**
 * ks_read_selftest - read the selftest memory info.
 * @ks: The device state
 *
 * Read and check the TX/RX memory selftest information.
 */
static int ks_read_selftest(struct ks_net *ks)
{
	unsigned int both_done = MBIR_TXMBF | MBIR_RXMBF;
	int ret = 0;
	unsigned int rd;

	rd = ks_rdreg16(ks, KS_MBIR);

	if ((rd & both_done) != both_done) {
		netdev_warn(ks->netdev, "Memory selftest not finished\n");
		return 0;
	}

	if (rd & MBIR_TXMBFA) {
		netdev_err(ks->netdev, "TX memory selftest fails\n");
		ret |= 1;
	}

	if (rd & MBIR_RXMBFA) {
		netdev_err(ks->netdev, "RX memory selftest fails\n");
		ret |= 2;
	}

	netdev_info(ks->netdev, "the selftest passes\n");
	return ret;
}
static void ks_setup(struct ks_net *ks)
{
	u16	w;

	/*
	 * Configure QMU Transmit
	 */

	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* Setup Receive Frame Data Pointer Auto-Increment */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);

	/* Setup RxQ Command Control (RXQCR) */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/*
	 * Force the mode to half duplex (the default is full duplex),
	 * because if auto-negotiation fails, most switches fall back to
	 * half duplex.
	 */
	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~BMCR_FULLDPLX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;

	if (ks->promiscuous)		/* bPromiscuous */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast)		/* Multicast address passed mode */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else				/* Normal mode */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
}  /* ks_setup */

static void ks_setup_int(struct ks_net *ks)
{
	ks->rc_ier = 0x00;
	/* Clear the interrupt status of the hardware. */
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* Enable the interrupts of the hardware. */
	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
}  /* ks_setup_int */
static int ks_hw_init(struct ks_net *ks)
{
#define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
	ks->promiscuous = 0;
	ks->all_mcast = 0;
	ks->mcast_lst_size = 0;

	ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
					   GFP_KERNEL);
	if (!ks->frame_head_info)
		return false;

	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
	return true;
}

#if defined(CONFIG_OF)
static const struct of_device_id ks8851_ml_dt_ids[] = {
	{ .compatible = "micrel,ks8851-mll" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
#endif
static int ks8851_probe(struct platform_device *pdev)
{
	int err;
	struct net_device *netdev;
	struct ks_net *ks;
	u16 id, data;
	const char *mac;

	netdev = alloc_etherdev(sizeof(struct ks_net));
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	ks = netdev_priv(netdev);
	ks->netdev = netdev;

	ks->hw_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ks->hw_addr)) {
		err = PTR_ERR(ks->hw_addr);
		goto err_free;
	}

	ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(ks->hw_addr_cmd)) {
		err = PTR_ERR(ks->hw_addr_cmd);
		goto err_free;
	}

	err = ks_check_endian(ks);
	if (err)
		goto err_free;

	netdev->irq = platform_get_irq(pdev, 0);

	if ((int)netdev->irq < 0) {
		err = netdev->irq;
		goto err_free;
	}

	ks->pdev = pdev;

	mutex_init(&ks->lock);
	spin_lock_init(&ks->statelock);

	netdev->netdev_ops = &ks_netdev_ops;
	netdev->ethtool_ops = &ks_ethtool_ops;

	/* setup mii state */
	ks->mii.dev		= netdev;
	ks->mii.phy_id		= 1;
	ks->mii.phy_id_mask	= 1;
	ks->mii.reg_num_mask	= 0xf;
	ks->mii.mdio_read	= ks_phy_read;
	ks->mii.mdio_write	= ks_phy_write;

	netdev_info(netdev, "message enable is %d\n", msg_enable);
	/* set the default message enable */
	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
						     NETIF_MSG_PROBE |
						     NETIF_MSG_LINK));
	ks_read_config(ks);

	/* simple check for a valid chip being connected to the bus */
	if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
		netdev_err(netdev, "failed to read device ID\n");
		err = -ENODEV;
		goto err_free;
	}

	if (ks_read_selftest(ks)) {
		netdev_err(netdev, "failed the memory selftest\n");
		err = -ENODEV;
		goto err_free;
	}

	err = register_netdev(netdev);
	if (err)
		goto err_free;

	platform_set_drvdata(pdev, netdev);

	ks_soft_reset(ks, GRR_GSR);
	ks_hw_init(ks);
	ks_disable_qmu(ks);
	ks_setup(ks);
	ks_setup_int(ks);

	data = ks_rdreg16(ks, KS_OBCR);
	ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);

	/* overwriting the default MAC address */
	if (pdev->dev.of_node) {
		mac = of_get_mac_address(pdev->dev.of_node);
		if (!IS_ERR(mac))
			ether_addr_copy(ks->mac_addr, mac);
	} else {
		struct ks8851_mll_platform_data *pdata;

		pdata = dev_get_platdata(&pdev->dev);
		if (!pdata) {
			netdev_err(netdev, "No platform data\n");
			err = -ENODEV;
			goto err_pdata;
		}
		memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
	}
	if (!is_valid_ether_addr(ks->mac_addr)) {
		/* Use a random MAC address if none was passed in */
		eth_random_addr(ks->mac_addr);
		netdev_info(netdev, "Using random mac address\n");
	}
	netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);

	memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);

	ks_set_mac(ks, netdev->dev_addr);

	id = ks_rdreg16(ks, KS_CIDER);

	netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
		    (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
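	/*
	 * For reference: CIDER packs the chip identity as family (bits 15:8),
	 * chip ID (bits 7:4) and revision (bits 3:1), so a KS8851-16MLL
	 * reading 0x887x reports family 0x88 and id 0x7 in the message above.
	 */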
	return 0;

err_pdata:
	unregister_netdev(netdev);
err_free:
	free_netdev(netdev);
	return err;
}

static int ks8851_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}
static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= of_match_ptr(ks8851_ml_dt_ids),
	},
	.probe = ks8851_probe,
	.remove = ks8851_remove,
};

module_platform_driver(ks8851_platform_driver);

MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
MODULE_LICENSE("GPL");
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");