// SPDX-License-Identifier: GPL-2.0-or-later
/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
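
/* RX_BD_ERRORS collects every receive-BD error flag that hdlc_rx_done()
 * checks; each set bit is folded into the matching netdev error counter
 * (e.g. R_CR_S into rx_crc_errors).
 */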

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
	},
};

static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];

static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	s32 riptr;
	s32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	/* This sets HPM register in CMXUCR register which configures a
	 * open drain connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when work in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (TODO: needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
			 UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) *
				       MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
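
	/*
	 * Note on layout: a single coherent allocation backs both data rings;
	 * the first RX_BD_RING_LEN * MAX_RX_BUF_LENGTH bytes are the Rx
	 * buffers and the remainder the Tx buffers, so BD i uses the fixed
	 * slot at offset i * MAX_RX_BUF_LENGTH within its half.
	 */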
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}
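
/* Transmit path: ucc_hdlc_tx() copies the skb into the coherent Tx buffer
 * that backs the current BD, marks the BD ready (T_R_S) and advances
 * priv->curtx_bd, honouring the T_W_S wrap bit at the end of the ring.
 */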
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "No enough space for hdlc head\n");
			return NETDEV_TX_OK;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return NETDEV_TX_OK;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_ETHER:
		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	netdev_sent_queue(dev, skb->len);
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
{
	u32 cecr_subblock;

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	return 0;
}
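
/* Reclaim side of the Tx ring: walk BDs from priv->dirty_tx until one is
 * still owned by hardware (T_R_S set), free the matching skbs and report
 * the completed work to the BQL layer via netdev_completed_queue().
 */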
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	unsigned int bytes_sent = 0;
	int howmany = 0;
	struct qe_bd *bd;	/* BD pointer */
	u16 bd_status;
	int tx_restart = 0;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		if (bd_status & T_UN_S) { /* Underrun */
			dev->stats.tx_fifo_errors++;
			tx_restart = 1;
		}
		if (bd_status & T_CT_S) { /* Carrier lost */
			dev->stats.tx_carrier_errors++;
			tx_restart = 1;
		}

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		howmany++;
		bytes_sent += skb->len;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_consume_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx = (priv->skb_dirtytx +
				     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	if (tx_restart)
		hdlc_tx_restart(priv);

	netdev_completed_queue(dev, howmany, bytes_sent);
	return 0;
}
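
/* Receive side: each completed BD either bumps the error counters selected
 * by RX_BD_ERRORS or is copied out of the coherent Rx buffer into a fresh
 * skb and fed to the stack; the BD is then handed back to the hardware
 * with R_E_S set.
 */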
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & (RX_BD_ERRORS)) {
			dev->stats.rx_errors++;

			if (bd_status & R_CD_S)
				dev->stats.collisions++;
			if (bd_status & R_OV_S)
				dev->stats.rx_fifo_errors++;
			if (bd_status & R_CR_S)
				dev->stats.rx_crc_errors++;
			if (bd_status & R_AB_S)
				dev->stats.rx_over_errors++;
			if (bd_status & R_NO_S)
				dev->stats.rx_frame_errors++;
			if (bd_status & R_LG_S)
				dev->stats.rx_length_errors++;

			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
		case ARPHRD_ETHER:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany = 0;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}
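
/* Interrupt handler: acknowledges the UCCE events it saw, and for Rx/Tx
 * events masks them in UCCM before scheduling NAPI; ucc_hdlc_poll()
 * re-enables them once the budget is no longer exhausted.
 */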
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_missed_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netdev_reset_queue(dev);
		netif_start_queue(dev);
	}

	return 0;
}
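
/* Release everything uhdlc_init() set up: the MURAM areas, both BD rings,
 * the coherent data buffers and the skb pointer arrays.
 */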
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	netdev_reset_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}
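
/* Called through the generic HDLC layer when the line protocol is
 * configured; only the NRZ/NRZI encodings and the parities checked below
 * are accepted for this controller.
 */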
static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}
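
/* Power management: suspend saves GUMR/GUEMR, the MURAM parameter RAM and
 * the QE clock-mux routing; resume rewrites them and rebuilds the SI RAM
 * and both BD rings before re-enabling the controller.
 */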
static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr*/
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static void uhdlc_tx_timeout(struct net_device *ndev)
{
	netdev_err(ndev, "%s\n", __func__);
}

static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
	.ndo_tx_timeout	= uhdlc_tx_timeout,
};

static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	static int siram_init_flag;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, name);
	if (!np)
		return -EINVAL;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_err("%pOFn: failed to lookup pdev\n", np);
		of_node_put(np);
		return -EINVAL;
	}

	of_node_put(np);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -EINVAL;
		goto error_put_device;
	}
	*ptr = ioremap(res->start, resource_size(res));
	if (!*ptr) {
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* We've remapped the addresses, and we don't need the device any
	 * more, so we should release it.
	 */
	put_device(&pdev->dev);

	if (init_flag && siram_init_flag == 0) {
		memset_io(*ptr, 0, resource_size(res));
		siram_init_flag = 1;
	}

	return 0;

error_put_device:
	put_device(&pdev->dev);

	return ret;
}
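
/* Probe reads the UCC description from the device tree: "cell-index"
 * selects the UCC, "rx-clock-name"/"tx-clock-name" route the clocks, and
 * the presence of "fsl,tdm-interface", "fsl,ucc-internal-loopback" and
 * "fsl,hdlc-bus" selects TSA, loopback and bus mode; "fsl,hmask" is an
 * optional address-recognition mask (DEFAULT_ADDR_MASK otherwise).
 */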
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, ": Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
				     (void __iomem **)&utdm->si_regs);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
				     (void __iomem **)&utdm->siram);
		if (ret)
			goto unmap_si_regs;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto undo_uhdlc_init;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	dev->watchdog_timeo = 2 * HZ;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	if (utdm)
		iounmap(utdm->siram);
unmap_si_regs:
	if (utdm)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);