/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"
#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#endif
	},
};

static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	u32 riptr;
	u32 tiptr;
	u32 gumr;
	u8 *bd_buffer;
	dma_addr_t bd_dma_addr;
	int ret, i;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	/* This sets the HPM bits in the CMXUCR register, which configure an
	 * open-drain connected HDLC bus.
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
			       UCC_HDLC_UCCE_TXB) << 16);
	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);
	/* hdlc-bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
			 UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}
	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD ring */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD ring */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}
	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}
	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;
	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}
	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
	bd_buffer = dma_zalloc_coherent(priv->dev,
					(RX_BD_RING_LEN + TX_BD_RING_LEN) *
					MAX_RX_BUF_LENGTH,
					&bd_dma_addr, GFP_KERNEL);

	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}
	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;
free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}
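
/*
 * Buffer layout set up by uhdlc_init(): the Rx and Tx data buffers share one
 * coherent DMA allocation of (RX_BD_RING_LEN + TX_BD_RING_LEN) buffers of
 * MAX_RX_BUF_LENGTH bytes each:
 *
 *	bd_buffer: [ Rx buf 0 ... Rx buf N-1 | Tx buf 0 ... Tx buf M-1 ]
 *
 * Each qe_bd in rx_bd_base/tx_bd_base points at its matching buffer, and the
 * last descriptor of each ring carries the wrap bit (R_W_S/T_W_S) so both the
 * QE and the driver walk the rings circularly.
 */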
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return -ENOMEM;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return -ENOMEM;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
		(priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}
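
/*
 * Tx completion model: ucc_hdlc_tx() hands a descriptor to the QE by setting
 * the ready bit (T_R_S); the QE clears that bit once the frame has been sent.
 * hdlc_tx_done() walks the ring from dirty_tx, reclaiming every descriptor
 * whose ready bit is clear, freeing the skb and waking the queue that
 * ucc_hdlc_tx() stopped when the ring filled up.
 */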
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx = (priv->skb_dirtytx +
				     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	return 0;
}
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & R_OV_S)
			dev->stats.rx_over_errors++;
		if (bd_status & R_CR_S) {
			dev->stats.rx_crc_errors++;
			dev->stats.rx_dropped++;
			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}
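
/*
 * NAPI interplay: ucc_hdlc_irq_handler() below masks the Rx/Tx event bits in
 * UCCM and schedules the poll routine; ucc_hdlc_poll() reaps Tx completions
 * and Rx frames, and only when it stays under budget does it complete NAPI
 * and re-enable those event bits via qe_setbits32(), so the interrupt stays
 * masked while work is still pending.
 */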
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}
static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
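
/*
 * For reference, a minimal sketch of how userspace could query the interface
 * settings exposed above through the generic HDLC SIOCWANDEV ioctl
 * (illustrative only; the interface name and file descriptor are assumptions,
 * not part of this driver):
 *
 *	te1_settings line;
 *	struct ifreq ifr = { 0 };
 *
 *	snprintf(ifr.ifr_name, IFNAMSIZ, "hdlc0");
 *	ifr.ifr_settings.type = IF_GET_IFACE;
 *	ifr.ifr_settings.size = sizeof(line);
 *	ifr.ifr_settings.ifs_ifsu.te1 = &line;
 *	if (ioctl(fd, SIOCWANDEV, &ifr) == 0)
 *		printf("iface %d, clock type %u\n",
 *		       ifr.ifr_settings.type, line.clock_type);
 */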
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(priv->ucc_pram->riptr);
	qe_muram_free(priv->ucc_pram->tiptr);

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}
static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}
static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}
static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}
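
/*
 * Suspend/resume approach: uhdlc_suspend() saves GUMR/GUEMR, a RAM copy of
 * the HDLC parameter RAM and the QE clock-mux routing, then gates the UCC.
 * uhdlc_resume() replays that state, re-programs the virtual FIFOs, parameter
 * page and BD rings, and, if the interface was up, re-issues QE_INIT_TX_RX
 * and re-enables the UCC and the TDM port.
 */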
static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}
static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif
static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
};
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	const char *sprop;
	int ucc_num;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if ((ucc_num > 3) || (ucc_num < 0)) {
		dev_err(&pdev->dev, ": Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;
	}

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto free_utdm;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}
static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}
static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};
MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
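
/*
 * For orientation, a sketch of the device-tree node this driver binds to.
 * The property names come from the probe routine above; the node layout,
 * clock name and omitted reg/interrupts entries are illustrative assumptions,
 * not a binding reference:
 *
 *	ucc@2000 {
 *		compatible = "fsl,ucc-hdlc";
 *		cell-index = <1>;
 *		rx-clock-name = "brg1";
 *		tx-clock-name = "brg1";
 *		fsl,tdm-interface;		// optional: run over a TSA/TDM port
 *		fsl,ucc-internal-loopback;	// optional: internal loopback mode
 *		fsl,hdlc-bus;			// optional: shared HDLC bus mode
 *	};
 */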
static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");