// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors: Shlomi Gridish <gridish@freescale.com>
 *          Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Fast API Set - UCC Fast specific routines implementations.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/export.h>

#include <asm/io.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>

#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
{
	printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
	printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);

	printk(KERN_INFO "gumr  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
	printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
	printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
	printk(KERN_INFO "udsr  : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
	printk(KERN_INFO "ucce  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
	printk(KERN_INFO "uccm  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
	printk(KERN_INFO "uccs  : addr=0x%p, val=0x%02x\n",
	       &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
	printk(KERN_INFO "urfb  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
	printk(KERN_INFO "urfs  : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
	printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
	printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->urfset,
	       qe_ioread16be(&uccf->uf_regs->urfset));
	printk(KERN_INFO "utfb  : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
	printk(KERN_INFO "utfs  : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
	printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
	printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
	printk(KERN_INFO "utpt  : addr=0x%p, val=0x%04x\n",
	       &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
	printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
	       &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
	printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
	       &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
}
EXPORT_SYMBOL(ucc_fast_dump_regs);
u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
{
	switch (uccf_num) {
	case 0: return QE_CR_SUBBLOCK_UCCFAST1;
	case 1: return QE_CR_SUBBLOCK_UCCFAST2;
	case 2: return QE_CR_SUBBLOCK_UCCFAST3;
	case 3: return QE_CR_SUBBLOCK_UCCFAST4;
	case 4: return QE_CR_SUBBLOCK_UCCFAST5;
	case 5: return QE_CR_SUBBLOCK_UCCFAST6;
	case 6: return QE_CR_SUBBLOCK_UCCFAST7;
	case 7: return QE_CR_SUBBLOCK_UCCFAST8;
	default: return QE_CR_SUBBLOCK_INVALID;
	}
}
EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
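/*
 * Illustrative sketch (not part of this file): the subblock code returned
 * above is typically passed as the "device" argument of qe_issue_cmd() when
 * issuing a QE command against a fast UCC.  The command and protocol values
 * below are placeholders chosen for the example, not values mandated by
 * this API.
 *
 *	u32 subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 *
 *	if (subblock != QE_CR_SUBBLOCK_INVALID)
 *		qe_issue_cmd(QE_INIT_TX_RX, subblock,
 *			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
 */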
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
	qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
}
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
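/*
 * Usage sketch (illustrative only): a driver that has just queued a frame
 * on its Tx BD ring can kick the transmitter instead of waiting for the
 * next poll; "uccf" stands for the handle obtained from ucc_fast_init().
 *
 *	ucc_fast_transmit_on_demand(uccf);
 */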
void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
{
	struct ucc_fast __iomem *uf_regs;
	u32 gumr;

	uf_regs = uccf->uf_regs;

	/* Enable reception and/or transmission on this UCC. */
	gumr = qe_ioread32be(&uf_regs->gumr);
	if (mode & COMM_DIR_TX) {
		gumr |= UCC_FAST_GUMR_ENT;
		uccf->enabled_tx = 1;
	}
	if (mode & COMM_DIR_RX) {
		gumr |= UCC_FAST_GUMR_ENR;
		uccf->enabled_rx = 1;
	}
	qe_iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_enable);
void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
{
	struct ucc_fast __iomem *uf_regs;
	u32 gumr;

	uf_regs = uccf->uf_regs;

	/* Disable reception and/or transmission on this UCC. */
	gumr = qe_ioread32be(&uf_regs->gumr);
	if (mode & COMM_DIR_TX) {
		gumr &= ~UCC_FAST_GUMR_ENT;
		uccf->enabled_tx = 0;
	}
	if (mode & COMM_DIR_RX) {
		gumr &= ~UCC_FAST_GUMR_ENR;
		uccf->enabled_rx = 0;
	}
	qe_iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_disable);
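/*
 * Illustrative sketch (not part of this file): a typical caller enables
 * both directions once the UCC has been initialised and disables them
 * again before tearing it down.  "uccf" stands for the handle returned
 * through ucc_fast_init().
 *
 *	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);
 *	// ... traffic flows ...
 *	ucc_fast_disable(uccf, COMM_DIR_RX | COMM_DIR_TX);
 */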
int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
{
	struct ucc_fast_private *uccf;
	struct ucc_fast __iomem *uf_regs;
	u32 gumr;
	int ret;

	if (!uf_info)
		return -EINVAL;

	/* check if the UCC port number is in range. */
	if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
		printk(KERN_ERR "%s: illegal UCC number\n", __func__);
		return -EINVAL;
	}

	/* Check that 'max_rx_buf_length' is properly aligned (4). */
	if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
		       __func__);
		return -EINVAL;
	}

	/* Validate Virtual Fifo register values */
	if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
		printk(KERN_ERR "%s: urfs is too small\n", __func__);
		return -EINVAL;
	}

	if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
		return -EINVAL;
	}

	if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
		return -EINVAL;
	}

	if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
		return -EINVAL;
	}

	if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
		return -EINVAL;
	}

	if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
		return -EINVAL;
	}

	if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
		printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
		return -EINVAL;
	}

	uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
	if (!uccf) {
		printk(KERN_ERR "%s: Cannot allocate private data\n",
		       __func__);
		return -ENOMEM;
	}
	uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
	uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;

	/* Fill fast UCC structure */
	uccf->uf_info = uf_info;
	/* Set the PHY base address */
	uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
	if (uccf->uf_regs == NULL) {
		printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
		kfree(uccf);
		return -ENOMEM;
	}

	uccf->enabled_tx = 0;
	uccf->enabled_rx = 0;
	uccf->stopped_tx = 0;
	uccf->stopped_rx = 0;
	uf_regs = uccf->uf_regs;
	uccf->p_ucce = &uf_regs->ucce;
	uccf->p_uccm = &uf_regs->uccm;
#ifdef CONFIG_UGETH_TX_ON_DEMAND
	uccf->p_utodr = &uf_regs->utodr;
#endif
#ifdef STATISTICS
	uccf->tx_frames = 0;
	uccf->rx_frames = 0;
	uccf->rx_discarded = 0;
#endif				/* STATISTICS */

	/* Set UCC to fast type */
	ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
	if (ret) {
		printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
		ucc_fast_free(uccf);
		return ret;
	}

	uccf->mrblr = uf_info->max_rx_buf_length;

	/* Set GUMR */
	/* For more details see the hardware spec. */
	gumr = uf_info->ttx_trx;
	if (uf_info->tci)
		gumr |= UCC_FAST_GUMR_TCI;
	if (uf_info->cdp)
		gumr |= UCC_FAST_GUMR_CDP;
	if (uf_info->ctsp)
		gumr |= UCC_FAST_GUMR_CTSP;
	if (uf_info->cds)
		gumr |= UCC_FAST_GUMR_CDS;
	if (uf_info->ctss)
		gumr |= UCC_FAST_GUMR_CTSS;
	if (uf_info->txsy)
		gumr |= UCC_FAST_GUMR_TXSY;
	if (uf_info->rsyn)
		gumr |= UCC_FAST_GUMR_RSYN;
	gumr |= uf_info->synl;
	if (uf_info->rtsm)
		gumr |= UCC_FAST_GUMR_RTSM;
	gumr |= uf_info->renc;
	if (uf_info->revd)
		gumr |= UCC_FAST_GUMR_REVD;
	gumr |= uf_info->tenc;
	gumr |= uf_info->tcrc;
	gumr |= uf_info->mode;
	qe_iowrite32be(gumr, &uf_regs->gumr);

	/* Allocate memory for Tx Virtual Fifo */
	uccf->ucc_fast_tx_virtual_fifo_base_offset =
	    qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
	if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
		printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
		       __func__);
		ucc_fast_free(uccf);
		return -ENOMEM;
	}

	/* Allocate memory for Rx Virtual Fifo */
	uccf->ucc_fast_rx_virtual_fifo_base_offset =
		qe_muram_alloc(uf_info->urfs +
			       UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
			       UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
	if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
		printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
		       __func__);
		ucc_fast_free(uccf);
		return -ENOMEM;
	}

	/* Set Virtual Fifo registers */
	qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
	qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
	qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
	qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
	qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
	qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
		       &uf_regs->utfb);
	qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
		       &uf_regs->urfb);

	/* Mux clocking */
	/* Grant Support */
	ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
	/* Breakpoint Support */
	ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
	/* Set Tsa or NMSI mode. */
	ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
	/* If NMSI (not Tsa), set Tx and Rx clock. */
	if (!uf_info->tsa) {
		/* Rx clock routing */
		if ((uf_info->rx_clock != QE_CLK_NONE) &&
		    ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
					COMM_DIR_RX)) {
			printk(KERN_ERR "%s: illegal value for RX clock\n",
			       __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}
		/* Tx clock routing */
		if ((uf_info->tx_clock != QE_CLK_NONE) &&
		    ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
					COMM_DIR_TX)) {
			printk(KERN_ERR "%s: illegal value for TX clock\n",
			       __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}
	} else {
		/* tdm Rx clock routing */
		if ((uf_info->rx_clock != QE_CLK_NONE) &&
		    ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
					 COMM_DIR_RX)) {
			pr_err("%s: illegal value for RX clock", __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}

		/* tdm Tx clock routing */
		if ((uf_info->tx_clock != QE_CLK_NONE) &&
		    ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
					 COMM_DIR_TX)) {
			pr_err("%s: illegal value for TX clock", __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}

		/* tdm Rx sync clock routing */
		if ((uf_info->rx_sync != QE_CLK_NONE) &&
		    ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
					  COMM_DIR_RX)) {
			pr_err("%s: illegal value for RX clock", __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}

		/* tdm Tx sync clock routing */
		if ((uf_info->tx_sync != QE_CLK_NONE) &&
		    ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
					  COMM_DIR_TX)) {
			pr_err("%s: illegal value for TX clock", __func__);
			ucc_fast_free(uccf);
			return -EINVAL;
		}
	}

	/* Set interrupt mask register at UCC level. */
	qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);

	/* First, clear anything pending at UCC level,
	 * otherwise, old garbage may come through
	 * as soon as the dam is opened. */

	/* Writing '1' clears */
	qe_iowrite32be(0xffffffff, &uf_regs->ucce);

	*uccf_ret = uccf;
	return 0;
}
EXPORT_SYMBOL(ucc_fast_init);
void ucc_fast_free(struct ucc_fast_private * uccf)
{
	if (!uccf)
		return;

	qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
	qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);

	if (uccf->uf_regs)
		iounmap(uccf->uf_regs);

	kfree(uccf);
}
EXPORT_SYMBOL(ucc_fast_free);
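/*
 * End-to-end usage sketch (illustrative only): a minimal init/enable/free
 * sequence as a consumer of this API might write it.  The ucc_fast_info
 * field values below are placeholders, not recommendations; real drivers
 * fill them from the device tree and the hardware spec.
 *
 *	struct ucc_fast_private *uccf;
 *	int ret;
 *
 *	uf_info->urfs = UCC_FAST_URFS_MIN_VAL;	// keep FIFO sizes aligned
 *	uf_info->uccm_mask = 0;			// start with all events masked
 *
 *	ret = ucc_fast_init(uf_info, &uccf);
 *	if (ret)
 *		return ret;
 *
 *	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);
 *	// ... run traffic ...
 *	ucc_fast_disable(uccf, COMM_DIR_RX | COMM_DIR_TX);
 *	ucc_fast_free(uccf);
 */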