drivers/clk/clk-qoriq.c

/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[4];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * cmux freq must be >= platform pll.
 * If not set, cmux freq must be >= platform pll/2
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk, *coreclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

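/*
 * Clockgen register accessors.  Most chips have big-endian clockgen
 * registers; chips flagged CG_LITTLE_ENDIAN (e.g. ls2080a) do not.
 */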
static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	}
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	}
};

static const struct clockgen_muxinfo ls1046a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	}
};

static const struct clockgen_muxinfo ls1046a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo ls1012a_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{},
		[2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	}
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	}
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	}
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	}
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	}
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

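/*
 * The *_init_periph() callbacks pick the FMan clock parents.  On the
 * P-series chips this is decided by the FMn_CLK_SEL (and, where present,
 * HWA_ASYNC_DIV) bits latched into RCWSR7 at reset; on the T-series and
 * LS chips the parent is a hwaccel mux output or a platform-PLL divider.
 */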
static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1046a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1046a_hwa1, &ls1046a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1012a-clockgen",
		.cmux_groups = {
			&ls1012a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
};

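/*
 * Runtime state for one core-cluster (cmux) or hwaccel mux.  The
 * parent_to_clksel/clksel_to_parent tables map between the dense clk
 * framework parent index and the sparse clksel value in the mux register.
 */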
struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

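/*
 * Mux parent selection is done through the clksel field masked by
 * CLKSEL_MASK; translation to and from the clk framework's parent
 * index goes through the lookup tables built in create_mux_common().
 */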
static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

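/*
 * Register one mux clock.  Walk the clksel table, skip invalid entries
 * and entries whose rate falls outside [min_rate, max_rate] (or above
 * pct80_rate for CLKSEL_80PCT options), and build the parent-index
 * mapping tables used by the get/set_parent ops above.
 */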
static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long max_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;
		if (rate > max_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 max_rate, pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	max_rate = clk_get_rate(div->clk);
	pct80_rate = max_rate * 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

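/*
 * Hwaccel muxes live at offset 0x20 * idx + 0x10 from the clockgen base
 * and are registered without rate limits; hwaccel_ops is get_parent
 * only, so the clk framework cannot reparent them.
 */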
static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/*
 * Legacy nodes may get probed before the parent clockgen node.
 * It is assumed that device trees with legacy nodes will not
 * contain a "clocks" property -- otherwise the input clocks may
 * not be initialized at this point.
 */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk __init *input_clock(const char *name, struct clk *clk)
{
	const char *input_name;

	/* Register the input clock under the desired name. */
	input_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, input_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk __init *input_clock_by_name(const char *name,
					      const char *dtname)
{
	struct clk *clk;

	clk = of_clk_get_by_name(clockgen.node, dtname);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk __init *input_clock_by_index(const char *name, int idx)
{
	struct clk *clk;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_name(name, "sysclk");
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_index(name, 0);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input sysclk\n", __func__);
	return NULL;
}

static struct clk * __init create_coreclk(const char *name)
{
	struct clk *clk;

	clk = input_clock_by_name(name, "coreclk");
	if (!IS_ERR(clk))
		return clk;

	/*
	 * This indicates a mix of legacy nodes with the new coreclk
	 * mechanism, which should never happen.  If this error occurs,
	 * don't use the wrong input clock just because coreclk isn't
	 * ready yet.
	 */
	if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
		return clk;

	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

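/*
 * PLL handling: each PLL that is valid for the chip and not disabled via
 * the PLL_KILL bit is exposed as a set of fixed-factor clocks, one per
 * divider output ("cg-pllX-divY"), using the multiplier read from the
 * PLL control/status register.
 */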
#define PLL_KILL BIT(31)

static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	const char *input = "cg-sysclk";
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->coreclk && idx != PLATFORM_PLL) {
		if (IS_ERR(cg->coreclk))
			return;

		input = "cg-coreclk";
	}

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Get the multiple of PLL */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, input, 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

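/*
 * Clock specifier lookup for the non-legacy binding: <type index>, where
 * type 0 is sysclk, 1 a cmux, 2 a hwaccel mux, 3 an FMan clock, 4 a
 * platform-PLL divider and 5 coreclk, as handled in the switch below.
 */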
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	case 5:
		if (idx != 0)
			goto bad_args;
		clk = cg->coreclk;
		if (IS_ERR(clk))
			clk = NULL;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

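/*
 * Main probe path, shared by all clockgen compatibles (and reached from
 * the legacy nodes via legacy_init_clockgen()): map the registers, match
 * the chipinfo table, then create sysclk/coreclk, PLLs and muxes before
 * registering the OF clock provider.
 */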
static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %s\n", __func__,
		       np->full_name);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %s regs\n", __func__,
				       guts->full_name);
			}
		}
	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	clockgen.coreclk = create_coreclk("cg-coreclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);