/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
 */
#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
#include "priv.h"

#include <core/tegra.h>
#include <subdev/timer.h>
#define MHZ (1000 * 1000)

#define MASK(w)	((1 << w) - 1)
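/*
 * Register fields below are described as *_SHIFT/*_WIDTH pairs: a field is
 * extracted with (val >> SHIFT) & MASK(WIDTH) and inserted with
 * (v & MASK(WIDTH)) << SHIFT, matching the accessors used throughout.
 */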
#define SYS_GPCPLL_CFG_BASE			0x00137000
#define GPC_BCASE_GPCPLL_CFG_BASE		0x00132800
#define GPCPLL_CFG		(SYS_GPCPLL_CFG_BASE + 0)
#define GPCPLL_CFG_ENABLE	BIT(0)
#define GPCPLL_CFG_IDDQ		BIT(1)
#define GPCPLL_CFG_LOCK_DET_OFF	BIT(4)
#define GPCPLL_CFG_LOCK		BIT(17)
#define GPCPLL_COEFF		(SYS_GPCPLL_CFG_BASE + 4)
#define GPCPLL_COEFF_M_SHIFT	0
#define GPCPLL_COEFF_M_WIDTH	8
#define GPCPLL_COEFF_N_SHIFT	8
#define GPCPLL_COEFF_N_WIDTH	8
#define GPCPLL_COEFF_P_SHIFT	16
#define GPCPLL_COEFF_P_WIDTH	6
#define GPCPLL_CFG2			(SYS_GPCPLL_CFG_BASE + 0xc)
#define GPCPLL_CFG2_SETUP2_SHIFT	16
#define GPCPLL_CFG2_PLL_STEPA_SHIFT	24

#define GPCPLL_CFG3			(SYS_GPCPLL_CFG_BASE + 0x18)
#define GPCPLL_CFG3_PLL_STEPB_SHIFT	16
#define GPCPLL_NDIV_SLOWDOWN				(SYS_GPCPLL_CFG_BASE + 0x1c)
#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT		0
#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT		8
#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT	16
#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT	22
#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT		31
#define SEL_VCO				(SYS_GPCPLL_CFG_BASE + 0x100)
#define SEL_VCO_GPC2CLK_OUT_SHIFT	0
#define GPC2CLK_OUT			(SYS_GPCPLL_CFG_BASE + 0x250)
#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH	1
#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT	31
#define GPC2CLK_OUT_SDIV14_INDIV4_MODE	1
#define GPC2CLK_OUT_VCODIV_WIDTH	6
#define GPC2CLK_OUT_VCODIV_SHIFT	8
#define GPC2CLK_OUT_VCODIV1		0
#define GPC2CLK_OUT_VCODIV_MASK		(MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
					 GPC2CLK_OUT_VCODIV_SHIFT)
#define GPC2CLK_OUT_BYPDIV_WIDTH	6
#define GPC2CLK_OUT_BYPDIV_SHIFT	0
#define GPC2CLK_OUT_BYPDIV31		0x3c
#define GPC2CLK_OUT_INIT_MASK	((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
		GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
		| (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
		| (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
#define GPC2CLK_OUT_INIT_VAL	((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
		GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
		| (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
		| (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG	(GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT	24
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
	(0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
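/*
 * The PL field of GPCPLL_COEFF is an index into this table, not a raw
 * divider value; note that indices 10 and 11 repeat the dividers 12 and 16.
 */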
static const u8 pl_to_div[] = {
/* PL:   0, 1, 2, 3, 4, 5, 6,  7,  8,  9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};
/* All frequencies in MHz */
struct gk20a_clk_pllg_params {
	u32 min_vco, max_vco;
	u32 min_u, max_u;
	u32 min_m, max_m;
	u32 min_n, max_n;
	u32 min_pl, max_pl;
};
static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
	.min_vco = 1000, .max_vco = 2064,
	.min_u = 12, .max_u = 38,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 32,
};
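/*
 * These limits bound the M/N/PL search in gk20a_pllg_calc_mnp() below: the
 * VCO frequency must stay within [min_vco, max_vco] and the PLL update rate
 * (reference / M) within [min_u, max_u].
 */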
struct gk20a_clk {
	struct nvkm_clk base;
	const struct gk20a_clk_pllg_params *params;
	u32 parent_rate;
	u32 m, n, pl;
};
static void
gk20a_pllg_read_mnp(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	val = nvkm_rd32(device, GPCPLL_COEFF);
	clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
	clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}
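/*
 * The PLL output (gpc2clk) runs at parent_rate * N / (M * pl_to_div[PL]);
 * the GPC clock is half of that, hence the final division by two.
 */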
static u32
gk20a_pllg_calc_rate(struct gk20a_clk *clk)
{
	u64 rate;
	u32 divider;

	rate = (u64)clk->parent_rate * clk->n;
	divider = clk->m * pl_to_div[clk->pl];
	do_div(rate, divider);

	return rate / 2;
}
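/*
 * gk20a_pllg_calc_mnp() walks PL from the lowest to the highest candidate
 * post divider, brackets N for each legal M, and keeps the (M, N, PL) triple
 * whose output lands closest to the target.
 *
 * Illustrative numbers only (assuming a hypothetical 12 MHz reference): a
 * 72 MHz GPC clock means gpc2clk = 144 MHz; with PL = 6 (divider 8) the VCO
 * target is 1152 MHz, and M = 1, N = 96 hits it exactly, since
 * 12 MHz * 96 / 1 / 8 / 2 = 72 MHz.
 */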
static int
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 target_clk_f, ref_clk_f, target_freq;
	u32 min_vco_f, max_vco_f;
	u32 low_pl, high_pl, best_pl;
	u32 target_vco_f, vco_f;
	u32 best_m, best_n;
	u32 m, n, n2;
	u32 u_f;
	u32 delta, lwv, best_delta = ~0;
	u32 pl;
	target_clk_f = rate * 2 / MHZ;
	ref_clk_f = clk->parent_rate / MHZ;

	max_vco_f = clk->params->max_vco;
	min_vco_f = clk->params->min_vco;
	best_m = clk->params->max_m;
	best_n = clk->params->min_n;
	best_pl = clk->params->min_pl;

	/* aim the VCO 2% above the target to leave some headroom */
	target_vco_f = target_clk_f + target_clk_f / 50;
	if (max_vco_f < target_vco_f)
		max_vco_f = target_vco_f;
	/* min_pl <= high_pl <= max_pl */
	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
	high_pl = min(high_pl, clk->params->max_pl);
	high_pl = max(high_pl, clk->params->min_pl);

	/* min_pl <= low_pl <= max_pl */
	low_pl = min_vco_f / target_vco_f;
	low_pl = min(low_pl, clk->params->max_pl);
	low_pl = max(low_pl, clk->params->min_pl);
	/* Find Indices of high_pl and low_pl */
	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
		if (pl_to_div[pl] >= low_pl) {
			low_pl = pl;
			break;
		}
	}

	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
		if (pl_to_div[pl] >= high_pl) {
			high_pl = pl;
			break;
		}
	}

	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
		   pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
	/* Select lowest possible VCO */
	for (pl = low_pl; pl <= high_pl; pl++) {
		target_vco_f = target_clk_f * pl_to_div[pl];
		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
			u_f = ref_clk_f / m;

			if (u_f < clk->params->min_u)
				break;
			if (u_f > clk->params->max_u)
				continue;

			n = (target_vco_f * m) / ref_clk_f;
			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

			if (n > clk->params->max_n)
				break;

			for (; n <= n2; n++) {
				if (n < clk->params->min_n)
					continue;
				if (n > clk->params->max_n)
					break;

				vco_f = ref_clk_f * n / m;

				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
					/* rounded gpc2clk this choice yields */
					lwv = (vco_f + (pl_to_div[pl] / 2))
						/ pl_to_div[pl];
					delta = abs(lwv - target_clk_f);

					if (delta < best_delta) {
						best_delta = delta;
						best_m = m;
						best_n = n;
						best_pl = pl;

						if (best_delta == 0)
							goto found_match;
					}
				}
			}
		}
	}

found_match:
	WARN_ON(best_delta == ~0);

	if (best_delta != 0)
		nvkm_debug(subdev,
			   "no best match for target @ %dMHz on gpc_pll",
			   target_clk_f);

	clk->m = best_m;
	clk->n = best_n;
	clk->pl = best_pl;

	target_freq = gk20a_pllg_calc_rate(clk) / MHZ;

	nvkm_debug(subdev,
		   "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
		   target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);

	return 0;
}
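/*
 * Dynamic ramp ("slide") to a new N divider without unlocking the PLL:
 * program the step sizes A/B, enter slowdown mode, latch the new NDIV,
 * enable the dynamic ramp, poll the broadcast DONE_SYNCED flag, then leave
 * slowdown mode again.
 */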
static int
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val;
	int ramp_timeout;

	/* get old coefficients */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	/* do nothing if NDIV is the same */
	if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
		return 0;

	/* setup */
	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
		0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
		0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
	val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
	udelay(1);
	nvkm_wr32(device, GPCPLL_COEFF, val);

	/* dynamic ramp to new ndiv */
	val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
	val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
	udelay(1);
	nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);

	for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
		udelay(1);
		val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
		if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
			break;
	}

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	if (ramp_timeout <= 0) {
		nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void
_gk20a_pllg_enable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);
}
static void
_gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);
}
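/*
 * Full reprogramming sequence: slide down to NDIV_LO, halve the output
 * divider, drop to bypass, leave IDDQ, write the new coefficients, wait for
 * lock, switch back to VCO mode, and finally slide up to the target NDIV.
 * When M and PL are unchanged, a plain NDIV slide is enough.
 */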
static int
_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val, cfg;
	u32 m_old, pl_old, n_lo;

	/* get old coefficients */
	val = nvkm_rd32(device, GPCPLL_COEFF);
	m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);

	/* do NDIV slide if there is no change in M and PL */
	cfg = nvkm_rd32(device, GPCPLL_CFG);
	if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
	    (cfg & GPCPLL_CFG_ENABLE)) {
		return gk20a_pllg_slide(clk, clk->n);
	}

	/* slide down to NDIV_LO */
	n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
			    clk->parent_rate / MHZ);
	if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
		int ret = gk20a_pllg_slide(clk, n_lo);

		if (ret)
			return ret;
	}

	/* split FO-to-bypass jump in halves by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  0x2 << GPC2CLK_OUT_VCODIV_SHIFT);

	/* put PLL in bypass before programming it */
	val = nvkm_rd32(device, SEL_VCO);
	val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
	udelay(2);
	nvkm_wr32(device, SEL_VCO, val);

	/* get out from IDDQ */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_IDDQ) {
		val &= ~GPCPLL_CFG_IDDQ;
		nvkm_wr32(device, GPCPLL_CFG, val);
		nvkm_rd32(device, GPCPLL_CFG);
		udelay(2);
	}

	_gk20a_pllg_disable(clk);

	nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
		   clk->m, clk->n, clk->pl);

	n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
			    clk->parent_rate / MHZ);
	val = clk->m << GPCPLL_COEFF_M_SHIFT;
	val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
	val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
	nvkm_wr32(device, GPCPLL_COEFF, val);

	_gk20a_pllg_enable(clk);

	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
		nvkm_wr32(device, GPCPLL_CFG, val);
	}

	if (nvkm_usec(device, 300,
		if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
			break;
	) < 0)
		return -ETIMEDOUT;

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	/* restore out divider 1:1 */
	val = nvkm_rd32(device, GPC2CLK_OUT);
	val &= ~GPC2CLK_OUT_VCODIV_MASK;
	udelay(2);
	nvkm_wr32(device, GPC2CLK_OUT, val);

	/* slide up to new NDIV */
	return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
}
static int
gk20a_pllg_program_mnp(struct gk20a_clk *clk)
{
	int err;

	err = _gk20a_pllg_program_mnp(clk, true);
	/* retry without the dynamic ramp if sliding failed */
	if (err)
		err = _gk20a_pllg_program_mnp(clk, false);

	return err;
}
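/*
 * Disabling mirrors the enable path in reverse: slide down to the minimum
 * VCO rate first, then bypass the PLL before powering it down.
 */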
static void
gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	/* slide to VCO min */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_ENABLE) {
		u32 coeff, m, n_lo;

		coeff = nvkm_rd32(device, GPCPLL_COEFF);
		m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
		n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
				    clk->parent_rate / MHZ);
		gk20a_pllg_slide(clk, n_lo);
	}

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	_gk20a_pllg_disable(clk);
}
#define GK20A_CLK_GPC_MDIV 1000
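/*
 * Pstate domain values below are expressed in units of
 * 1/GK20A_CLK_GPC_MDIV MHz, i.e. kHz: 72000 is a 72 MHz GPC clock.
 */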
static struct nvkm_pstate
gk20a_pstates[] = {
	{ .base = { .domain[nv_clk_src_gpc] = 72000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 108000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 180000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 252000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 324000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 396000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 468000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 540000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 612000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 648000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 684000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 708000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 756000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 804000, }, },
	{ .base = { .domain[nv_clk_src_gpc] = 852000, }, },
};
static int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_gpc:
		gk20a_pllg_read_mnp(clk);
		return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}
static int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
					GK20A_CLK_GPC_MDIV);
}
static int
gk20a_clk_prog(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_program_mnp(clk);
}
static void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}
static void
gk20a_clk_fini(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	gk20a_pllg_disable(clk);
}
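/*
 * Init restores the default output divider configuration (INIT_VAL) before
 * the first PLL programming, so the GPC clock comes up in a known state.
 */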
static int
gk20a_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	ret = gk20a_clk_prog(&clk->base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}
static const struct nvkm_clk_func
gk20a_clk = {
	.init = gk20a_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gk20a_pstates,
	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max }
	}
};
int
gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_clk *clk;
	int ret, i;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	/* Finish initializing the pstates */
	for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
		INIT_LIST_HEAD(&gk20a_pstates[i].list);
		gk20a_pstates[i].pstate = i + 1;
	}

	clk->params = &gk20a_pllg_params;
	clk->parent_rate = clk_get_rate(tdev->clk);

	ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
	nvkm_info(&clk->base.subdev, "parent clock rate: %d MHz\n",
		  clk->parent_rate / MHZ);

	return ret;
}