/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
#include "gt215.h"
#include "pll.h"

#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>

struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;
	u32 cctrl, sctrl;
	u32 ccoef, scoef;
	u32 cpost, spost;
	u32 vdiv;
};

static u32
read_div(struct mcp77_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	return nvkm_rd32(device, 0x004600);
}

static u32
read_pll(struct mcp77_clk *clk, u32 base)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, base + 0);
	u32 coef = nvkm_rd32(device, base + 4);
	u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
	u32 post_div = 0, clock = 0;
	int N1, M1;

	switch (base) {
	case 0x4020:
		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
		break;
	case 0x4028:
		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
		break;
	default:
		break;
	}

	N1 = (coef & 0x0000ff00) >> 8;
	M1 = (coef & 0x000000ff);
	if ((ctrl & 0x80000000) && M1) {
		clock = ref * N1 / M1;
		clock = clock / post_div;
	}

	return clock;
}

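/* A worked example with hypothetical register values: with href = 100000 kHz,
 * N1 = 84, M1 = 6 and a post divider of 2, an enabled PLL reads back as
 * 100000 * 84 / 6 / 2 = 700000 kHz, i.e. 700 MHz.
 */
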
static int
mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c054);
	u32 P = 0;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclkm4:
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		switch (mast & 0x000c0000) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break;
		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;

		switch (mast & 0x00000003) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0;
		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		if ((mast & 0x03000000) != 0x03000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		switch (mast & 0x00000c00) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		}
		break;
	case nv_clk_src_shader:
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break;
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		return 0;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;

		switch (mast & 0x00400000) {
		case 0x00400000:
			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
		default:
			return 500000 >> P;
		}
		break;
	default:
		break;
	}

	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return 0;
}

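/* A summary of the 0x00c054 mux decode above: bits 1:0 select the core
 * source, bits 6:4 the shader source, bits 11:10 (gated by bits 25:24 and
 * bit 9) the cclk source, bits 19:18 the host source, and bit 22 the vdec
 * source.
 */
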
static u32
calc_pll(struct mcp77_clk *clk, u32 reg,
	 u32 clock, int *N, int *M, int *P)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll pll;
	int ret;

	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
	if (ret)
		return 0;

	pll.vco2.max_freq = 0;
	pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
	if (!pll.refclk)
		return 0;

	return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
}

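/* Zeroing pll.vco2.max_freq makes nv04_pll_calc() treat the PLL as a
 * single-stage design, which is why the N2/M2 out-parameters can be NULL:
 * only N, M and the post-divider shift P are computed.
 */
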
static inline u32
calc_P(u32 src, u32 target, int *div)
{
	u32 clk0 = src, clk1 = src;
	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

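/* A traced example with hypothetical inputs: calc_P(1000000, 300000, &div)
 * halves clk0 to 500000 and then 250000, the first value <= target, leaving
 * div == 2 and clk1 (the previous step) == 500000.  Since
 * 300000 - 250000 <= 500000 - 300000, it returns 250000 with div == 2
 * (i.e. src >> 2); in the opposite case it would decrement div and
 * return clk1.
 */
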
static int
mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	const int shader = cstate->domain[nv_clk_src_shader];
	const int core = cstate->domain[nv_clk_src_core];
	const int vdec = cstate->domain[nv_clk_src_vdec];
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 out = 0, clock = 0;
	int N, M, P1, P2 = 0;
	int divs = 0;

	/* cclk: find suitable source, disable PLL if we can */
	if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
		out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);

	/* Calculate clock * 2, so shader clock can use it too */
	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);

	if (abs(core - out) <= abs(core - (clock >> 1))) {
		clk->csrc = nv_clk_src_hclkm4;
		clk->cctrl = divs << 16;
	} else {
		/* NVCTRL is actually used _after_ NVPOST, and after what we
		 * call NVPLL. To make matters worse, NVPOST is an integer
		 * divider instead of a right-shift number. */
		if (P1 > 2) {
			P2 = P1 - 2;
			P1 = 2;
		}

		clk->csrc = nv_clk_src_core;
		clk->ccoef = (N << 8) | M;

		clk->cctrl = (P2 + 1) << 16;
		clk->cpost = (1 << P1) << 16;
	}
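	/* A hypothetical example of the split above: if calc_pll() returned
	 * P1 = 3 for the doubled core clock, it becomes NVPOST = 1 << 2 = 4
	 * (an integer divider) plus an NVCTRL right-shift of P2 + 1 = 2, so
	 * the VCO is divided by 4 << 2 = 16, matching the original >> (3 + 1).
	 */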
	/* sclk: nvpll + divisor, href or spll */
	out = 0;
	if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
		clk->ssrc = nv_clk_src_href;
	} else {
		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
		if (clk->csrc == nv_clk_src_core)
			out = calc_P((core << 1), shader, &divs);

		if (abs(shader - out) <=
		    abs(shader - clock) &&
		   (divs + P2) <= 7) {
			clk->ssrc = nv_clk_src_core;
			clk->sctrl = (divs + P2) << 16;
		} else {
			clk->ssrc = nv_clk_src_shader;
			clk->scoef = (N << 8) | M;
			clk->sctrl = P1 << 16;
		}
	}
	/* vclk */
	out = calc_P(core, vdec, &divs);
	clock = calc_P(500000, vdec, &P1);
	if (abs(vdec - out) <= abs(vdec - clock)) {
		clk->vsrc = nv_clk_src_cclk;
		clk->vdiv = divs << 16;
	} else {
		clk->vsrc = nv_clk_src_vdec;
		clk->vdiv = P1 << 16;
	}
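	/* vdec picks whichever source lands closer.  Hypothetical example:
	 * for a vdec target of 450000 kHz with core at 600000 kHz, the core
	 * path yields 300000 but the fixed path yields 500000 (P1 = 0), so
	 * the 500 MHz source wins.
	 */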
	/* Print strategy! */
	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
		   clk->ccoef, clk->cpost, clk->cctrl);
	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
		   clk->scoef, clk->spost, clk->sctrl);
	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
	if (clk->csrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "core: hrefm4\n");
	else
		nvkm_debug(subdev, "core: nvpll\n");

	if (clk->ssrc == nv_clk_src_href)
		nvkm_debug(subdev, "shader: href\n");
	else if (clk->ssrc == nv_clk_src_core)
		nvkm_debug(subdev, "shader: nvpll\n");
	else
		nvkm_debug(subdev, "shader: spll\n");

	if (clk->vsrc == nv_clk_src_vdec)
		nvkm_debug(subdev, "vdec: 500MHz\n");
	else
		nvkm_debug(subdev, "vdec: core\n");

	return 0;
}

static int
mcp77_clk_prog(struct nvkm_clk *base)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* First switch to safe clocks: href */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;
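	/* nvkm_mask() performs a read-modify-write and returns the register's
	 * previous contents, so mast starts from the old mux configuration;
	 * the source-select bits are cleared here and OR'd back in per clock
	 * below, then committed in a single write once everything is ready.
	 */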

	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);
		mast |= 0x00000003;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
		goto out;
	}

	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);
		mast |= 0x00000030;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
		goto out;
	}

	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
		if (tmp == pllmask)
			break;
	) < 0)
		goto resume;

	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		fallthrough;
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	nvkm_wr32(device, 0xc054, mast);

resume:
	/* Disable some PLLs and dividers when unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

out:
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}

static void
mcp77_clk_tidy(struct nvkm_clk *base)
{
}

static const struct nvkm_clk_func
mcp77_clk = {
	.read = mcp77_clk_read,
	.calc = mcp77_clk_calc,
	.prog = mcp77_clk_prog,
	.tidy = mcp77_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
		{ nv_clk_src_max }
	}
};

int
mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct mcp77_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
}