/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>

/******************************************************************************
 * misc
 *****************************************************************************/
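/* Clamp (and, when 'adjust' is set, scale) a frequency against the BIOS
 * boost table entry for the given pstate/domain.  If no matching entry
 * exists, the input frequency is returned unchanged.
 */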
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8  idx = 0, sver, shdr;
		u32 subd;

		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}

/******************************************************************************
 * C-States
 *****************************************************************************/
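/* Program a single cstate: raise fan speed and voltage first, reclock via
 * the chip-specific calc/prog/tidy hooks, then drop voltage and fan speed
 * to the levels the new state requires.
 */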
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;

	if (!list_empty(&pstate->list)) {
		cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
	} else {
		cstate = &pstate->base;
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
			return ret;
		}
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
			return ret;
		}
	}

	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
	}

	return ret;
}

static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}

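/* Build an extra cstate for a pstate from the BIOS cstep table entry at
 * 'idx', adjusting core-domain frequencies via the boost table.
 */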
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8  ver, hdr;
	u32 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
						   domain->bios, cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}

/******************************************************************************
 * P-States
 *****************************************************************************/
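/* Switch to the performance state at index 'pstatei', reclocking memory
 * first (when the RAM implementation supports it) and the remaining
 * domains via nvkm_cstate_prog().
 */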
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_ram *ram = subdev->device->fb->ram;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	if (ram && ram->func->calc) {
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0);
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, 0);
}

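/* Deferred pstate selection: pick a target from the user (AC/DC), driver,
 * thermal and dynamic limits, and program it if it differs from the
 * current state.
 */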
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;

	if (!atomic_xchg(&clk->waiting, 0))
		return;
	clk->pwrsrc = power_supply_is_system_supplied();

	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->tstate, clk->dstate);

	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}

	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}

	wake_up_all(&clk->wait);
	nvkm_notify_get(&clk->pwrsrc_ntfy);
}

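/* Schedule a pstate re-evaluation, optionally blocking until the work
 * item has run.
 */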
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait)
		wait_event(clk->wait, !atomic_read(&clk->waiting));
	return 0;
}

static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;

	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;

		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}

		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}

static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
	struct nvkm_cstate *cstate, *temp;

	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
		nvkm_cstate_del(cstate);
	}

	list_del(&pstate->head);
	kfree(pstate);
}

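/* Construct a pstate from the BIOS perf table entry at 'idx', including
 * its per-domain base clocks and any cstates referenced by the cstep
 * table.
 */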
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains - 1;
	struct nvkm_pstate *pstate;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepE cstepE;
	struct nvbios_perfE perfE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
	if (!data)
		return -EINVAL;
	if (perfE.pstate == 0xff)
		return 0;

	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
	cstate = &pstate->base;
	if (!pstate)
		return -ENOMEM;

	INIT_LIST_HEAD(&pstate->list);

	pstate->pstate = perfE.pstate;
	pstate->fanspeed = perfE.fanspeed;
	cstate->voltage = perfE.voltage;
	cstate->domain[nv_clk_src_core] = perfE.core;
	cstate->domain[nv_clk_src_shader] = perfE.shader;
	cstate->domain[nv_clk_src_mem] = perfE.memory;
	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
	cstate->domain[nv_clk_src_dom6] = perfE.disp;

	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
		struct nvbios_perfS perfS;
		u8  sver = ver, shdr = hdr;
		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
					   &sver, &shdr, cnt, len, &perfS);
		if (perfSe == 0 || sver != 0x40)
			continue;

		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			perfS.v40.freq = nvkm_clk_adjust(clk, false,
							 pstate->pstate,
							 domain->bios,
							 perfS.v40.freq);
		}

		cstate->domain[domain->name] = perfS.v40.freq;
	}

	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
	if (data) {
		int idx = cstepE.index;
		do {
			nvkm_cstate_new(clk, idx, pstate);
		} while (idx--);
	}

	nvkm_pstate_info(clk, pstate);
	list_add_tail(&pstate->head, &clk->states);
	clk->state_nr++;
	return 0;
}

/******************************************************************************
 * Adjustment triggers
 *****************************************************************************/
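/* Validate a user pstate request: -1 means "disabled", -2 means "auto",
 * and anything else must match a pstate id from the BIOS.  The result is
 * biased by +2 so that valid requests stay non-negative while errors
 * remain negative.
 */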
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
	struct nvkm_pstate *pstate;
	int i = 0;

	if (!clk->allow_reclock)
		return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				break;
			i++;
		}

		if (pstate->pstate != req)
			return -EINVAL;
		req = i;
	}

	return req + 2;
}

static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	int ret = 1;

	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;

	if (strncasecmpz(mode, "disabled", arglen)) {
		char save = mode[arglen];
		long v;

		((char *)mode)[arglen] = '\0';
		if (!kstrtol(mode, 0, &v)) {
			ret = nvkm_clk_ustate_update(clk, v);
			if (ret < 0)
				ret = 1;
		}
		((char *)mode)[arglen] = save;
	}

	return ret - 2;
}

int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
	int ret = nvkm_clk_ustate_update(clk, req);
	if (ret >= 0) {
		if (ret -= 2, pwr) clk->ustate_ac = ret;
		else		   clk->ustate_dc = ret;
		return nvkm_pstate_calc(clk, true);
	}
	return ret;
}

int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
	if (!rel) clk->astate  = req;
	if ( rel) clk->astate += rel;
	clk->astate = min(clk->astate, clk->state_nr - 1);
	clk->astate = max(clk->astate, 0);
	return nvkm_pstate_calc(clk, wait);
}

int
nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->tstate  = req;
	if ( rel) clk->tstate += rel;
	clk->tstate = min(clk->tstate, 0);
	clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
	return nvkm_pstate_calc(clk, true);
}

int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->dstate  = req;
	if ( rel) clk->dstate += rel;
	clk->dstate = min(clk->dstate, clk->state_nr - 1);
	clk->dstate = max(clk->dstate, 0);
	return nvkm_pstate_calc(clk, true);
}

static int
nvkm_clk_pwrsrc(struct nvkm_notify *notify)
{
	struct nvkm_clk *clk =
		container_of(notify, typeof(*clk), pwrsrc_ntfy);
	nvkm_pstate_calc(clk, false);
	return NVKM_NOTIFY_DROP;
}

/******************************************************************************
 * subdev base class implementation
 *****************************************************************************/
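/* Read the current frequency of a clock domain via the chip-specific
 * implementation.
 */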
int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}

static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	nvkm_notify_put(&clk->pwrsrc_ntfy);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}

static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff;

	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nvkm_pstate_info(clk, &clk->bstate);

	if (clk->func->init)
		return clk->func->init(clk);

	clk->astate = clk->state_nr - 1;
	clk->tstate = 0;
	clk->dstate = 0;
	clk->pstate = -1;
	nvkm_pstate_calc(clk, true);
	return 0;
}

static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	struct nvkm_pstate *pstate, *temp;

	nvkm_notify_fini(&clk->pwrsrc_ntfy);

	/* Early return if the pstates have been provided statically */
	if (clk->func->pstates)
		return clk;

	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
		nvkm_pstate_del(pstate);
	}

	return clk;
}

static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};

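/* Common constructor: pstates either come from the BIOS perf tables or
 * are supplied statically by the chip-specific implementation, and the
 * initial user states can be overridden with the NvClkMode, NvClkModeAC
 * and NvClkModeDC config options.
 */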
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk *clk)
{
	int ret, idx, arglen;
	const char *mode;

	nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1;
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;

	INIT_WORK(&clk->work, nvkm_pstate_work);
	init_waitqueue_head(&clk->wait);
	atomic_set(&clk->waiting, 0);

	/* If no pstates are provided, try and fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}

	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
			       NULL, 0, 0, &clk->pwrsrc_ntfy);
	if (ret)
		return ret;

	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

	return 0;
}

int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk **pclk)
{
	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
}