/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev)
#include "priv.h"

#include <subdev/clk.h>
#include <subdev/timer.h>
#include <subdev/volt.h>

#define BUSY_SLOT	0
#define CLK_SLOT	7
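
/*
 * BUSY_SLOT and CLK_SLOT index two of the PMU's idle counters.  Each
 * slot appears to occupy a 0x10-byte stride in the 0x10a500 register
 * block: 0x10a504/0x10a50c configure a counter and 0x10a508 holds its
 * running count (see gk20a_pmu_dvfs_get_dev_status() and
 * gk20a_pmu_init() below).
 */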

struct gk20a_pmu_dvfs_data {
	int p_load_target;	/* load we try to hold, in percent */
	int p_load_max;		/* load above which we jump up aggressively */
	int p_smooth;		/* smoothing factor for the load average */
	unsigned int avg_load;
};

struct gk20a_pmu {
	struct nvkm_pmu base;
	struct nvkm_alarm alarm;
	struct gk20a_pmu_dvfs_data *data;
};

struct gk20a_pmu_dvfs_dev_status {
	unsigned long total;
	unsigned long busy;
};
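
/*
 * Request the chosen performance state from the clk subdev.  The 0 and
 * false arguments to nvkm_clk_astate() are taken to mean "no relative
 * adjustment" and "don't block waiting for the change" respectively;
 * their exact semantics are defined by the clk subdev, not here.
 */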
static int
gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
{
	struct nvkm_clk *clk = pmu->base.subdev.device->clk;

	return nvkm_clk_astate(clk, *state, 0, false);
}

static int
gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
{
	struct nvkm_clk *clk = pmu->base.subdev.device->clk;

	*state = clk->pstate;
	return 0;
}
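
/*
 * Derive a new performance level from the smoothed load.  Above
 * p_load_max the level jumps by a third of the available range;
 * otherwise it is nudged proportionally toward p_load_target.  As an
 * illustrative example (not a value read from hardware): with
 * p_load_target = 70, a load of 50 % gives
 * ((50 - 70) * 10 / 70) / 2 = -1 in C integer arithmetic, i.e. one
 * level down.
 */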
static int
gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
				int *state, int load)
{
	struct gk20a_pmu_dvfs_data *data = pmu->data;
	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
	int cur_level, level;

	/* For GK20A, the performance level is directly mapped to pstate */
	level = cur_level = clk->pstate;

	if (load > data->p_load_max) {
		level = min(clk->state_nr - 1, level + (clk->state_nr / 3));
	} else {
		level += ((load - data->p_load_target) * 10 /
				data->p_load_target) / 2;
		level = max(0, level);
		level = min(clk->state_nr - 1, level);
	}

	nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
		   cur_level, level);

	*state = level;

	if (level == cur_level)
		return 0;
	else
		return 1;
}

static int
gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
			      struct gk20a_pmu_dvfs_dev_status *status)
{
	struct nvkm_device *device = pmu->base.subdev.device;

	status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
	status->total = nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
	return 0;
}
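
/*
 * Writing bit 31 of a counter's count register presumably latches a
 * reset; both counters are cleared so the next sampling period starts
 * from zero.
 */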
static void
gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
{
	struct nvkm_device *device = pmu->base.subdev.device;

	nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
	nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
}
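
/*
 * DVFS worker, re-armed every 100 ms once started: sample the
 * busy/total counters, smooth the load, pick a target level, program
 * the clock if the level changed, then reset the counters and
 * reschedule.
 */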
static void
gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
{
	struct gk20a_pmu *pmu =
		container_of(alarm, struct gk20a_pmu, alarm);
	struct gk20a_pmu_dvfs_data *data = pmu->data;
	struct gk20a_pmu_dvfs_dev_status status;
	struct nvkm_subdev *subdev = &pmu->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_clk *clk = device->clk;
	struct nvkm_timer *tmr = device->timer;
	struct nvkm_volt *volt = device->volt;
	u32 utilization = 0;
	int state, ret;

	/*
	 * The PMU is initialized before CLK and VOLT, so we have to make
	 * sure the CLK and VOLT are ready here.
	 */
	if (!clk || !volt)
		goto resched;

	ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
	if (ret) {
		nvkm_warn(subdev, "failed to get device status\n");
		goto resched;
	}

	if (status.total)
		utilization = div_u64((u64)status.busy * 100, status.total);
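
	/*
	 * Fold the sample into an exponential moving average:
	 *
	 *	avg_load = (p_smooth * avg_load + utilization) / (p_smooth + 1)
	 *
	 * With p_smooth = 1 this is just the mean of the previous average
	 * and the new sample, damping single-period spikes before the
	 * level decision below.
	 */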
	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
	data->avg_load /= data->p_smooth + 1;
	nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
		   utilization, data->avg_load);

	ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
	if (ret) {
		nvkm_warn(subdev, "failed to get current state\n");
		goto resched;
	}

	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
		nvkm_trace(subdev, "set new state to %d\n", state);
		gk20a_pmu_dvfs_target(pmu, &state);
	}

resched:
	gk20a_pmu_dvfs_reset_dev_status(pmu);
	nvkm_timer_alarm(tmr, 100000000, alarm); /* re-arm in 100 ms */
}

static int
gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct gk20a_pmu *pmu = gk20a_pmu(subdev);
	nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm);
	return 0;
}

static void *
gk20a_pmu_dtor(struct nvkm_subdev *subdev)
{
	return gk20a_pmu(subdev);
}
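
/*
 * Program the idle counters.  The control values written below
 * (0x00200001, 0x00000002, 0x00000003) are undocumented magic; from
 * the way the results are consumed, BUSY_SLOT presumably counts busy
 * cycles and CLK_SLOT all cycles, so their ratio gives utilization.
 */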
static int
gk20a_pmu_init(struct nvkm_subdev *subdev)
{
	struct gk20a_pmu *pmu = gk20a_pmu(subdev);
	struct nvkm_device *device = pmu->base.subdev.device;

	/* init pwr perf counter */
	nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
	nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
	nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);

	/* schedule the first DVFS pass; the worker re-arms itself */
	nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm);
	return 0;
}

static struct gk20a_pmu_dvfs_data
gk20a_dvfs_data = {
	.p_load_target = 70,
	.p_load_max = 90,
	.p_smooth = 1,
};

static const struct nvkm_subdev_func
gk20a_pmu = {
	.init = gk20a_pmu_init,
	.fini = gk20a_pmu_fini,
	.dtor = gk20a_pmu_dtor,
};

int
gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	static const struct nvkm_pmu_func func = {};
	struct gk20a_pmu *pmu;

	if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
		return -ENOMEM;
	pmu->base.func = &func;
	*ppmu = &pmu->base;

	nvkm_subdev_ctor(&gk20a_pmu, device, index, 0, &pmu->base.subdev);
	pmu->data = &gk20a_dvfs_data;
	nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
	return 0;
}