/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"
#include "fuc/gt215.fuc3.h"

#include <subdev/timer.h>
30 gt215_pmu_send(struct nvkm_pmu
*pmu
, u32 reply
[2],
31 u32 process
, u32 message
, u32 data0
, u32 data1
)
33 struct nvkm_subdev
*subdev
= &pmu
->subdev
;
34 struct nvkm_device
*device
= subdev
->device
;
37 mutex_lock(&subdev
->mutex
);
38 /* wait for a free slot in the fifo */
39 addr
= nvkm_rd32(device
, 0x10a4a0);
40 if (nvkm_msec(device
, 2000,
41 u32 tmp
= nvkm_rd32(device
, 0x10a4b0);
42 if (tmp
!= (addr
^ 8))
45 mutex_unlock(&subdev
->mutex
);
49 /* we currently only support a single process at a time waiting
50 * on a synchronous reply, take the PMU mutex and tell the
51 * receive handler what we're waiting for
54 pmu
->recv
.message
= message
;
55 pmu
->recv
.process
= process
;
58 /* acquire data segment access */
60 nvkm_wr32(device
, 0x10a580, 0x00000001);
61 } while (nvkm_rd32(device
, 0x10a580) != 0x00000001);
63 /* write the packet */
64 nvkm_wr32(device
, 0x10a1c0, 0x01000000 | (((addr
& 0x07) << 4) +
66 nvkm_wr32(device
, 0x10a1c4, process
);
67 nvkm_wr32(device
, 0x10a1c4, message
);
68 nvkm_wr32(device
, 0x10a1c4, data0
);
69 nvkm_wr32(device
, 0x10a1c4, data1
);
70 nvkm_wr32(device
, 0x10a4a0, (addr
+ 1) & 0x0f);
72 /* release data segment access */
73 nvkm_wr32(device
, 0x10a580, 0x00000000);
75 /* wait for reply, if requested */
77 wait_event(pmu
->recv
.wait
, (pmu
->recv
.process
== 0));
78 reply
[0] = pmu
->recv
.data
[0];
79 reply
[1] = pmu
->recv
.data
[1];
82 mutex_unlock(&subdev
->mutex
);
87 gt215_pmu_recv(struct nvkm_pmu
*pmu
)
89 struct nvkm_subdev
*subdev
= &pmu
->subdev
;
90 struct nvkm_device
*device
= subdev
->device
;
91 u32 process
, message
, data0
, data1
;
93 /* nothing to do if GET == PUT */
94 u32 addr
= nvkm_rd32(device
, 0x10a4cc);
95 if (addr
== nvkm_rd32(device
, 0x10a4c8))
98 /* acquire data segment access */
100 nvkm_wr32(device
, 0x10a580, 0x00000002);
101 } while (nvkm_rd32(device
, 0x10a580) != 0x00000002);
103 /* read the packet */
104 nvkm_wr32(device
, 0x10a1c0, 0x02000000 | (((addr
& 0x07) << 4) +
106 process
= nvkm_rd32(device
, 0x10a1c4);
107 message
= nvkm_rd32(device
, 0x10a1c4);
108 data0
= nvkm_rd32(device
, 0x10a1c4);
109 data1
= nvkm_rd32(device
, 0x10a1c4);
110 nvkm_wr32(device
, 0x10a4cc, (addr
+ 1) & 0x0f);
112 /* release data segment access */
113 nvkm_wr32(device
, 0x10a580, 0x00000000);
115 /* wake process if it's waiting on a synchronous reply */
116 if (pmu
->recv
.process
) {
117 if (process
== pmu
->recv
.process
&&
118 message
== pmu
->recv
.message
) {
119 pmu
->recv
.data
[0] = data0
;
120 pmu
->recv
.data
[1] = data1
;
121 pmu
->recv
.process
= 0;
122 wake_up(&pmu
->recv
.wait
);
127 /* right now there's no other expected responses from the engine,
128 * so assume that any unexpected message is an error.
130 nvkm_warn(subdev
, "%c%c%c%c %08x %08x %08x %08x\n",
131 (char)((process
& 0x000000ff) >> 0),
132 (char)((process
& 0x0000ff00) >> 8),
133 (char)((process
& 0x00ff0000) >> 16),
134 (char)((process
& 0xff000000) >> 24),
135 process
, message
, data0
, data1
);
139 gt215_pmu_intr(struct nvkm_pmu
*pmu
)
141 struct nvkm_subdev
*subdev
= &pmu
->subdev
;
142 struct nvkm_device
*device
= subdev
->device
;
143 u32 disp
= nvkm_rd32(device
, 0x10a01c);
144 u32 intr
= nvkm_rd32(device
, 0x10a008) & disp
& ~(disp
>> 16);
146 if (intr
& 0x00000020) {
147 u32 stat
= nvkm_rd32(device
, 0x10a16c);
148 if (stat
& 0x80000000) {
149 nvkm_error(subdev
, "UAS fault at %06x addr %08x\n",
151 nvkm_rd32(device
, 0x10a168));
152 nvkm_wr32(device
, 0x10a16c, 0x00000000);
157 if (intr
& 0x00000040) {
158 schedule_work(&pmu
->recv
.work
);
159 nvkm_wr32(device
, 0x10a004, 0x00000040);
163 if (intr
& 0x00000080) {
164 nvkm_info(subdev
, "wr32 %06x %08x\n",
165 nvkm_rd32(device
, 0x10a7a0),
166 nvkm_rd32(device
, 0x10a7a4));
167 nvkm_wr32(device
, 0x10a004, 0x00000080);
172 nvkm_error(subdev
, "intr %08x\n", intr
);
173 nvkm_wr32(device
, 0x10a004, intr
);
178 gt215_pmu_fini(struct nvkm_pmu
*pmu
)
180 nvkm_wr32(pmu
->subdev
.device
, 0x10a014, 0x00000060);
184 gt215_pmu_reset(struct nvkm_pmu
*pmu
)
186 struct nvkm_device
*device
= pmu
->subdev
.device
;
187 nvkm_mask(device
, 0x022210, 0x00000001, 0x00000000);
188 nvkm_mask(device
, 0x022210, 0x00000001, 0x00000001);
189 nvkm_rd32(device
, 0x022210);
193 gt215_pmu_enabled(struct nvkm_pmu
*pmu
)
195 return nvkm_rd32(pmu
->subdev
.device
, 0x022210) & 0x00000001;
199 gt215_pmu_init(struct nvkm_pmu
*pmu
)
201 struct nvkm_device
*device
= pmu
->subdev
.device
;
204 /* upload data segment */
205 nvkm_wr32(device
, 0x10a1c0, 0x01000000);
206 for (i
= 0; i
< pmu
->func
->data
.size
/ 4; i
++)
207 nvkm_wr32(device
, 0x10a1c4, pmu
->func
->data
.data
[i
]);
209 /* upload code segment */
210 nvkm_wr32(device
, 0x10a180, 0x01000000);
211 for (i
= 0; i
< pmu
->func
->code
.size
/ 4; i
++) {
213 nvkm_wr32(device
, 0x10a188, i
>> 6);
214 nvkm_wr32(device
, 0x10a184, pmu
->func
->code
.data
[i
]);
217 /* start it running */
218 nvkm_wr32(device
, 0x10a10c, 0x00000000);
219 nvkm_wr32(device
, 0x10a104, 0x00000000);
220 nvkm_wr32(device
, 0x10a100, 0x00000002);
222 /* wait for valid host->pmu ring configuration */
223 if (nvkm_msec(device
, 2000,
224 if (nvkm_rd32(device
, 0x10a4d0))
228 pmu
->send
.base
= nvkm_rd32(device
, 0x10a4d0) & 0x0000ffff;
229 pmu
->send
.size
= nvkm_rd32(device
, 0x10a4d0) >> 16;
231 /* wait for valid pmu->host ring configuration */
232 if (nvkm_msec(device
, 2000,
233 if (nvkm_rd32(device
, 0x10a4dc))
237 pmu
->recv
.base
= nvkm_rd32(device
, 0x10a4dc) & 0x0000ffff;
238 pmu
->recv
.size
= nvkm_rd32(device
, 0x10a4dc) >> 16;
240 nvkm_wr32(device
, 0x10a010, 0x000000e0);
244 const struct nvkm_falcon_func
248 .load_imem
= nvkm_falcon_v1_load_imem
,
249 .load_dmem
= nvkm_falcon_v1_load_dmem
,
250 .read_dmem
= nvkm_falcon_v1_read_dmem
,
251 .bind_context
= nvkm_falcon_v1_bind_context
,
252 .wait_for_halt
= nvkm_falcon_v1_wait_for_halt
,
253 .clear_interrupt
= nvkm_falcon_v1_clear_interrupt
,
254 .set_start_addr
= nvkm_falcon_v1_set_start_addr
,
255 .start
= nvkm_falcon_v1_start
,
256 .enable
= nvkm_falcon_v1_enable
,
257 .disable
= nvkm_falcon_v1_disable
,
258 .cmdq
= { 0x4a0, 0x4b0, 4 },
259 .msgq
= { 0x4c8, 0x4cc, 0 },
262 static const struct nvkm_pmu_func
264 .flcn
= >215_pmu_flcn
,
265 .code
.data
= gt215_pmu_code
,
266 .code
.size
= sizeof(gt215_pmu_code
),
267 .data
.data
= gt215_pmu_data
,
268 .data
.size
= sizeof(gt215_pmu_data
),
269 .enabled
= gt215_pmu_enabled
,
270 .reset
= gt215_pmu_reset
,
271 .init
= gt215_pmu_init
,
272 .fini
= gt215_pmu_fini
,
273 .intr
= gt215_pmu_intr
,
274 .send
= gt215_pmu_send
,
275 .recv
= gt215_pmu_recv
,
278 static const struct nvkm_pmu_fwif
280 { -1, gf100_pmu_nofw
, >215_pmu
},
285 gt215_pmu_new(struct nvkm_device
*device
, int index
, struct nvkm_pmu
**ppmu
)
287 return nvkm_pmu_new_(gt215_pmu_fwif
, device
, index
, ppmu
);