/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
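
/* The original #include block is elided from this excerpt; the headers
 * below are assumptions inferred from the identifiers used here, not
 * copied verbatim from the source file.
 */
#include "nv50.h"
#include "head.h"
#include "ior.h"
#include "channv50.h"
#include "rootnv50.h"

/* Supervisor bottom half: runs from disp->wq so the multi-stage modeset
 * sequence happens in process context rather than hard-IRQ context.
 */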
void
gf119_disp_super(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 mask[4];

	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
	/* Latch each head's supervisor status before deciding what to do. */
	list_for_each_entry(head, &disp->base.head, head) {
		mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
		HEAD_DBG(head, "%08x", mask[head->id]);
	}

	if (disp->super & 0x00000001) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super & 0x00000002) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(&disp->base);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00010000))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super & 0x00000004) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	/* Clear the latched per-head state, then write 0x6101d0 to complete. */
	list_for_each_entry(head, &disp->base.head, head)
		nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);
	nvkm_wr32(device, 0x6101d0, 0x80000000);
}
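
/* Decode a display channel exception: the offending method, its data,
 * and an extra (unknown) word are latched per-channel at 0x6101f0.
 */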
void
gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
	u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
	u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));

	nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
		   chid, (mthd & 0x0000ffc), data, mthd, unkn);

	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd & 0xffc) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	nvkm_wr32(device, 0x61009c, (1 << chid));
	nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
}
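
/* Top-level display interrupt handler: channel completion uevents,
 * channel errors, supervisor requests, and per-head vblank.
 */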
void
gf119_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 intr = nvkm_rd32(device, 0x610088);

	if (intr & 0x00000001) {
		u32 stat = nvkm_rd32(device, 0x61008c);
		while (stat) {
			int chid = __ffs(stat); stat &= ~(1 << chid);
			nv50_disp_chan_uevent_send(disp, chid);
			nvkm_wr32(device, 0x61008c, 1 << chid);
		}
		intr &= ~0x00000001;
	}

	if (intr & 0x00000002) {
		u32 stat = nvkm_rd32(device, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0)
			disp->func->intr_error(disp, chid);
		intr &= ~0x00000002;
	}

	if (intr & 0x00100000) {
		u32 stat = nvkm_rd32(device, 0x6100ac);
		if (stat & 0x00000007) {
			disp->super = (stat & 0x00000007);
			queue_work(disp->wq, &disp->supervisor);
			nvkm_wr32(device, 0x6100ac, disp->super);
			stat &= ~0x00000007;
		}

		if (stat) {
			nvkm_warn(subdev, "intr24 %08x\n", stat);
			nvkm_wr32(device, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}

	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 0x800;
		u32 mask = 0x01000000 << head->id;
		if (intr & mask) {
			u32 stat = nvkm_rd32(device, 0x6100bc + hoff);
			if (stat & 0x00000001)
				nvkm_disp_vblank(&disp->base, head->id);
			nvkm_mask(device, 0x6100bc + hoff, 0, 0);
			nvkm_rd32(device, 0x6100c0 + hoff);
		}
	}
}
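
/* Common GF119-family constructor: reads the head count from 0x022448
 * before handing off to the nv50 display core.
 */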
int
gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
		int index, struct nvkm_disp **pdisp)
{
	u32 heads = nvkm_rd32(device, 0x022448);
	return nv50_disp_new_(func, device, index, heads, pdisp);
}
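
/* Per-chipset function table wiring the GF119 implementations into the
 * common nv50 display core.
 */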
static const struct nv50_disp_func
gf119_disp = {
	.intr = gf119_disp_intr,
	.intr_error = gf119_disp_intr_error,
	.uevent = &gf119_disp_chan_uevent,
	.super = gf119_disp_super,
	.root = &gf119_disp_root_oclass,
	.head.new = gf119_head_new,
	.dac = { .nr = 3, .new = gf119_dac_new },
	.sor = { .nr = 4, .new = gf119_sor_new },
};
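
/* Public constructor for GF119-class display. */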
int
gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
	return gf119_disp_new_(&gf119_disp, device, index, pdisp);
}