drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "head.h"
#include "ior.h"
#include "rootnv50.h"
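
/* Supervisor bottom-half: runs from the disp workqueue when the
 * supervisor interrupt fires.  disp->super holds the stage bitmask
 * (stage 1, 2 or 3) latched by gf119_disp_intr() below.
 */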
void
gf119_disp_super(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 mask[4];

	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
	list_for_each_entry(head, &disp->base.head, head) {
		mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
		HEAD_DBG(head, "%08x", mask[head->id]);
	}
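
	/* Each stage only acts on heads whose per-head supervisor mask
	 * (0x6101d4, snapshotted above) flagged work to do.
	 */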
	if (disp->super & 0x00000001) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super & 0x00000002) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(&disp->base);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00010000))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super & 0x00000004) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}
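
	/* Clear each head's supervisor mask, then ack the supervisor
	 * (the 0x6101d0 write appears to signal stage completion to the
	 * display engine).
	 */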
	list_for_each_entry(head, &disp->base.head, head)
		nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);
	nvkm_wr32(device, 0x6101d0, 0x80000000);
}
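
/* Decode and log a method error reported by display channel 'chid',
 * then clear the error status and reset the channel's method fetch.
 */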
void
gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
	u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
	u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));

	nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
		   chid, (mthd & 0x0000ffc), data, mthd, unkn);

	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd & 0xffc) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	nvkm_wr32(device, 0x61009c, (1 << chid));
	nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
}
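
/* Top-level display interrupt handler: acknowledges and dispatches
 * channel events, channel errors, supervisor requests, and per-head
 * vblank interrupts.
 */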
void
gf119_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 intr = nvkm_rd32(device, 0x610088);
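
	/* 0x00000001: per-channel user-event (awaken) notifications. */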
	if (intr & 0x00000001) {
		u32 stat = nvkm_rd32(device, 0x61008c);
		while (stat) {
			int chid = __ffs(stat); stat &= ~(1 << chid);
			nv50_disp_chan_uevent_send(disp, chid);
			nvkm_wr32(device, 0x61008c, 1 << chid);
		}
		intr &= ~0x00000001;
	}
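
	/* 0x00000002: a display channel reported a method error. */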
	if (intr & 0x00000002) {
		u32 stat = nvkm_rd32(device, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0)
			disp->func->intr_error(disp, chid);
		intr &= ~0x00000002;
	}
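
	/* 0x00100000: supervisor request; latch the requested stage and
	 * defer the heavy lifting to the workqueue (gf119_disp_super).
	 */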
	if (intr & 0x00100000) {
		u32 stat = nvkm_rd32(device, 0x6100ac);
		if (stat & 0x00000007) {
			disp->super = (stat & 0x00000007);
			queue_work(disp->wq, &disp->supervisor);
			nvkm_wr32(device, 0x6100ac, disp->super);
			stat &= ~0x00000007;
		}

		if (stat) {
			nvkm_warn(subdev, "intr24 %08x\n", stat);
			nvkm_wr32(device, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}
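
	/* 0x01000000 << head->id: per-head status; bit 0 is vblank. */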
	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 0x800;
		u32 mask = 0x01000000 << head->id;
		if (mask & intr) {
			u32 stat = nvkm_rd32(device, 0x6100bc + hoff);
			if (stat & 0x00000001)
				nvkm_disp_vblank(&disp->base, head->id);
			nvkm_mask(device, 0x6100bc + hoff, 0, 0);
			nvkm_rd32(device, 0x6100c0 + hoff);
		}
	}
}
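
/* Common constructor for GF119-style display: the head count is read
 * from the hardware (0x022448) rather than hardcoded per chipset.
 */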
int
gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
		int index, struct nvkm_disp **pdisp)
{
	u32 heads = nvkm_rd32(device, 0x022448);
	return nv50_disp_new_(func, device, index, heads, pdisp);
}
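
/* GF119 implementation hooks and fixed output resource counts
 * (3 DACs, 4 SORs).
 */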
static const struct nv50_disp_func
gf119_disp = {
	.intr = gf119_disp_intr,
	.intr_error = gf119_disp_intr_error,
	.uevent = &gf119_disp_chan_uevent,
	.super = gf119_disp_super,
	.root = &gf119_disp_root_oclass,
	.head.new = gf119_head_new,
	.dac = { .nr = 3, .new = gf119_dac_new },
	.sor = { .nr = 4, .new = gf119_sor_new },
};
int
gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
	return gf119_disp_new_(&gf119_disp, device, index, pdisp);
}