[zen-stable.git] / drivers/gpu/drm/nouveau/nvc0_copy.c
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include "nouveau_vm.h"
#include "nouveau_ramht.h"
#include "nvc0_copy.fuc.h"

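/*
 * nvc0_copy.fuc.h carries the pre-assembled PCOPY falcon microcode as two
 * arrays, nvc0_pcopy_data[] and nvc0_pcopy_code[]; nvc0_copy_init() below
 * uploads both into the engine before starting it.
 */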
struct nvc0_copy_engine {
	struct nouveau_exec_engine base;
	u32 irq;	/* interrupt line handled by this engine's ISR */
	u32 pmc;	/* engine's bit in the PMC enable register (0x000200) */
	u32 fuc;	/* base address of the engine's fuc (falcon) registers */
	u32 ctx;	/* offset of the context pointer inside a channel's RAMIN */
};

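/*
 * Per-channel context setup: allocate a 256-byte, zero-filled context
 * object in the channel's VM and publish its instance address at the
 * engine's ctx offset inside the channel's RAMIN (presumably where the
 * copy fuc looks it up on channel load).
 */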
static int
nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 256, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
				 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
	if (ret)
		return ret;

	nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
	nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
	return 0;
}

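/* Object creation is a no-op: the copy classes carry no per-object state here. */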
static int
nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
		     u32 handle, u16 class)
{
	return 0;
}

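/*
 * Per-channel context teardown.  FIFO access to the engine is briefly
 * disabled while any reference to the dying channel is scrubbed from the
 * engine's current/next channel registers, then the context pointer in
 * RAMIN is cleared and the context object released.
 */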
static void
nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	u32 inst;

	inst  = (chan->ramin->vinst >> 12);
	inst |= 0x40000000;

	/* disable fifo access */
	nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
	/* mark channel as unloaded if it's currently active */
	if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
		nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
	/* mark next channel as invalid if it's about to be loaded */
	if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
		nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
	/* restore fifo access */
	nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);

	nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
	nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);

	nouveau_gpuobj_ref(NULL, &ctx);
	chan->engctx[engine] = ctx;
}

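/*
 * Engine init: cycle the unit's bit in the PMC enable register (0x000200)
 * to reset it, upload the data and then the code segment of the copy
 * microcode, apparently tell the fuc which copy engine instance it is
 * driving (the +0x084 write), then kick it off via the ENTRY/TRIGGER
 * writes (names taken from the comments below).
 */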
static int
nvc0_copy_init(struct drm_device *dev, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
	int i;

	nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
	nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
	nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);

	nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
	for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
		nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);

	nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
	for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
		nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
	}

	nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
	nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
	nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
	nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
	return 0;
}

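/*
 * Engine fini (suspend/teardown): block FIFO access, wait for the fuc to
 * go idle, force the current channel context to be unloaded, then write
 * 0xffffffff to +0x014 (most likely masking the engine's interrupts).
 */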
static int
nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);

	nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);

	/* trigger fuc context unload */
	nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
	nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
	nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
	nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);

	nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
	return 0;
}

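/*
 * Dispatch error codes reported by the fuc in its status word, decoded by
 * nvc0_copy_isr() below when printing DISPATCH_ERROR messages.
 */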
static struct nouveau_enum nvc0_copy_isr_error_name[] = {
	{ 0x0001, "ILLEGAL_MTHD" },
	{ 0x0002, "INVALID_ENUM" },
	{ 0x0003, "INVALID_BITFIELD" },
	{}
};

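/*
 * Interrupt handler, shared by both copy engines.  The status word from
 * +0x008 is masked with the value read from +0x01c; bit 0x40 is a dispatch
 * error, which is decoded (error code, channel, subchannel, method, data),
 * printed and acknowledged; anything else is reported as an unhandled
 * interrupt and acknowledged as-is.
 */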
static void
nvc0_copy_isr(struct drm_device *dev, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
	u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
	u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
	u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
	u32 chid = nvc0_graph_isr_chid(dev, inst);
	u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
	u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
	u32 mthd = (addr & 0x07ff) << 2;
	u32 subc = (addr & 0x3800) >> 11;
	u32 data = nv_rd32(dev, pcopy->fuc + 0x044);

	if (stat & 0x00000040) {
		NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
		nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
		       chid, inst, subc, mthd, data);
		nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
		stat &= ~0x00000040;
	}

	if (stat) {
		NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
		nv_wr32(dev, pcopy->fuc + 0x004, stat);
	}
}

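/* Thin per-engine wrappers so each IRQ line can be registered separately. */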
static void
nvc0_copy_isr_0(struct drm_device *dev)
{
	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
}

static void
nvc0_copy_isr_1(struct drm_device *dev)
{
	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
}

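/*
 * Engine teardown: unregister the IRQ handler, remove the engine from the
 * object engine list and free the per-engine state.
 */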
static void
nvc0_copy_destroy(struct drm_device *dev, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, pcopy->irq);

	if (engine == NVOBJ_ENGINE_COPY0)
		NVOBJ_ENGINE_DEL(dev, COPY0);
	else
		NVOBJ_ENGINE_DEL(dev, COPY1);
	kfree(pcopy);
}

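/*
 * Constructor, called once per copy engine.  Engine 0 and engine 1 differ
 * only in their IRQ line, PMC enable bit, fuc register base, RAMIN context
 * offset and object class (0x90b5 vs 0x90b8).
 */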
int
nvc0_copy_create(struct drm_device *dev, int engine)
{
	struct nvc0_copy_engine *pcopy;

	pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
	if (!pcopy)
		return -ENOMEM;

	pcopy->base.destroy = nvc0_copy_destroy;
	pcopy->base.init = nvc0_copy_init;
	pcopy->base.fini = nvc0_copy_fini;
	pcopy->base.context_new = nvc0_copy_context_new;
	pcopy->base.context_del = nvc0_copy_context_del;
	pcopy->base.object_new = nvc0_copy_object_new;

	if (engine == 0) {
		pcopy->irq = 5;
		pcopy->pmc = 0x00000040;
		pcopy->fuc = 0x104000;
		pcopy->ctx = 0x0230;
		nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
		NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
		NVOBJ_CLASS(dev, 0x90b5, COPY0);
	} else {
		pcopy->irq = 6;
		pcopy->pmc = 0x00000080;
		pcopy->fuc = 0x105000;
		pcopy->ctx = 0x0240;
		nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
		NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
		NVOBJ_CLASS(dev, 0x90b8, COPY1);
	}

	return 0;
}