/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>

#include <nvif/class.h>
#include <nvif/cl006b.h>
#include <nvif/unpack.h>
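/*
 * Look up the PFIFO context-switch register and the offset of an engine's
 * context pointer within a channel's RAMFC entry.  Returns false for
 * engines without a switchable context (DMAOBJ, SW), leaving *reg and
 * *ctx untouched.
 */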
static bool
nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW:
		return false;
	case NVKM_ENGINE_GR:
		*reg = 0x0032e0;
		*ctx = 0x38;
		return true;
	case NVKM_ENGINE_MPEG:
		*reg = 0x00330c;
		*ctx = 0x54;
		return true;
	default:
		WARN_ON(1);
		return false;
	}
}
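/*
 * Tear down an engine's context for this channel: with PFIFO disabled
 * (0x002500), clear the live context register if the channel is currently
 * resident, and zero the saved context pointer in the channel's RAMFC.
 */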
static int
nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine, bool suspend)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 reg, ctx;
	int chid;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, 0x00000000);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
	nvkm_done(imem->ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
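/*
 * Install an engine's context for this channel: with PFIFO disabled, point
 * the live context register at the channel's context object if the channel
 * is currently resident, and save the pointer in RAMFC so it is reloaded
 * on channel switch.
 */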
static int
nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 inst, reg, ctx;
	int chid;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;
	inst = chan->engn[engine->subdev.index]->addr >> 4;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, inst);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
	nvkm_done(imem->ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
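/* Destroy the per-engine context object created by engine_ctor(). */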
static void
nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
}
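/*
 * Create the per-engine context: for engines with a switchable context,
 * bind the object into instance memory so its address can be programmed
 * into the context register and RAMFC.
 */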
static int
nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine,
			  struct nvkm_object *object)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	const int engn = engine->subdev.index;
	u32 reg, ctx;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;

	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}
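/*
 * Add an object to the channel's RAMHT hash table, encoding the channel
 * id and the owning engine into the context entry.
 */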
static int
nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_object *object)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
	u32 context = chan->base.chid << 23;
	u32 handle  = object->handle;
	int hash;

	switch (object->engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
	case NVKM_ENGINE_GR    : context |= 0x00100000; break;
	case NVKM_ENGINE_MPEG  : context |= 0x00200000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
				 handle, context);
	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
	return hash;
}
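/*
 * Channel hooks for NV40; dtor/init/fini and object_dtor are shared with
 * the NV04 implementation.
 */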
static const struct nvkm_fifo_chan_func
nv40_fifo_dma_func = {
	.dtor = nv04_fifo_dma_dtor,
	.init = nv04_fifo_dma_init,
	.fini = nv04_fifo_dma_fini,
	.engine_ctor = nv40_fifo_dma_engine_ctor,
	.engine_dtor = nv40_fifo_dma_engine_dtor,
	.engine_init = nv40_fifo_dma_engine_init,
	.engine_fini = nv40_fifo_dma_engine_fini,
	.object_ctor = nv40_fifo_dma_object_ctor,
	.object_dtor = nv04_fifo_dma_object_dtor,
};
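/*
 * Constructor for NV40_CHANNEL_DMA objects: validate the ioctl arguments,
 * create the channel, and initialise its RAMFC entry (get/put pointers,
 * pushbuf DMA object and fetch parameters).
 */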
static int
nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		  void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
		if (!args->v0.pushbuf)
			return -EINVAL;
	} else
		return ret;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;

	ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
				  (1ULL << NVKM_ENGINE_DMAOBJ) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_MPEG) |
				  (1ULL << NVKM_ENGINE_SW),
				  0, 0xc00000, 0x1000, oclass, &chan->base);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;
	chan->ramfc = chan->base.chid * 128;

	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
	nvkm_done(imem->ramfc);
	return 0;
}
const struct nvkm_fifo_chan_oclass
nv40_fifo_dma_oclass = {
	.base.oclass = NV40_CHANNEL_DMA,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = nv40_fifo_dma_new,
};