/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/gpuobj.h>
#include <core/engine.h>

#include <subdev/instmem.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>

/* Tear down a gpuobj: optionally scrub its contents, release its
 * suballocation from the parent's heap, and destroy any heap it exposed
 * to children of its own.
 */
void
nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
{
	int i;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	if (gpuobj->node)
		nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);

	if (gpuobj->heap.block_size)
		nvkm_mm_fini(&gpuobj->heap);

	nvkm_object_destroy(&gpuobj->object);
}

int
nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, u32 pclass,
		    struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
		    int length, void **pobject)
{
	struct nvkm_instmem *imem = nvkm_instmem(parent);
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nvkm_gpuobj *gpuobj;
	struct nvkm_mm *heap = NULL;
	int ret, i;
	u64 addr;

	*pobject = NULL;

	if (pargpu) {
		/* suballocate from the closest parent gpuobj that exposes
		 * a heap (heap.block_size != 0)
		 */
		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
			if (nv_gpuobj(pargpu)->heap.block_size)
				break;
			pargpu = pargpu->parent;
		}

		if (unlikely(pargpu == NULL)) {
			nv_error(parent, "no gpuobj heap\n");
			return -EINVAL;
		}

		addr =  nv_gpuobj(pargpu)->addr;
		heap = &nv_gpuobj(pargpu)->heap;
		atomic_inc(&parent->refcount);
	} else {
		/* no parent heap: allocate backing storage from instmem */
		ret = imem->alloc(imem, parent, size, align, &parent);
		pargpu = parent;
		if (ret)
			return ret;

		addr = nv_memobj(pargpu)->addr;
		size = nv_memobj(pargpu)->size;

		if (bar && bar->alloc) {
			struct nvkm_instobj *iobj = (void *)parent;
			struct nvkm_mem **mem = (void *)(iobj + 1);
			struct nvkm_mem *node = *mem;
			if (!bar->alloc(bar, parent, node, &pargpu)) {
				nvkm_object_ref(NULL, &parent);
				parent = pargpu;
			}
		}
	}

	ret = nvkm_object_create_(parent, engine, oclass, pclass |
				  NV_GPUOBJ_CLASS, length, pobject);
	nvkm_object_ref(NULL, &parent);
	gpuobj = *pobject;
	if (ret)
		return ret;

	gpuobj->parent = pargpu;
	gpuobj->flags = flags;
	gpuobj->addr = addr;
	gpuobj->size = size;

	if (heap) {
		ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
				   &gpuobj->node);
		if (ret)
			return ret;

		gpuobj->addr += gpuobj->node->offset;
	}

	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
		ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
		if (ret)
			return ret;
	}

	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	return ret;
}

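/* Illustrative note (not in the original source): a gpuobj created with
 * NVOBJ_FLAG_HEAP gets a non-zero heap.block_size from nvkm_mm_init(),
 * which is exactly what the pargpu walk at the top of
 * nvkm_gpuobj_create_() looks for.  A hedged sketch of the resulting
 * nesting, with hypothetical names:
 *
 *	struct nvkm_gpuobj *ramin, *ctx;
 *	nvkm_gpuobj_new(parent, NULL, 0x10000, 0, NVOBJ_FLAG_HEAP, &ramin);
 *	nvkm_gpuobj_new(parent, nv_object(ramin), 0x1000, 0x100, 0, &ctx);
 *
 * Here "ctx" is suballocated from "ramin"'s heap via nvkm_mm_head()
 * rather than taking the instmem allocation path.
 */
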
struct nvkm_gpuobj_class {
	struct nvkm_object *pargpu;
	u64 size;
	u32 align;
	u32 flags;
};

static int
_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_gpuobj_class *args = data;
	struct nvkm_gpuobj *object;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
				 args->size, args->align, args->flags,
				 &object);
	*pobject = nv_object(object);
	if (ret)
		return ret;

	return 0;
}

void
_nvkm_gpuobj_dtor(struct nvkm_object *object)
{
	nvkm_gpuobj_destroy(nv_gpuobj(object));
}

int
_nvkm_gpuobj_init(struct nvkm_object *object)
{
	return nvkm_gpuobj_init(nv_gpuobj(object));
}

int
_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend)
{
	return nvkm_gpuobj_fini(nv_gpuobj(object), suspend);
}

u32
_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
{
	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
	/* translate to an offset within the parent's aperture */
	if (gpuobj->node)
		addr += gpuobj->node->offset;
	return pfuncs->rd32(gpuobj->parent, addr);
}

void
_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
	if (gpuobj->node)
		addr += gpuobj->node->offset;
	pfuncs->wr32(gpuobj->parent, addr, data);
}

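/* Illustrative note (not in the original source): because rd32/wr32 add
 * node->offset before forwarding to the parent, accesses are always
 * relative to the start of the suballocated object.  E.g. for a gpuobj
 * placed at offset 0x2000 inside its parent, a hypothetical caller doing
 *
 *	nv_wo32(gpuobj, 0x10, 0xdeadbeef);
 *
 * would land at offset 0x2010 in the parent's aperture.
 */
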
static struct nvkm_oclass
_nvkm_gpuobj_oclass = {
	.handle = 0x00000000,
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = _nvkm_gpuobj_ctor,
		.dtor = _nvkm_gpuobj_dtor,
		.init = _nvkm_gpuobj_init,
		.fini = _nvkm_gpuobj_fini,
		.rd32 = _nvkm_gpuobj_rd32,
		.wr32 = _nvkm_gpuobj_wr32,
	},
};

int
nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
		u32 size, u32 align, u32 flags,
		struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_object *engine = parent;
	struct nvkm_gpuobj_class args = {
		.pargpu = pargpu,
		.size = size,
		.align = align,
		.flags = flags,
	};

	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
		engine = &engine->engine->subdev.object;
	BUG_ON(engine == NULL);

	return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass,
				&args, sizeof(args),
				(struct nvkm_object **)pgpuobj);
}

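/* Usage sketch (illustrative, not from the original source): a typical
 * caller allocating a zero-initialised, 256-byte-aligned object from a
 * parent gpuobj's heap might look like:
 *
 *	struct nvkm_gpuobj *obj;
 *	int ret = nvkm_gpuobj_new(parent, pargpu, 0x1000, 0x100,
 *				  NVOBJ_FLAG_ZERO_ALLOC, &obj);
 *	if (ret)
 *		return ret;
 *
 * Passing NULL for pargpu instead takes the instmem allocation path in
 * nvkm_gpuobj_create_().
 */
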
int
nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
{
	struct nvkm_bar *bar = nvkm_bar(gpuobj);
	int ret = -EINVAL;

	if (bar && bar->umap) {
		struct nvkm_instobj *iobj = (void *)
			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
		struct nvkm_mem **mem = (void *)(iobj + 1);
		ret = bar->umap(bar, *mem, access, vma);
	}

	return ret;
}

int
nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
		   u32 access, struct nvkm_vma *vma)
{
	struct nvkm_instobj *iobj = (void *)
		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
	struct nvkm_mem **mem = (void *)(iobj + 1);
	int ret;

	ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, *mem);
	return 0;
}

void
nvkm_gpuobj_unmap(struct nvkm_vma *vma)
{
	if (vma->node) {
		nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
	}
}

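/* Lifecycle sketch (illustrative, not from the original source): a mapping
 * obtained from nvkm_gpuobj_map_vm() is torn down with
 * nvkm_gpuobj_unmap(), e.g. (assuming an RW access flag such as
 * NV_MEM_ACCESS_RW):
 *
 *	struct nvkm_vma vma = {};
 *	if (!nvkm_gpuobj_map_vm(gpuobj, vm, NV_MEM_ACCESS_RW, &vma)) {
 *		... use vma.offset ...
 *		nvkm_gpuobj_unmap(&vma);
 *	}
 */
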
/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * anywhere else.
 */

static void
nvkm_gpudup_dtor(struct nvkm_object *object)
{
	struct nvkm_gpuobj *gpuobj = (void *)object;
	nvkm_object_ref(NULL, &gpuobj->parent);
	nvkm_object_destroy(&gpuobj->object);
}

static struct nvkm_oclass
nvkm_gpudup_oclass = {
	.handle = NV_GPUOBJ_CLASS,
	.ofuncs = &(struct nvkm_ofuncs) {
		.dtor = nvkm_gpudup_dtor,
		.init = nvkm_object_init,
		.fini = nvkm_object_fini,
	},
};

int
nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
		struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_gpuobj *gpuobj;
	int ret;

	ret = nvkm_object_create(parent, &parent->engine->subdev.object,
				 &nvkm_gpudup_oclass, 0, &gpuobj);
	*pgpuobj = gpuobj;
	if (ret)
		return ret;

	nvkm_object_ref(nv_object(base), &gpuobj->parent);
	gpuobj->addr = base->addr;
	gpuobj->size = base->size;
	return 0;
}

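/* Usage sketch (illustrative, not from the original source): per the
 * comment above, nvkm_gpuobj_dup() only aliases an existing object's
 * addr/size under a new reference; no new backing storage is allocated:
 *
 *	struct nvkm_gpuobj *alias;
 *	int ret = nvkm_gpuobj_dup(parent, pgt, &alias);
 *
 * where "pgt" is a hypothetical existing gpuobj (e.g. the shared
 * PCI(E)GART paged dma object on <=nv4x).
 */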