/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
#include "nv50_display.h"
struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};
int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}
int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}
int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	return ret;
}
/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
   the handle, the second one is a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4 (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id.
*/
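/*
 * Illustrative sketch only (not part of the driver): packing the second
 * hash-table CARD32 for the pre-NV40 layout described above.  The helper
 * name is hypothetical and the shifts simply follow the comment; bits 17:16
 * take the engine, 28:24 the channel id, and bit 31 marks the entry valid.
 *
 *	static u32 nv04_ramht_ctx(u32 instance_addr, u32 engine, u32 chid)
 *	{
 *		return (instance_addr >> 4) | (engine << 16) |
 *		       (chid << 24) | 0x80000000;
 *	}
 */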
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (!(flags & NVOBJ_FLAG_VM) && chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		ret = instmem->get(gpuobj, chan, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}
void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}
static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (pgpuobj)
		*pgpuobj = gpuobj;
	return 0;
}
/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator, same value as the first pte, as does nvidia;
   rivatv uses 0xffffffff

   Non linear page tables need a list of frame addresses afterwards,
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
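/*
 * Illustrative sketch only (not part of the driver): assembling the first
 * two words of a pre-NV50 ctxdma from the fields listed above.  The helper
 * is hypothetical and simply mirrors the layout in the comment: class in
 * 11:0, access in 15:14, target in 17:16, the low address bits as the dma
 * adjust in 31:20, and the inclusive limit in the second word.
 *
 *	static void nv04_dma_ctx_words(u32 class, u32 access, u32 target,
 *				       u32 base, u32 size, u32 *w0, u32 *w1)
 *	{
 *		*w0 = class | (access << 14) | (target << 16) |
 *		      ((base & 0x00000fff) << 20);
 *		*w1 = size - 1;
 *	}
 */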
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;
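	/*
	 * Worked example with made-up numbers: base = 0x1000 and size = 0x2000
	 * give an inclusive limit of 0x2fff, which is the value written into
	 * the limit words below.
	 */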
	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}
int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;
	if (pobj)
		*pobj = obj;
	else
		nouveau_gpuobj_ref(NULL, &obj);

	return 0;
}
/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0  class
   17:15 patch config:
	 scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18    synchronize enable
   19    endian: 1 big, 0 little
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]
   11:0  class (maybe uses more bits here?)
   25    patch status valid ?
   entry[1]
   15:0  DMA notifier (maybe 20:0)
   entry[2]
   15:0  DMA 0 instance (maybe 20:0)
   entry[3]
   15:0  DMA 1 instance (maybe 20:0)
*/
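/*
 * Illustrative sketch only (not part of the driver): building entry[0] of a
 * pre-NV40 context object from the bits listed above.  The helper name is
 * hypothetical; it only sets the class, the big-endian bit and the "patch
 * status valid" bit, i.e. the fields the comment spells out.
 *
 *	static u32 nv04_ctxobj_word0(u32 class, bool big_endian)
 *	{
 *		u32 w0 = class & 0xfff;
 *		if (big_endian)
 *			w0 |= 1 << 19;
 *		w0 |= 1 << 24;
 *		return w0;
 *	}
 */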
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	kref_init(&gpuobj->refcount);
	gpuobj->cinst = 0x40;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

		if (oc->id != class)
			continue;

		if (oc->engine == NVOBJ_ENGINE_SW)
			return nouveau_gpuobj_sw_new(chan, handle, class);

		if (!chan->engctx[oc->engine]) {
			ret = eng->context_new(chan, oc->engine);
			if (ret)
				return ret;
		}

		return eng->object_new(chan, oc->engine, handle, class);
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;
}
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 size;
	u32 base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x2000;
	base = 0;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *pgd = NULL;
	struct nouveau_vm_pgd *vpgd;
	int ret, i;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
	if (ret)
		return ret;

	/* create page directory for this vm if none currently exists,
	 * will be destroyed automagically when last reference to the
	 * vm is removed
	 */
	if (list_empty(&vm->pgd_list)) {
		ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
		if (ret)
			return ret;
	}
	nouveau_vm_ref(vm, &chan->vm, pgd);
	nouveau_gpuobj_ref(NULL, &pgd);

	/* point channel at vm's page directory */
	vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
	nv_wo32(chan->ramin, 0x020c, 0x000000ff);

	/* map display semaphore buffers into channel's vm */
	if (dev_priv->card_type >= NV_D0)
		return 0;

	for (i = 0; i < 2; i++) {
		struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];

		ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
					 &chan->dispc_vma[i]);
		if (ret)
			return ret;
	}

	return 0;
}
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
	if (dev_priv->card_type >= NV_C0)
		return nvc0_gpuobj_channel_init(chan, vm);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Link with shared channel VM
	 */
	if (vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;

		/* dma objects for display sync channel semaphore blocks */
		for (i = 0; i < dev->mode_config.num_crtc; i++) {
			struct nouveau_gpuobj *sem = NULL;
			struct nv50_display_crtc *dispc =
				&nv50_display(dev)->crtc[i];
			u64 offset = dispc->sem.bo->bo.offset;

			ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
						     NV_MEM_ACCESS_RW,
						     NV_MEM_TARGET_VRAM, &sem);
			if (ret)
				return ret;

			ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
			nouveau_gpuobj_ref(NULL, &sem);
			if (ret)
				return ret;
		}
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
		struct nv50_display *disp = nv50_display(dev);

		for (i = 0; i < dev->mode_config.num_crtc; i++) {
			struct nv50_display_crtc *dispc = &disp->crtc[i];
			nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
		}
	}

	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}
void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (init->handle == ~0)
		return -EINVAL;

	chan = nouveau_channel_get(file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, init->handle)) {
		ret = -EEXIST;
		goto out;
	}

	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
	}

out:
	nouveau_channel_put(&chan);
	return ret;
}
int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(file_priv, objfree->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Synchronize with the user channel */
	nouveau_channel_idle(chan);

	ret = nouveau_ramht_remove(chan, objfree->handle);
	nouveau_channel_put(&chan);
	return ret;
}
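/* The two accessors below read and write the contents of a gpuobj.  When an
 * object has a usable PRAMIN mapping (pinst != ~0 and PRAMIN is available)
 * they go through nv_ri32()/nv_wi32().  Otherwise they fall back to an
 * indirect path: the upper bits of the object's VRAM address are written to
 * register 0x001700 to select which 64KiB chunk is visible through the
 * window at 0x700000, and the low 16 bits index into that window.
 * dev_priv->ramin_base caches the currently selected chunk, and vm_lock
 * serialises the select-then-access sequence.
 */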
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}
void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}