/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *    Ben Skeggs <darktama@iinet.net.au>
 */

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
#include "nouveau_vm.h"

struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};

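/* Each object class an engine implements is tracked by a
 * nouveau_gpuobj_class entry on dev_priv->classes; any software methods
 * registered for that class hang off its "methods" list and are looked up
 * by method offset when nouveau_gpuobj_mthd_call() dispatches them.
 */
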
int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

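/* Illustrative usage (a sketch, not taken from this file; the class and
 * method numbers below are placeholders): an engine typically registers
 * its classes and any software methods while it is being set up, e.g.
 *
 *	nouveau_gpuobj_class_new(dev, 0x502d, NVOBJ_ENGINE_GR);
 *	nouveau_gpuobj_mthd_new(dev, 0x502d, 0x0100, my_mthd_handler);
 *
 * where my_mthd_handler matches the exec() signature declared in
 * struct nouveau_gpuobj_method above.
 */
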
int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

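/* nouveau_gpuobj_mthd_call2() below is the variant used when only a
 * channel id is known (e.g. from an interrupt path): it resolves the id
 * to a channel under channels.lock and then dispatches through
 * nouveau_gpuobj_mthd_call().
 */
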
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid >= 0 && chid < pfifo->channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	return ret;
}

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->size = size;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (!(flags & NVOBJ_FLAG_VM) && chan) {
		/* allocate from the channel's private instmem heap */
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		/* global object, backed directly by the instmem engine */
		ret = instmem->get(gpuobj, chan, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

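/* Minimal usage sketch (illustrative only; the size, alignment and flags
 * below are placeholders):
 *
 *	struct nouveau_gpuobj *obj = NULL;
 *	int ret = nouveau_gpuobj_new(dev, chan, 0x1000, 16,
 *				     NVOBJ_FLAG_ZERO_ALLOC, &obj);
 *	if (ret)
 *		return ret;
 *	...
 *	nouveau_gpuobj_ref(NULL, &obj);		release when done
 */
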
int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	/* free every registered class and its method list */
	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	/* all gpuobjs should have been released by their owners by now */
	WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}

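/* nouveau_gpuobj_ref(ref, &ptr) is the single ref/unref primitive used
 * throughout this file: it takes a reference on "ref" (if non-NULL),
 * drops the reference previously held in *ptr (freeing the object via
 * nouveau_gpuobj_del() on the final put) and then stores "ref" in *ptr.
 * Passing NULL as "ref" therefore just releases whatever *ptr pointed at.
 */
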
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->size = size;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);

	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	*pgpuobj = gpuobj;
	return 0;
}

void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
		/* fall through */
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}

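/* The six words written above form the NV50-style DMA object: word 0
 * packs compression, type, class and the access/target bits, words 1 and
 * 2 hold the low 32 bits of the limit and base, word 3 combines their
 * upper bits, and the last two words are left zero.
 */
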
int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}

int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base   = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;
	*pobj = obj;
	return 0;
}

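/* Typical call, mirroring what nouveau_gpuobj_channel_init() does further
 * down for the pre-NV50 VRAM ctxdma:
 *
 *	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 *				     0, dev_priv->fb_available_size,
 *				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
 *				     &vram);
 */
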
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

		if (oc->id != class)
			continue;

		/* instantiate the engine's channel context on first use */
		if (!chan->engctx[oc->engine]) {
			ret = eng->context_new(chan, oc->engine);
			if (ret)
				return ret;
		}

		return eng->object_new(chan, oc->engine, handle, class);
	}

	return -EINVAL;
}

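/* nouveau_gpuobj_channel_init_pramin() below reserves the per-channel
 * instance memory (PRAMIN) block and initialises the drm_mm heap that
 * later nouveau_gpuobj_new() calls for this channel allocate from.
 */
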
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;

		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size - base);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *pgd = NULL;
	struct nouveau_vm_pgd *vpgd;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
	if (ret)
		return ret;

	/* create page directory for this vm if none currently exists,
	 * will be destroyed automagically when last reference to the
	 * vm is removed
	 */
	if (list_empty(&vm->pgd_list)) {
		ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
		if (ret)
			return ret;
	}
	nouveau_vm_ref(vm, &chan->vm, pgd);
	nouveau_gpuobj_ref(NULL, &pgd);

	/* point channel at vm's page directory */
	vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
	nv_wo32(chan->ramin, 0x020c, 0x000000ff);

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
	if (dev_priv->card_type >= NV_C0)
		return nvc0_gpuobj_channel_init(chan, vm);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Link with shared channel VM
	 */
	if (vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

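/* nouveau_gpuobj_channel_takedown() undoes the per-channel setup above:
 * it drops the VM/page-directory references, tears down the PRAMIN heap
 * and releases the channel's instance memory.
 */
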
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	NV_DEBUG(chan->dev, "ch%d\n", chan->id);

	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

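/* nouveau_gpuobj_suspend() snapshots every globally-allocated gpuobj into
 * a vmalloc'd shadow copy; nouveau_gpuobj_resume() below writes the
 * shadows back, frees them and flushes instmem.
 */
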
void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}

u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

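/* When an object has no direct PRAMIN mapping (pinst == ~0) or RAMIN is
 * not yet available, nv_ro32()/nv_wo32() go through the indirect access
 * window instead: the 64KiB aperture at 0x700000 is pointed at the
 * object's vinst by programming register 0x001700, under vm_lock.
 */
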
void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}