/*
 * Copyright 2005 Stephane Marchesin
 * Copyright 2008 Stuart Bennett
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
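/* Map the PCI resources the driver needs: BAR0 (the mmio registers) and a
 * RAMIN mapping, either the dedicated RAMIN BAR on NV40 and newer cards or a
 * window into BAR0's PRAMIN aperture on older ones.  The linear framebuffer
 * (resource 1) and the BIOS (resource 6) are not mapped here. */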
static int nouveau_init_card_mappings(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    int ret;

    /* resource 0 is mmio regs */
    /* resource 1 is linear FB */
    /* resource 2 is RAMIN (mmio regs + 0x1000000) */
    /* resource 6 is bios */

    /* map the mmio regs */
    ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
                     drm_get_resource_len(dev, 0),
                     _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
    if (ret) {
        DRM_ERROR("Unable to initialize the mmio mapping (%d). "
                  "Please report your setup to " DRIVER_EMAIL "\n", ret);
        return -EINVAL;
    }
    DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset);

    /* map larger RAMIN aperture on NV40 cards */
    dev_priv->ramin = NULL;
    if (dev_priv->card_type >= NV_40) {
        int ramin_resource = 2;
        if (drm_get_resource_len(dev, ramin_resource) == 0)
            ramin_resource = 3;

        ret = drm_addmap(dev,
                         drm_get_resource_start(dev, ramin_resource),
                         drm_get_resource_len(dev, ramin_resource),
                         _DRM_REGISTERS, _DRM_READ_ONLY,
                         &dev_priv->ramin);
        if (ret) {
            DRM_ERROR("Failed to init RAMIN mapping, "
                      "limited instance memory available\n");
            dev_priv->ramin = NULL;
        }
    }

    /* On older cards (or if the above failed), create a map covering
     * the BAR0 PRAMIN aperture */
    if (!dev_priv->ramin) {
        ret = drm_addmap(dev,
                         drm_get_resource_start(dev, 0) + NV_RAMIN,
                         (1 * 1024 * 1024),
                         _DRM_REGISTERS, _DRM_READ_ONLY,
                         &dev_priv->ramin);
        if (ret) {
            DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret);
            return ret;
        }
    }

    return 0;
}
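/* nouveau_stub_init()/nouveau_stub_takedown() are no-op hooks for engines
 * that need no setup or teardown on a given generation.
 * nouveau_init_engine_ptrs() fills dev_priv->Engine with the per-generation
 * implementations, keyed on the high nibble of the chipset id. */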
static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static void nouveau_stub_takedown(struct drm_device *dev) {}

static int nouveau_init_engine_ptrs(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_engine *engine = &dev_priv->Engine;

    switch (dev_priv->chipset & 0xf0) {
    case 0x00:
        engine->instmem.init = nv04_instmem_init;
        engine->instmem.takedown = nv04_instmem_takedown;
        engine->instmem.populate = nv04_instmem_populate;
        engine->instmem.clear = nv04_instmem_clear;
        engine->instmem.bind = nv04_instmem_bind;
        engine->instmem.unbind = nv04_instmem_unbind;
        engine->mc.init = nv04_mc_init;
        engine->mc.takedown = nv04_mc_takedown;
        engine->timer.init = nv04_timer_init;
        engine->timer.read = nv04_timer_read;
        engine->timer.takedown = nv04_timer_takedown;
        engine->fb.init = nv04_fb_init;
        engine->fb.takedown = nv04_fb_takedown;
        engine->graph.init = nv04_graph_init;
        engine->graph.takedown = nv04_graph_takedown;
        engine->graph.create_context = nv04_graph_create_context;
        engine->graph.destroy_context = nv04_graph_destroy_context;
        engine->graph.load_context = nv04_graph_load_context;
        engine->graph.save_context = nv04_graph_save_context;
        engine->fifo.channels = 16;
        engine->fifo.init = nouveau_fifo_init;
        engine->fifo.takedown = nouveau_stub_takedown;
        engine->fifo.channel_id = nv04_fifo_channel_id;
        engine->fifo.create_context = nv04_fifo_create_context;
        engine->fifo.destroy_context = nv04_fifo_destroy_context;
        engine->fifo.load_context = nv04_fifo_load_context;
        engine->fifo.save_context = nv04_fifo_save_context;
        break;
    case 0x10:
        engine->instmem.init = nv04_instmem_init;
        engine->instmem.takedown = nv04_instmem_takedown;
        engine->instmem.populate = nv04_instmem_populate;
        engine->instmem.clear = nv04_instmem_clear;
        engine->instmem.bind = nv04_instmem_bind;
        engine->instmem.unbind = nv04_instmem_unbind;
        engine->mc.init = nv04_mc_init;
        engine->mc.takedown = nv04_mc_takedown;
        engine->timer.init = nv04_timer_init;
        engine->timer.read = nv04_timer_read;
        engine->timer.takedown = nv04_timer_takedown;
        engine->fb.init = nv10_fb_init;
        engine->fb.takedown = nv10_fb_takedown;
        engine->graph.init = nv10_graph_init;
        engine->graph.takedown = nv10_graph_takedown;
        engine->graph.create_context = nv10_graph_create_context;
        engine->graph.destroy_context = nv10_graph_destroy_context;
        engine->graph.load_context = nv10_graph_load_context;
        engine->graph.save_context = nv10_graph_save_context;
        engine->fifo.channels = 32;
        engine->fifo.init = nouveau_fifo_init;
        engine->fifo.takedown = nouveau_stub_takedown;
        engine->fifo.channel_id = nv10_fifo_channel_id;
        engine->fifo.create_context = nv10_fifo_create_context;
        engine->fifo.destroy_context = nv10_fifo_destroy_context;
        engine->fifo.load_context = nv10_fifo_load_context;
        engine->fifo.save_context = nv10_fifo_save_context;
        break;
    case 0x20:
        engine->instmem.init = nv04_instmem_init;
        engine->instmem.takedown = nv04_instmem_takedown;
        engine->instmem.populate = nv04_instmem_populate;
        engine->instmem.clear = nv04_instmem_clear;
        engine->instmem.bind = nv04_instmem_bind;
        engine->instmem.unbind = nv04_instmem_unbind;
        engine->mc.init = nv04_mc_init;
        engine->mc.takedown = nv04_mc_takedown;
        engine->timer.init = nv04_timer_init;
        engine->timer.read = nv04_timer_read;
        engine->timer.takedown = nv04_timer_takedown;
        engine->fb.init = nv10_fb_init;
        engine->fb.takedown = nv10_fb_takedown;
        engine->graph.init = nv20_graph_init;
        engine->graph.takedown = nv20_graph_takedown;
        engine->graph.create_context = nv20_graph_create_context;
        engine->graph.destroy_context = nv20_graph_destroy_context;
        engine->graph.load_context = nv20_graph_load_context;
        engine->graph.save_context = nv20_graph_save_context;
        engine->fifo.channels = 32;
        engine->fifo.init = nouveau_fifo_init;
        engine->fifo.takedown = nouveau_stub_takedown;
        engine->fifo.channel_id = nv10_fifo_channel_id;
        engine->fifo.create_context = nv10_fifo_create_context;
        engine->fifo.destroy_context = nv10_fifo_destroy_context;
        engine->fifo.load_context = nv10_fifo_load_context;
        engine->fifo.save_context = nv10_fifo_save_context;
        break;
    case 0x30:
        engine->instmem.init = nv04_instmem_init;
        engine->instmem.takedown = nv04_instmem_takedown;
        engine->instmem.populate = nv04_instmem_populate;
        engine->instmem.clear = nv04_instmem_clear;
        engine->instmem.bind = nv04_instmem_bind;
        engine->instmem.unbind = nv04_instmem_unbind;
        engine->mc.init = nv04_mc_init;
        engine->mc.takedown = nv04_mc_takedown;
        engine->timer.init = nv04_timer_init;
        engine->timer.read = nv04_timer_read;
        engine->timer.takedown = nv04_timer_takedown;
        engine->fb.init = nv10_fb_init;
        engine->fb.takedown = nv10_fb_takedown;
        engine->graph.init = nv30_graph_init;
        engine->graph.takedown = nv20_graph_takedown;
        engine->graph.create_context = nv20_graph_create_context;
        engine->graph.destroy_context = nv20_graph_destroy_context;
        engine->graph.load_context = nv20_graph_load_context;
        engine->graph.save_context = nv20_graph_save_context;
        engine->fifo.channels = 32;
        engine->fifo.init = nouveau_fifo_init;
        engine->fifo.takedown = nouveau_stub_takedown;
        engine->fifo.channel_id = nv10_fifo_channel_id;
        engine->fifo.create_context = nv10_fifo_create_context;
        engine->fifo.destroy_context = nv10_fifo_destroy_context;
        engine->fifo.load_context = nv10_fifo_load_context;
        engine->fifo.save_context = nv10_fifo_save_context;
        break;
    case 0x40:
    case 0x60:
        engine->instmem.init = nv04_instmem_init;
        engine->instmem.takedown = nv04_instmem_takedown;
        engine->instmem.populate = nv04_instmem_populate;
        engine->instmem.clear = nv04_instmem_clear;
        engine->instmem.bind = nv04_instmem_bind;
        engine->instmem.unbind = nv04_instmem_unbind;
        engine->mc.init = nv40_mc_init;
        engine->mc.takedown = nv40_mc_takedown;
        engine->timer.init = nv04_timer_init;
        engine->timer.read = nv04_timer_read;
        engine->timer.takedown = nv04_timer_takedown;
        engine->fb.init = nv40_fb_init;
        engine->fb.takedown = nv40_fb_takedown;
        engine->graph.init = nv40_graph_init;
        engine->graph.takedown = nv40_graph_takedown;
        engine->graph.create_context = nv40_graph_create_context;
        engine->graph.destroy_context = nv40_graph_destroy_context;
        engine->graph.load_context = nv40_graph_load_context;
        engine->graph.save_context = nv40_graph_save_context;
        engine->fifo.channels = 32;
        engine->fifo.init = nv40_fifo_init;
        engine->fifo.takedown = nouveau_stub_takedown;
        engine->fifo.channel_id = nv10_fifo_channel_id;
        engine->fifo.create_context = nv40_fifo_create_context;
        engine->fifo.destroy_context = nv40_fifo_destroy_context;
        engine->fifo.load_context = nv40_fifo_load_context;
        engine->fifo.save_context = nv40_fifo_save_context;
        break;
    case 0x50:
    case 0x80: /* gotta love NVIDIA's consistency.. */
    case 0x90:
    case 0xa0:
        engine->instmem.init = nv50_instmem_init;
        engine->instmem.takedown = nv50_instmem_takedown;
        engine->instmem.populate = nv50_instmem_populate;
        engine->instmem.clear = nv50_instmem_clear;
        engine->instmem.bind = nv50_instmem_bind;
        engine->instmem.unbind = nv50_instmem_unbind;
        engine->mc.init = nv50_mc_init;
        engine->mc.takedown = nv50_mc_takedown;
        engine->timer.init = nv04_timer_init;
        engine->timer.read = nv04_timer_read;
        engine->timer.takedown = nv04_timer_takedown;
        engine->fb.init = nouveau_stub_init;
        engine->fb.takedown = nouveau_stub_takedown;
        engine->graph.init = nv50_graph_init;
        engine->graph.takedown = nv50_graph_takedown;
        engine->graph.create_context = nv50_graph_create_context;
        engine->graph.destroy_context = nv50_graph_destroy_context;
        engine->graph.load_context = nv50_graph_load_context;
        engine->graph.save_context = nv50_graph_save_context;
        engine->fifo.channels = 128;
        engine->fifo.init = nv50_fifo_init;
        engine->fifo.takedown = nv50_fifo_takedown;
        engine->fifo.channel_id = nv50_fifo_channel_id;
        engine->fifo.create_context = nv50_fifo_create_context;
        engine->fifo.destroy_context = nv50_fifo_destroy_context;
        engine->fifo.load_context = nv50_fifo_load_context;
        engine->fifo.save_context = nv50_fifo_save_context;
        break;
    default:
        DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset);
        return 1;
    }

    return 0;
}
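/* Bring the card up: determine the exact chipset, hook up the engine
 * pointers, then initialise instance memory, the memory manager, gpuobjs
 * and each hardware engine (PMC, PTIMER, PFB, PGRAPH, PFIFO) in turn,
 * before installing the IRQ handler and the kernel's DMA channel. */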
static int
nouveau_card_init(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_engine *engine;
    int ret;

    DRM_DEBUG("prev state = %d\n", dev_priv->init_state);

    if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
        return 0;

    /* Determine exact chipset we're running on */
    if (dev_priv->card_type < NV_10)
        dev_priv->chipset = dev_priv->card_type;
    else
        dev_priv->chipset =
            (NV_READ(NV03_PMC_BOOT_0) & 0x0ff00000) >> 20;

    /* Initialise internal driver API hooks */
    ret = nouveau_init_engine_ptrs(dev);
    if (ret) return ret;
    engine = &dev_priv->Engine;
    dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;

    ret = nouveau_gpuobj_early_init(dev);
    if (ret) return ret;

    /* Initialise instance memory, must happen before mem_init so we
     * know exactly how much VRAM we're able to use for "normal"
     * purposes. */
    ret = engine->instmem.init(dev);
    if (ret) return ret;

    /* Setup the memory manager */
    if (dev_priv->ttm) {
        ret = nouveau_mem_init_ttm(dev);
        if (ret) return ret;
    } else {
        ret = nouveau_mem_init(dev);
        if (ret) return ret;
    }

    ret = nouveau_gpuobj_init(dev);
    if (ret) return ret;

    /* Parse BIOS tables / Run init tables? */

    /* PMC */
    ret = engine->mc.init(dev);
    if (ret) return ret;

    /* PTIMER */
    ret = engine->timer.init(dev);
    if (ret) return ret;

    /* PFB */
    ret = engine->fb.init(dev);
    if (ret) return ret;

    /* PGRAPH */
    ret = engine->graph.init(dev);
    if (ret) return ret;

    /* PFIFO */
    ret = engine->fifo.init(dev);
    if (ret) return ret;

    /* This calls irq_preinstall, registers the irq handler and
     * calls irq_postinstall. */
    ret = drm_irq_install(dev);
    if (ret) return ret;

    /* what about PVIDEO/PCRTC/PRAMDAC etc? */

    ret = nouveau_dma_channel_init(dev);
    if (ret) return ret;

    ret = nouveau_backlight_init(dev);
    if (ret)
        DRM_ERROR("Error code %d when trying to register backlight\n",
                  ret);

    dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
    return 0;
}
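/* Tear the card down again, roughly in the reverse order of
 * nouveau_card_init(). */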
static void nouveau_card_takedown(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_engine *engine = &dev_priv->Engine;

    DRM_DEBUG("prev state = %d\n", dev_priv->init_state);

    if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
        nouveau_backlight_exit(dev);

        nouveau_dma_channel_takedown(dev);

        engine->fifo.takedown(dev);
        engine->graph.takedown(dev);
        engine->fb.takedown(dev);
        engine->timer.takedown(dev);
        engine->mc.takedown(dev);

        nouveau_sgdma_nottm_hack_takedown(dev);
        nouveau_sgdma_takedown(dev);

        nouveau_gpuobj_takedown(dev);
        nouveau_gpuobj_del(dev, &dev_priv->vm_vram_pt);

        nouveau_mem_close(dev);
        engine->instmem.takedown(dev);

        drm_irq_uninstall(dev);

        nouveau_gpuobj_late_takedown(dev);

        dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
    }
}
/* here a client dies, release the stuff that was allocated for its
 * file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;

    nouveau_fifo_cleanup(dev, file_priv);
    nouveau_mem_release(file_priv, dev_priv->fb_heap);
    nouveau_mem_release(file_priv, dev_priv->agp_heap);
    nouveau_mem_release(file_priv, dev_priv->pci_heap);
}
/* first module load, setup the mmio/fb mapping */
int nouveau_firstopen(struct drm_device *dev)
{
#if defined(__powerpc__)
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct device_node *dn;
#endif
    int ret;

    /* Map any PCI resources we need on the card */
    ret = nouveau_init_card_mappings(dev);
    if (ret) return ret;

#if defined(__powerpc__)
    /* Put the card in BE mode if it's not */
    if (NV_READ(NV03_PMC_BOOT_1))
        NV_WRITE(NV03_PMC_BOOT_1, 0x00000001);
#endif

#if defined(__linux__) && defined(__powerpc__)
    /* if we have an OF card, copy vbios to RAMIN */
    dn = pci_device_to_OF_node(dev->pdev);
    if (dn) {
        int size;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
        const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size);
#else
        const uint32_t *bios = get_property(dn, "NVDA,BMP", &size);
#endif
        if (bios) {
            int i;
            for (i = 0; i < size; i += 4)
                NV_WI32(i, bios[i / 4]);
            DRM_INFO("OF bios successfully copied (%d bytes)\n", size);
        } else {
            DRM_INFO("Unable to get the OF bios\n");
        }
    } else {
        DRM_INFO("Unable to get the OF node\n");
    }
#endif

    return 0;
}
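/* Bit n of these masks appears to flag chipset 0x4n as NV40-class or
 * NV44-class respectively; they are used below when classifying the 0x4x
 * family, with selection criteria borrowed from NV40EXA. */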
#define NV40_CHIPSET_MASK 0x00000baf
#define NV44_CHIPSET_MASK 0x00005450

int nouveau_load(struct drm_device *dev, unsigned long flags)
{
    struct drm_nouveau_private *dev_priv;
    void __iomem *regs;
    uint32_t reg0, reg1;
    uint8_t architecture = 0;

    dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
    if (!dev_priv)
        return -ENOMEM;

    dev_priv->flags = flags & NOUVEAU_FLAGS;
    dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;

    DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n",
              dev->pci_vendor, dev->pci_device, dev->pdev->class);

    /* Time to determine the card architecture */
    regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8);
    if (!regs) {
        DRM_ERROR("Could not ioremap to determine register\n");
        return -ENOMEM;
    }

    reg0 = readl(regs + NV03_PMC_BOOT_0);
    reg1 = readl(regs + NV03_PMC_BOOT_1);
#if defined(__powerpc__)
    if (reg1)
        reg0 = ___swab32(reg0);
#endif

    /* We're dealing with >=NV10 */
    if ((reg0 & 0x0f000000) > 0) {
        /* Bits 27-20 contain the architecture in hex */
        architecture = (reg0 & 0xff00000) >> 20;
    /* NV04 or NV05 */
    } else if ((reg0 & 0xff00fff0) == 0x20004000) {
        architecture = 0x04;
    }

    iounmap(regs);

    if (architecture >= 0x80) {
        dev_priv->card_type = NV_50;
    } else if (architecture >= 0x60) {
        /* FIXME we need to figure out who's who for NV6x */
        dev_priv->card_type = NV_44;
    } else if (architecture >= 0x50) {
        dev_priv->card_type = NV_50;
    } else if (architecture >= 0x40) {
        uint8_t subarch = architecture & 0xf;
        /* Selection criteria borrowed from NV40EXA */
        if (NV40_CHIPSET_MASK & (1 << subarch)) {
            dev_priv->card_type = NV_40;
        } else if (NV44_CHIPSET_MASK & (1 << subarch)) {
            dev_priv->card_type = NV_44;
        } else {
            dev_priv->card_type = NV_UNKNOWN;
        }
    } else if (architecture >= 0x30) {
        dev_priv->card_type = NV_30;
    } else if (architecture >= 0x20) {
        dev_priv->card_type = NV_20;
    } else if (architecture >= 0x17) {
        dev_priv->card_type = NV_17;
    } else if (architecture >= 0x11) {
        dev_priv->card_type = NV_11;
    } else if (architecture >= 0x10) {
        dev_priv->card_type = NV_10;
    } else if (architecture >= 0x04) {
        dev_priv->card_type = NV_04;
    } else {
        dev_priv->card_type = NV_UNKNOWN;
    }

    DRM_INFO("Detected an NV%d generation card (0x%08x)\n",
             dev_priv->card_type, reg0);

    if (dev_priv->card_type == NV_UNKNOWN)
        return -EINVAL;

    /* Special flags */
    if (dev->pci_device == 0x01a0) {
        dev_priv->flags |= NV_NFORCE;
    } else if (dev->pci_device == 0x01f0) {
        dev_priv->flags |= NV_NFORCE2;
    }

    dev->dev_private = (void *)dev_priv;

    return 0;
}
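/* Last client is gone: shut the card down and release the framebuffer MTRR
 * if one was set up. */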
void nouveau_lastclose(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;

    /* In the case of an error dev_priv may not be allocated yet */
    if (dev_priv && dev_priv->card_type) {
        nouveau_card_takedown(dev);

        if (dev_priv->fb_mtrr > 0) {
            drm_mtrr_del(dev_priv->fb_mtrr,
                         drm_get_resource_start(dev, 1),
                         nouveau_mem_fb_amount(dev), DRM_MTRR_WC);
            dev_priv->fb_mtrr = 0;
        }
    }
}
int nouveau_unload(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;

    drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
    dev->dev_private = NULL;
    return 0;
}
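/* CARD_INIT ioctl: full hardware initialisation is deferred until a client
 * explicitly requests it. */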
int
nouveau_ioctl_card_init(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
    return nouveau_card_init(dev);
}
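/* GETPARAM ioctl: report the chipset, PCI ids, bus type, and the physical
 * addresses and sizes of the framebuffer and GART apertures to userspace. */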
int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct drm_nouveau_getparam *getparam = data;

    NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

    switch (getparam->param) {
    case NOUVEAU_GETPARAM_CHIPSET_ID:
        getparam->value = dev_priv->chipset;
        break;
    case NOUVEAU_GETPARAM_PCI_VENDOR:
        getparam->value = dev->pci_vendor;
        break;
    case NOUVEAU_GETPARAM_PCI_DEVICE:
        getparam->value = dev->pci_device;
        break;
    case NOUVEAU_GETPARAM_BUS_TYPE:
        if (drm_device_is_agp(dev))
            getparam->value = NV_AGP;
        else if (drm_device_is_pcie(dev))
            getparam->value = NV_PCIE;
        else
            getparam->value = NV_PCI;
        break;
    case NOUVEAU_GETPARAM_FB_PHYSICAL:
        getparam->value = dev_priv->fb_phys;
        break;
    case NOUVEAU_GETPARAM_AGP_PHYSICAL:
        getparam->value = dev_priv->gart_info.aper_base;
        break;
    case NOUVEAU_GETPARAM_PCI_PHYSICAL:
        if (dev->sg) {
            getparam->value = (unsigned long)dev->sg->virtual;
        } else {
            DRM_ERROR("Requested PCIGART address, "
                      "while no PCIGART was created\n");
            return -EINVAL;
        }
        break;
    case NOUVEAU_GETPARAM_FB_SIZE:
        getparam->value = dev_priv->fb_available_size;
        break;
    case NOUVEAU_GETPARAM_AGP_SIZE:
        getparam->value = dev_priv->gart_info.aper_size;
        break;
    case NOUVEAU_GETPARAM_MM_ENABLED:
        getparam->value = 0;
        break;
    case NOUVEAU_GETPARAM_VM_VRAM_BASE:
        if (dev_priv->card_type >= NV_50)
            getparam->value = 0x20000000;
        else
            getparam->value = 0;
        break;
    default:
        DRM_ERROR("unknown parameter %lld\n", getparam->param);
        return -EINVAL;
    }

    return 0;
}
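/* SETPARAM ioctl: lets userspace choose where command buffers are placed
 * (AGP, FB or PCI memory) and how large they are. */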
int nouveau_ioctl_setparam(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct drm_nouveau_setparam *setparam = data;

    NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

    switch (setparam->param) {
    case NOUVEAU_SETPARAM_CMDBUF_LOCATION:
        switch (setparam->value) {
        case NOUVEAU_MEM_AGP:
        case NOUVEAU_MEM_FB:
        case NOUVEAU_MEM_PCI:
        case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE:
            break;
        default:
            DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n",
                      setparam->value);
            return -EINVAL;
        }
        dev_priv->config.cmdbuf.location = setparam->value;
        break;
    case NOUVEAU_SETPARAM_CMDBUF_SIZE:
        dev_priv->config.cmdbuf.size = setparam->value;
        break;
    default:
        DRM_ERROR("unknown parameter %lld\n", setparam->param);
        return -EINVAL;
    }

    return 0;
}
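/* Poll PGRAPH until it reports idle, giving up after roughly one second of
 * PTIMER time.  NV50 cards skip this polling sequence entirely. */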
void nouveau_wait_for_idle(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;

    switch (dev_priv->card_type) {
    case NV_50:
        break;
    default: {
        /* This stuff is more or less a copy of what is seen
         * in an nv28 kmmio dump. */
        uint64_t started = dev_priv->Engine.timer.read(dev);
        uint64_t stopped = started;
        uint32_t status;

        do {
            uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE);
            (void)pmc_e;
            status = NV_READ(NV04_PGRAPH_STATUS);
            if (!status)
                break;
            stopped = dev_priv->Engine.timer.read(dev);
        /* It'll never wrap anyway... */
        } while (stopped - started < 1000000000ULL);

        if (status)
            DRM_ERROR("timed out with status 0x%08x\n", status);
    }
    }
}
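/* Suspend: wait for every channel to go idle, save the active fifo and
 * graph context, and copy the used part of RAMIN out to system memory so
 * it can be written back on resume. */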
static int nouveau_suspend(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_suspend_resume *susres = &dev_priv->susres;
    struct nouveau_engine *engine = &dev_priv->Engine;
    struct nouveau_channel *current_fifo;
    struct mem_block *p;
    int i;

    if (dev_priv->card_type >= NV_50) {
        DRM_DEBUG("Suspend not supported for NV50+\n");
        return -ENODEV;
    }

    drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
    susres->ramin_size = 0;
    list_for_each(p, dev_priv->ramin_heap)
        if (p->file_priv && (p->start + p->size) > susres->ramin_size)
            susres->ramin_size = p->start + p->size;
    if (!(susres->ramin_copy = drm_alloc(susres->ramin_size,
                                         DRM_MEM_DRIVER))) {
        DRM_ERROR("Couldn't alloc RAMIN backing for suspend\n");
        return -ENOMEM;
    }

    for (i = 0; i < engine->fifo.channels; i++) {
        uint64_t t_start = engine->timer.read(dev);

        if (dev_priv->fifos[i] == NULL)
            continue;

        /* Give the channel a chance to idle, wait 2s (hopefully) */
        while (!nouveau_channel_idle(dev_priv->fifos[i]))
            if (engine->timer.read(dev) - t_start > 2000000000ULL) {
                DRM_ERROR("Failed to idle channel %d before "
                          "suspend.", dev_priv->fifos[i]->id);
                return -EBUSY;
            }
    }
    nouveau_wait_for_idle(dev);

    NV_WRITE(NV04_PGRAPH_FIFO, 0);
    /* disable the fifo caches */
    NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
    NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
             NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
    NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
    NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);

    susres->fifo_mode = NV_READ(NV04_PFIFO_MODE);

    if (dev_priv->card_type >= NV_10) {
        susres->graph_state = NV_READ(NV10_PGRAPH_STATE);
        susres->graph_ctx_control = NV_READ(NV10_PGRAPH_CTX_CONTROL);
    } else {
        susres->graph_state = NV_READ(NV04_PGRAPH_STATE);
        susres->graph_ctx_control = NV_READ(NV04_PGRAPH_CTX_CONTROL);
    }

    current_fifo = dev_priv->fifos[engine->fifo.channel_id(dev)];
    /* channel may have been deleted but no replacement yet loaded */
    if (current_fifo) {
        engine->fifo.save_context(current_fifo);
        engine->graph.save_context(current_fifo);
    }
    nouveau_wait_for_idle(dev);

    for (i = 0; i < susres->ramin_size / 4; i++)
        susres->ramin_copy[i] = NV_RI32(i << 2);

    /* reenable the fifo caches */
    NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
             NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
    NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
    NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
    NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
    NV_WRITE(NV04_PGRAPH_FIFO, 1);

    return 0;
}
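/* Resume: re-enable AGP if the GART is AGP-backed, write the RAMIN backup
 * back (twice, since engine init clobbers parts of it), re-initialise the
 * engines and reload the saved fifo/graph context and state. */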
static int nouveau_resume(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_suspend_resume *susres = &dev_priv->susres;
    struct nouveau_engine *engine = &dev_priv->Engine;
    int i;

    if (!susres->ramin_copy)
        return -EINVAL;

    DRM_DEBUG("Doing resume\n");

    if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
        struct drm_agp_info info;
        struct drm_agp_mode mode;

        /* agp bridge drivers don't re-enable agp on resume. lame. */
        if ((i = drm_agp_info(dev, &info))) {
            DRM_ERROR("Unable to get AGP info: %d\n", i);
            return i;
        }
        mode.mode = info.mode;
        if ((i = drm_agp_enable(dev, mode))) {
            DRM_ERROR("Unable to enable AGP: %d\n", i);
            return i;
        }
    }

    for (i = 0; i < susres->ramin_size / 4; i++)
        NV_WI32(i << 2, susres->ramin_copy[i]);

    engine->mc.init(dev);
    engine->timer.init(dev);
    engine->fb.init(dev);
    engine->graph.init(dev);
    engine->fifo.init(dev);

    NV_WRITE(NV04_PGRAPH_FIFO, 0);
    /* disable the fifo caches */
    NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
    NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
             NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
    NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
    NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);

    /* PMC power cycling PFIFO in init clobbers some of the stuff stored in
     * PRAMIN (such as NV04_PFIFO_CACHE1_DMA_INSTANCE). this is unhelpful,
     * so write the RAMIN backup out again. */
    for (i = 0; i < susres->ramin_size / 4; i++)
        NV_WI32(i << 2, susres->ramin_copy[i]);

    engine->fifo.load_context(dev_priv->fifos[0]);
    NV_WRITE(NV04_PFIFO_MODE, susres->fifo_mode);

    engine->graph.load_context(dev_priv->fifos[0]);
    nouveau_wait_for_idle(dev);

    if (dev_priv->card_type >= NV_10) {
        NV_WRITE(NV10_PGRAPH_STATE, susres->graph_state);
        NV_WRITE(NV10_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
    } else {
        NV_WRITE(NV04_PGRAPH_STATE, susres->graph_state);
        NV_WRITE(NV04_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
    }

    /* reenable the fifo caches */
    NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
             NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
    NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
    NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
    NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
    NV_WRITE(NV04_PGRAPH_FIFO, 0x1);

    if (dev->irq_enabled)
        nouveau_irq_postinstall(dev);

    drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
    susres->ramin_copy = NULL;
    susres->ramin_size = 0;

    return 0;
}
int nouveau_ioctl_suspend(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
    NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

    return nouveau_suspend(dev);
}

int nouveau_ioctl_resume(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
    NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

    return nouveau_resume(dev);
}