/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Skeggs <bskeggs@redhat.com>
 *    Roy Spliet <r.spliet@student.tudelft.nl>
 */

#include "drmP.h"
#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
#include "nouveau_fifo.h"
#include "nouveau_fence.h"

/*
 * NV10-NV40 tiling helpers
 */

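/* Reprogram tile region 'i' while the GPU is quiesced: FIFO cache
 * pulls are stopped and the card drained idle first, presumably so no
 * channel can touch the region while the FB and per-engine tile state
 * change under it.
 */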
static void
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tile, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i = tile - dev_priv->tile.reg, j;
	unsigned long save;

	nouveau_fence_unref(&tile->fence);

	if (tile->pitch)
		pfb->free_tile_region(dev, i);

	if (pitch)
		pfb->init_tile_region(dev, i, addr, size, pitch, flags);

	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
	nv04_fifo_cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pfb->set_tile_region(dev, i);
	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
			dev_priv->eng[j]->set_tile_region(dev, i);
	}

	nv04_fifo_cache_pull(dev, true);
	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}

static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	spin_lock(&dev_priv->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&dev_priv->tile.lock);
	return tile;
}

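/* Release a tile region. When a fence is supplied the region stays
 * pending on it, so nv10_mem_get_tile_region() will not hand it out
 * again until the GPU is done with the old contents.
 */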
void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
			 struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (tile) {
		spin_lock(&dev_priv->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&dev_priv->tile.lock);
	}
}

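/* Scan all tile regions for a free slot to cover [addr, addr + size)
 * with the given pitch/flags; stale regions encountered along the way
 * are killed off as a side effect.
 */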
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->num_tiles; i++) {
		tile = nv10_mem_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && tile->pitch) {
			/* Kill an unused tile region. */
			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_mem_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_mem_update_tile_region(dev, found, addr, size,
					    pitch, flags);
	return found;
}

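/* Tear down VRAM handling: release the TTM bo device and global TTM
 * state, and drop the write-combining MTRR that was set up over BAR1.
 */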
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr,
			     pci_resource_start(dev->pdev, 1),
			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = -1;
	}
}

void
nouveau_mem_gart_fini(struct drm_device *dev)
{
	nouveau_sgdma_takedown(dev);

	if (drm_core_has_AGP(dev) && dev->agp) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
}

bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
	if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
		return true;

	return false;
}

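/* Filter the AGP mode word: fast writes are masked off on nv18 (see
 * comment below), and a rate forced via the nouveau_agpmode option
 * overrides the negotiated one; AGP 3.0 rates are specified in
 * multiples of four, hence the divide.
 */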
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * FW seems to be broken on nv18, it makes the card lock up
	 * randomly.
	 */
	if (dev_priv->chipset == 0x18)
		mode &= ~PCI_AGP_COMMAND_FW;

	/*
	 * AGP mode set in the command line.
	 */
	if (nouveau_agpmode > 0) {
		bool agpv3 = mode & 0x8;
		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;

		mode = (mode & ~0x7) | (rate & 0x7);
	}

	return mode;
}

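/* Reset the card's AGP controller by clearing the busmaster and AGP
 * enable bits in PCI config space and then restoring them; fast writes
 * are disabled up front so the bridge cannot lock us out while the
 * controller is down.
 */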
static void
nouveau_mem_reset_agp(struct drm_device *dev)
{
	uint32_t saved_pci_nv_1, pmc_enable;
	int ret;

	/* First of all, disable fast writes, otherwise if it's
	 * already enabled in the AGP bridge and we disable the card's
	 * AGP controller we might be locking ourselves out of it. */
	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
		struct drm_agp_info info;
		struct drm_agp_mode mode;

		ret = drm_agp_info(dev, &info);
		if (ret)
			return;

		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
		ret = drm_agp_enable(dev, mode);
		if (ret)
			return;
	}

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* disable AGP */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}

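/* Acquire and enable AGP, recording the aperture base/size so that
 * nouveau_mem_gart_init() below can place the TTM TT domain on it.
 */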
int
nouveau_mem_init_agp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	nouveau_mem_reset_agp(dev);

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = get_agp_mode(dev, info.mode);
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type      = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
	return 0;
}

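/* Names for the VRAM types above; the table doubles as the lookup used
 * to honour a type forced with the nouveau_vram_type module option.
 */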
static const struct vram_types {
	int value;
	const char *name;
} vram_type_map[] = {
	{ NV_MEM_TYPE_STOLEN , "stolen system memory" },
	{ NV_MEM_TYPE_SGRAM  , "SGRAM" },
	{ NV_MEM_TYPE_SDRAM  , "SDRAM" },
	{ NV_MEM_TYPE_DDR1   , "DDR1" },
	{ NV_MEM_TYPE_DDR2   , "DDR2" },
	{ NV_MEM_TYPE_DDR3   , "DDR3" },
	{ NV_MEM_TYPE_GDDR2  , "GDDR2" },
	{ NV_MEM_TYPE_GDDR3  , "GDDR3" },
	{ NV_MEM_TYPE_GDDR4  , "GDDR4" },
	{ NV_MEM_TYPE_GDDR5  , "GDDR5" },
	{ NV_MEM_TYPE_UNKNOWN, "unknown type" }
};

int
nouveau_mem_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	const struct vram_types *vram_type;
	int ret, dma_bits;

	dma_bits = 32;
	if (dev_priv->card_type >= NV_50) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
			dma_bits = 40;
	} else if (0 && pci_is_pcie(dev->pdev) &&
		   dev_priv->chipset  > 0x40 &&
		   dev_priv->chipset != 0x45) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
			dma_bits = 39;
	}

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;
	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		/* Reset to default value. */
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	vram_type = vram_type_map;
	while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
		if (nouveau_vram_type) {
			if (!strcasecmp(nouveau_vram_type, vram_type->name)) {
				dev_priv->vram_type = vram_type->value;
				break;
			}
		} else {
			if (vram_type->value == dev_priv->vram_type)
				break;
		}
		vram_type++;
	}

	NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
		(int)(dev_priv->vram_size >> 20), vram_type->name);
	if (dev_priv->vram_sys_base) {
		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
			dev_priv->vram_sys_base);
	}

	dev_priv->fb_available_size = dev_priv->vram_size;
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
				     0, 0, NULL, &dev_priv->vga_ram);
		if (ret == 0)
			ret = nouveau_bo_pin(dev_priv->vga_ram,
					     TTM_PL_FLAG_VRAM);

		if (ret) {
			NV_WARN(dev, "failed to reserve VGA memory\n");
			nouveau_bo_ref(NULL, &dev_priv->vga_ram);
		}
	}

	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
					 pci_resource_len(dev->pdev, 1),
					 DRM_MTRR_WC);
	return 0;
}

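/* Bring up the GART: AGP where present (and not disabled with
 * nouveau_agpmode=0), SGDMA otherwise, then put the TTM TT memory type
 * on top of the resulting aperture.
 */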
int
nouveau_mem_gart_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret;

	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
		ret = nouveau_mem_init_agp(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
			return ret;
		}
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	return 0;
}

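/* Per-generation memory timing calculators. Each translates a VBIOS
 * perf table entry into the PFB timing register layout of its chipset
 * (0x100220+ on nv40/nv50, 0x10f290+ on nvc0), keeping the boot-time
 * values for the fields it leaves untouched.
 */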
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	/* XXX: I don't trust the -1's and +1's... they must come
	 *      from somewhere! */
	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    1 << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
		    (e->tCL + 2 - (t->tCWL - 1));

	t->reg[2] = 0x20200000 |
		    ((t->tCWL - 1) << 24 |
		     e->tRRD << 16 |
		     e->tRCDWR << 8 |
		     e->tRCDRD);

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2]);
	return 0;
}

static int
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct bit_entry P;
	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

	if (bit_table(dev, 'P', &P))
		return -EINVAL;

	switch (min(len, (u8) 22)) {
	case 22:
		unk21 = e->tUNK_21;
	case 21:
		unk20 = e->tUNK_20;
	case 20:
		if (e->tCWL > 0)
			t->tCWL = e->tCWL;
	case 19:
		unk18 = e->tUNK_18;
		break;
	}

	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    max(unk18, (u8) 1) << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8;

	t->reg[2] = ((t->tCWL - 1) << 24 |
		     e->tRRD << 16 |
		     e->tRCDWR << 8 |
		     e->tRCDRD);

	t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

	t->reg[8] = boot->reg[8] & 0xffffff00;

	if (P.version == 1) {
		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

		t->reg[3] = (0x14 + e->tCL) << 24 |
			    0x16 << 16 |
			    (e->tCL - 1) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= boot->reg[4] & 0xffff0000;

		t->reg[6] = (0x33 - t->tCWL) << 16 |
			    t->tCWL << 8 |
			    (0x2e + e->tCL - t->tCWL);

		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
		if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
			t->reg[5] |= (e->tCL + 3) << 8;
			t->reg[6] |= (t->tCWL - 2) << 8;
			t->reg[8] |= (e->tCL - 4);
		} else {
			t->reg[5] |= (e->tCL + 2) << 8;
			t->reg[6] |= t->tCWL << 8;
			t->reg[8] |= (e->tCL - 2);
		}
	} else {
		t->reg[1] |= (5 + e->tCL - (t->tCWL));

		/* XXX: 0xb? 0x30? */
		t->reg[3] = (0x30 + e->tCL) << 24 |
			    (boot->reg[3] & 0x00ff0000) |
			    (0xb + e->tCL) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= (unk20 << 24 | unk21 << 16);

		t->reg[5] |= (t->tCWL + 6) << 8;

		t->reg[6] = (0x5a + e->tCL) << 16 |
			    (6 - e->tCL + t->tCWL) << 8 |
			    (0x50 + e->tCL - t->tCWL);

		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
		t->reg[7] = (tmp7_3 << 24) |
			    ((tmp7_3 - 6 + e->tCL) << 16) |
			    0x202;
	}

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
	NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
	return 0;
}

static int
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (len < 15)
		return -EINVAL;

	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
		     e->tRFC << 8 | e->tRC);

	t->reg[1] = (boot->reg[1] & 0xff000000) |
		    (e->tRCDWR & 0x0f) << 20 |
		    (e->tRCDRD & 0x0f) << 14 |
		    (e->tCWL << 7) |
		    (e->tCL & 0x0f);

	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
		    e->tWR << 16 | e->tWTR << 8;

	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
		    (e->tUNK_21 & 0xf) << 5 |
		    (e->tUNK_13 & 0x1f);

	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
		    (e->tRRD & 0x1f) << 15;

	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
	return 0;
}

/*
 * MR generation methods
 */

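/* Each helper below builds the DRAM mode register (MR) values for a
 * timing entry, starting from the boot-time MRs and patching in CAS
 * latency, write recovery, ODT and drive strength as appropriate for
 * the memory type; out-of-range timings are rejected with -ERANGE.
 */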
static int
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	t->drive_strength = 0;
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0x100f) |
		   (e->tCL) << 4 |
		   (e->tWR - 1) << 9;
	t->mr[1] = (boot->mr[1] & 0x101fbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5;

	NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
	return 0;
}

uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};

static int
nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	u8 cl = e->tCL - 4;

	t->drive_strength = 0;
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (e->tCWL < 5) {
		NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
		return -ERANGE;
	}

	t->mr[0] = (boot->mr[0] & 0x180b) |
		   /* CAS */
		   (cl & 0x7) << 4 |
		   (cl & 0x8) >> 1 |
		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
	t->mr[1] = (boot->mr[1] & 0x101dbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5 |
		   (t->odt & 0x4) << 7;
	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
	return 0;
}

uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};

static int
nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (len < 15) {
		t->drive_strength = boot->drive_strength;
		t->odt = boot->odt;
	} else {
		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0xe0b) |
		   /* CAS */
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
		   (t->odt << 2) |
		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
	t->mr[2] = boot->mr[2];

	NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
		 t->mr[0], t->mr[1], t->mr[2]);
	return 0;
}

static int
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (len < 15) {
		t->drive_strength = boot->drive_strength;
		t->odt = boot->odt;
	} else {
		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
		t->odt = e->RAM_FT1 & 0x03;
	}

	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0x007) |
		   ((e->tCL - 5) << 3) |
		   ((e->tWR - 4) << 8);
	t->mr[1] = (boot->mr[1] & 0x1007f0) |
		   t->drive_strength |
		   (t->odt << 2);

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
	return 0;
}

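/* Top-level timing calculation: fetch the perf table entry for the
 * requested frequency, run the per-generation register calculator,
 * then the MR generator for the fitted VRAM type, and finally fold in
 * the ramcfg DLL-off bit.
 */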
int
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
			struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
	struct nouveau_pm_tbl_entry *e;
	u8 ver, len, *ptr, *ramcfg;
	int ret;

	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
	if (!ptr || ptr[0] == 0x00) {
		*t = *boot;
		return 0;
	}
	e = (struct nouveau_pm_tbl_entry *)ptr;

	t->tCWL = boot->tCWL;

	switch (dev_priv->card_type) {
	case NV_40:
		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_50:
		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_C0:
	case NV_D0:
		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -ENODEV;
		break;
	}

	/* if the timing calc above failed, !ret == 0 selects the default */
	switch (dev_priv->vram_type * !ret) {
	case NV_MEM_TYPE_GDDR3:
		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_GDDR5:
		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR2:
		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR3:
		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
	if (ramcfg) {
		int dll_off;

		if (ver == 0x00)
			dll_off = !!(ramcfg[3] & 0x04);
		else
			dll_off = !!(ramcfg[2] & 0x40);

		switch (dev_priv->vram_type) {
		case NV_MEM_TYPE_GDDR3:
			t->mr[1] &= ~0x00000040;
			t->mr[1] |=  0x00000040 * dll_off;
			break;
		default:
			t->mr[1] &= ~0x00000001;
			t->mr[1] |=  0x00000001 * dll_off;
			break;
		}
	}

	return ret;
}

void
nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 timing_base, timing_regs, mr_base;
	int i;

	if (dev_priv->card_type >= 0xC0) {
		timing_base = 0x10f290;
		mr_base = 0x10f300;
	} else {
		timing_base = 0x100220;
		mr_base = 0x1002c0;
	}

	t->id = -1;

	switch (dev_priv->card_type) {
	case NV_50:
		timing_regs = 9;
		break;
	case NV_C0:
	case NV_D0:
		timing_regs = 5;
		break;
	case NV_40:
		timing_regs = 3;
		break;
	default:
		timing_regs = 0;
		return;
	}
	for (i = 0; i < timing_regs; i++)
		t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));

	t->tCWL = 0;
	if (dev_priv->card_type < NV_C0) {
		t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
	} else if (dev_priv->card_type <= NV_D0) {
		t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
	}

	t->mr[0] = nv_rd32(dev, mr_base);
	t->mr[1] = nv_rd32(dev, mr_base + 0x04);
	t->mr[2] = nv_rd32(dev, mr_base + 0x20);
	t->mr[3] = nv_rd32(dev, mr_base + 0x24);

	t->odt = 0;
	t->drive_strength = 0;

	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_DDR3:
		t->odt |= (t->mr[1] & 0x200) >> 7;
	case NV_MEM_TYPE_DDR2:
		t->odt |= (t->mr[1] & 0x04) >> 2 |
			  (t->mr[1] & 0x40) >> 5;
		break;
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_GDDR5:
		t->drive_strength = t->mr[1] & 0x03;
		t->odt = (t->mr[1] & 0x0c) >> 2;
		break;
	default:
		break;
	}
}

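/* Drive an actual memory reclock through the chipset callbacks in
 * 'exec': switch the DLL off first if the target mode wants it off,
 * park the DRAM in self-refresh around the clock change, rewrite the
 * MRs and PFB timings, and finish with a DLL reset where it stays
 * enabled. The t* delays are presumably in nanoseconds.
 */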
int
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
		 struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
	struct nouveau_pm_memtiming *info = &perflvl->timing;
	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
	u32 mr1_dlloff;

	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_DDR2:
		tDLLK = 2000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_DDR3:
		tDLLK = 12000;
		tCKSRE = 2000;
		tXS = 1000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_GDDR3:
		tDLLK = 40000;
		mr1_dlloff = 0x00000040;
		break;
	default:
		NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
		return -ENODEV;
	}

	/* fetch current MRs */
	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_DDR3:
		mr[2] = exec->mrg(exec, 2);
	default:
		mr[1] = exec->mrg(exec, 1);
		mr[0] = exec->mrg(exec, 0);
		break;
	}

	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
		exec->precharge(exec);
		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
		exec->wait(exec, tMRD);
	}

	/* enter self-refresh mode */
	exec->precharge(exec);
	exec->refresh(exec);
	exec->refresh(exec);
	exec->refresh_auto(exec, false);
	exec->refresh_self(exec, true);
	exec->wait(exec, tCKSRE);

	/* modify input clock frequency */
	exec->clock_set(exec);

	/* exit self-refresh mode */
	exec->wait(exec, tCKSRX);
	exec->precharge(exec);
	exec->refresh_self(exec, false);
	exec->refresh_auto(exec, true);
	exec->wait(exec, tXS);
	exec->wait(exec, tXS);

	/* update MRs */
	if (mr[2] != info->mr[2]) {
		exec->mrs (exec, 2, info->mr[2]);
		exec->wait(exec, tMRD);
	}

	if (mr[1] != info->mr[1]) {
		/* need to keep DLL off until later, at least on GDDR3 */
		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
		exec->wait(exec, tMRD);
	}

	if (mr[0] != info->mr[0]) {
		exec->mrs (exec, 0, info->mr[0]);
		exec->wait(exec, tMRD);
	}

	/* update PFB timing registers */
	exec->timing_set(exec);

	/* DLL (enable + ) reset */
	if (!(info->mr[1] & mr1_dlloff)) {
		if (mr[1] & mr1_dlloff) {
			exec->mrs (exec, 1, info->mr[1]);
			exec->wait(exec, tMRD);
		}
		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
		exec->wait(exec, tMRD);
		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
		exec->wait(exec, tMRD);
		exec->wait(exec, tDLLK);
		if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
			exec->precharge(exec);
	}

	return 0;
}

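/* Look up the VRAM type in the VBIOS 'M' table, indexed by the ramcfg
 * strap read from 0x101000.
 */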
int
nouveau_mem_vbios_type(struct drm_device *dev)
{
	struct bit_entry M;
	u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
	if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
		u8 *table = ROMPTR(dev, M.data[3]);
		if (table && table[0] == 0x10 && ramcfg < table[3]) {
			u8 *entry = table + table[1] + (ramcfg * table[2]);
			switch (entry[0] & 0x0f) {
			case 0: return NV_MEM_TYPE_DDR2;
			case 1: return NV_MEM_TYPE_DDR3;
			case 2: return NV_MEM_TYPE_GDDR3;
			case 3: return NV_MEM_TYPE_GDDR5;
			default:
				break;
			}
		}
	}
	return NV_MEM_TYPE_UNKNOWN;
}

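/* TTM memory-type manager backends. VRAM allocations go through the
 * nouveau_vram_engine allocator; GART allocations only get a struct
 * nouveau_mem placeholder here, presumably because the real placement
 * happens later when the buffer is bound.
 */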
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}

static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;

	nouveau_mem_node_cleanup(mem->mm_node);
	vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
			mem->page_alignment << PAGE_SHIFT, size_nc,
			(nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s  block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_mem *node;

	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
		     dev_priv->gart_info.aper_size))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->page_shift = 12;

	mem->mm_node = node;
	mem->start = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};