/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

/*
 * NV10-NV40 tiling helpers
 */

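/* Update a single tile region: tear down the old PFB state, program the
 * new one, and do it with PFIFO paused and the card idled so no in-flight
 * work observes a half-updated region. */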
static void
nv10_mem_update_tile_region(struct drm_device *dev,
                            struct nouveau_tile_reg *tile, uint32_t addr,
                            uint32_t size, uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        int i = tile - dev_priv->tile.reg;
        unsigned long save;

        nouveau_fence_unref(&tile->fence);

        if (tile->pitch)
                pfb->free_tile_region(dev, i);

        if (pitch)
                pfb->init_tile_region(dev, i, addr, size, pitch, flags);

        spin_lock_irqsave(&dev_priv->context_switch_lock, save);
        pfifo->reassign(dev, false);
        pfifo->cache_pull(dev, false);

        nouveau_wait_for_idle(dev);

        pfb->set_tile_region(dev, i);
        pgraph->set_tile_region(dev, i);

        pfifo->cache_pull(dev, true);
        pfifo->reassign(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}

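/* Claim tile region 'i' if it is free, i.e. unused and any fence still
 * protecting it has signalled; returns NULL when the region is busy. */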
static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        spin_lock(&dev_priv->tile.lock);

        if (!tile->used &&
            (!tile->fence || nouveau_fence_signalled(tile->fence)))
                tile->used = true;
        else
                tile = NULL;

        spin_unlock(&dev_priv->tile.lock);
        return tile;
}

void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
                         struct nouveau_fence *fence)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (tile) {
                spin_lock(&dev_priv->tile.lock);
                if (fence) {
                        /* Mark it as pending. */
                        tile->fence = fence;
                        nouveau_fence_ref(fence);
                }

                tile->used = false;
                spin_unlock(&dev_priv->tile.lock);
        }
}

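/* Pick a free tile region and program it for the given VRAM range; stale
 * but unused regions encountered along the way are cleared.  Returns NULL
 * if no region could be claimed. */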
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
                    uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        struct nouveau_tile_reg *tile, *found = NULL;
        int i;

        for (i = 0; i < pfb->num_tiles; i++) {
                tile = nv10_mem_get_tile_region(dev, i);

                if (pitch && !found) {
                        found = tile;
                        continue;

                } else if (tile && tile->pitch) {
                        /* Kill an unused tile region. */
                        nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_mem_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_mem_update_tile_region(dev, found, addr, size,
                                            pitch, flags);
        return found;
}

/*
 * Cleanup everything
 */
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_bo_ref(NULL, &dev_priv->vga_ram);

        ttm_bo_device_release(&dev_priv->ttm.bdev);

        nouveau_ttm_global_release(dev_priv);

        if (dev_priv->fb_mtrr >= 0) {
                drm_mtrr_del(dev_priv->fb_mtrr,
                             pci_resource_start(dev->pdev, 1),
                             pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
                dev_priv->fb_mtrr = -1;
        }
}

void
nouveau_mem_gart_fini(struct drm_device *dev)
{
        nouveau_sgdma_takedown(dev);

        if (drm_core_has_AGP(dev) && dev->agp) {
                struct drm_agp_mem *entry, *tempe;

                /* Remove AGP resources, but leave dev->agp
                   intact until drv_cleanup is called. */
                list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
                        if (entry->bound)
                                drm_unbind_agp(entry->memory);
                        drm_free_agp(entry->memory, entry->pages);
                        kfree(entry);
                }
                INIT_LIST_HEAD(&dev->agp->memory);

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
}

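/* Amount of VRAM fitted to an NV04-class board, decoded from the
 * PFB_BOOT_0 strap register. */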
static uint32_t
nouveau_mem_detect_nv04(struct drm_device *dev)
{
        uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);

        if (boot0 & 0x00000100)
                return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;

        switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
        case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
                return 32 * 1024 * 1024;
        case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
                return 16 * 1024 * 1024;
        case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
                return 8 * 1024 * 1024;
        case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
                return 4 * 1024 * 1024;
        }

        return 0;
}

static uint32_t
nouveau_mem_detect_nforce(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct pci_dev *bridge;
        uint32_t mem;

        bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
        if (!bridge) {
                NV_ERROR(dev, "no bridge device\n");
                return 0;
        }

        if (dev_priv->flags & NV_NFORCE) {
                pci_read_config_dword(bridge, 0x7C, &mem);
                return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
        } else
        if (dev_priv->flags & NV_NFORCE2) {
                pci_read_config_dword(bridge, 0x84, &mem);
                return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
        }

        NV_ERROR(dev, "impossible!\n");
        return 0;
}

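/* Legacy VRAM size detection: NV04 straps, nForce/nForce2 IGP config
 * space, or the NV10-NV4x PFB_FIFO_DATA register. */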
int
nouveau_mem_detect(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->card_type == NV_04) {
                dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
        } else
        if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
                dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
        } else
        if (dev_priv->card_type < NV_50) {
                dev_priv->vram_size  = nv_rd32(dev, NV04_PFB_FIFO_DATA);
                dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
        }

        if (dev_priv->vram_size)
                return 0;
        return -ENOMEM;
}

bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
        if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
                return true;

        return false;
}

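/* Mangle the AGP mode reported by the bridge: apply the NV18 fast-write
 * quirk and any rate forced with the nouveau_agpmode option. */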
#if __OS_HAS_AGP
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*
         * FW seems to be broken on nv18, it makes the card lock up
         * randomly.
         */
        if (dev_priv->chipset == 0x18)
                mode &= ~PCI_AGP_COMMAND_FW;

        /*
         * AGP mode set in the command line.
         */
        if (nouveau_agpmode > 0) {
                bool agpv3 = mode & 0x8;
                int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;

                mode = (mode & ~0x7) | (rate & 0x7);
        }

        return mode;
}
#endif

int
nouveau_mem_reset_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
        uint32_t saved_pci_nv_1, pmc_enable;
        int ret;

        /* First of all, disable fast writes, otherwise if it's
         * already enabled in the AGP bridge and we disable the card's
         * AGP controller we might be locking ourselves out of it. */
        if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
             dev->agp->mode) & PCI_AGP_COMMAND_FW) {
                struct drm_agp_info info;
                struct drm_agp_mode mode;

                ret = drm_agp_info(dev, &info);
                if (ret)
                        return ret;

                mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
                ret = drm_agp_enable(dev, mode);
                if (ret)
                        return ret;
        }

        saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

        /* clear busmaster bit */
        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
        /* disable AGP */
        nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

        /* power cycle pgraph, if enabled */
        pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
        if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
                nv_wr32(dev, NV03_PMC_ENABLE,
                        pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
                nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
                        NV_PMC_ENABLE_PGRAPH);
        }

        /* and restore (gives effect of resetting AGP) */
        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
#endif

        return 0;
}

int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_agp_info info;
        struct drm_agp_mode mode;
        int ret;

        if (!dev->agp->acquired) {
                ret = drm_agp_acquire(dev);
                if (ret) {
                        NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
                        return ret;
                }
        }

        nouveau_mem_reset_agp(dev);

        ret = drm_agp_info(dev, &info);
        if (ret) {
                NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
                return ret;
        }

        /* see agp.h for the AGPSTAT_* modes available */
        mode.mode = get_agp_mode(dev, info.mode);
        ret = drm_agp_enable(dev, mode);
        if (ret) {
                NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
                return ret;
        }

        dev_priv->gart_info.type = NOUVEAU_GART_AGP;
        dev_priv->gart_info.aper_base = info.aperture_base;
        dev_priv->gart_info.aper_size = info.aperture_size;
#endif
        return 0;
}

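/* Set up the DMA mask, TTM, the PRAMIN reservation at the end of VRAM and
 * the mappable VRAM memory manager. */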
int
nouveau_mem_vram_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        int ret, dma_bits;

        dma_bits = 32;
        if (dev_priv->card_type >= NV_50) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
                        dma_bits = 40;
        } else
        if (drm_pci_device_is_pcie(dev) &&
            dev_priv->chipset != 0x40 &&
            dev_priv->chipset != 0x45) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
                        dma_bits = 39;
        }

        ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
        if (ret)
                return ret;

        dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);

        ret = nouveau_ttm_global_init(dev_priv);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
                                 dev_priv->ttm.bo_global_ref.ref.object,
                                 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
                                 dma_bits <= 32 ? true : false);
        if (ret) {
                NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
                return ret;
        }

        /* reserve space at end of VRAM for PRAMIN */
        if (dev_priv->card_type >= NV_50) {
                dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
        } else
        if (dev_priv->card_type >= NV_40) {
                u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
                u32 rsvd;

                /* estimate grctx size, the magics come from nv40_grctx.c */
                if      (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
                else if (dev_priv->chipset  < 0x43) rsvd = 0x4f00 * vs;
                else if (nv44_graph_class(dev))     rsvd = 0x4980 * vs;
                else                                rsvd = 0x4a40 * vs;
                rsvd += 16 * 1024;
                rsvd *= dev_priv->engine.fifo.channels;

                /* pciegart table */
                if (drm_pci_device_is_pcie(dev))
                        rsvd += 512 * 1024;

                /* object storage */
                rsvd += 512 * 1024;

                dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
        } else {
                dev_priv->ramin_rsvd_vram = 512 * 1024;
        }

        ret = dev_priv->engine.vram.init(dev);
        if (ret)
                return ret;

        NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
        if (dev_priv->vram_sys_base) {
                NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
                        dev_priv->vram_sys_base);
        }

        dev_priv->fb_available_size = dev_priv->vram_size;
        dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
        if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
                dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
        dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

        dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
        dev_priv->fb_aper_free = dev_priv->fb_available_size;

        /* mappable vram */
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             dev_priv->fb_available_size >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
                return ret;
        }

        if (dev_priv->card_type < NV_50) {
                ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
                                     0, 0, &dev_priv->vga_ram);
                if (ret == 0)
                        ret = nouveau_bo_pin(dev_priv->vga_ram,
                                             TTM_PL_FLAG_VRAM);

                if (ret) {
                        NV_WARN(dev, "failed to reserve VGA memory\n");
                        nouveau_bo_ref(NULL, &dev_priv->vga_ram);
                }
        }

        dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
                                         pci_resource_len(dev->pdev, 1),
                                         DRM_MTRR_WC);
        return 0;
}

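/* Bring up the GART: try AGP where it makes sense, otherwise fall back to
 * SGDMA (PCI/PCIE), then hand the aperture to TTM. */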
int
nouveau_mem_gart_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        int ret;

        dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
        if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
                ret = nouveau_mem_init_agp(dev);
                if (ret)
                        NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
        }
#endif

        if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
                ret = nouveau_sgdma_init(dev);
                if (ret) {
                        NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
                        return ret;
                }
        }

        NV_INFO(dev, "%d MiB GART (aperture)\n",
                (int)(dev_priv->gart_info.aper_size >> 20));
        dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

        ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
                             dev_priv->gart_info.aper_size >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
                return ret;
        }

        return 0;
}

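/* Parse the VBIOS memory timing table (BIT 'P') into the common format used
 * by the PM code.  Many fields are still reverse-engineered guesses, hence
 * the tUNK_* names and the XXX notes below. */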
void
nouveau_mem_timing_init(struct drm_device *dev)
{
        /* cards < NVC0 only */
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
        struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
        struct nvbios *bios = &dev_priv->vbios;
        struct bit_entry P;
        u8 tUNK_0, tUNK_1, tUNK_2;
        u8 tRP;         /* Byte 3 */
        u8 tRAS;        /* Byte 5 */
        u8 tRFC;        /* Byte 7 */
        u8 tRC;         /* Byte 9 */
        u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
        u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
        u8 *mem = NULL, *entry;
        int i, recordlen, entries;

        if (bios->type == NVBIOS_BIT) {
                if (bit_table(dev, 'P', &P))
                        return;

                if (P.version == 1)
                        mem = ROMPTR(bios, P.data[4]);
                else
                if (P.version == 2)
                        mem = ROMPTR(bios, P.data[8]);
                else {
                        NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
                }
        } else {
                NV_DEBUG(dev, "BMP version too old for memory\n");
                return;
        }

        if (!mem) {
                NV_DEBUG(dev, "memory timing table pointer invalid\n");
                return;
        }

        if (mem[0] != 0x10) {
                NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
                return;
        }

        /* validate record length */
        entries   = mem[2];
        recordlen = mem[3];
        if (recordlen < 15) {
                NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
                return;
        }

        /* parse vbios entries into common format */
        memtimings->timing =
                kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
        if (!memtimings->timing)
                return;

        entry = mem + mem[1];
        for (i = 0; i < entries; i++, entry += recordlen) {
                struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
                if (entry[0] == 0)
                        continue;

                tUNK_18 = 1;
                tUNK_19 = 1;
                tUNK_20 = 0;
                tUNK_21 = 0;
                /* deliberate fall-through: longer records fill in the extra
                 * fields, everything ends up in the default case */
                switch (min(recordlen, 22)) {
                case 22:
                        tUNK_21 = entry[21];
                case 21:
                        tUNK_20 = entry[20];
                case 20:
                        tUNK_19 = entry[19];
                case 19:
                        tUNK_18 = entry[18];
                default:
                        tUNK_0  = entry[0];
                        tUNK_1  = entry[1];
                        tUNK_2  = entry[2];
                        tRP     = entry[3];
                        tRAS    = entry[5];
                        tRFC    = entry[7];
                        tRC     = entry[9];
                        tUNK_10 = entry[10];
                        tUNK_11 = entry[11];
                        tUNK_12 = entry[12];
                        tUNK_13 = entry[13];
                        tUNK_14 = entry[14];
                        break;
                }

                timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);

                /* XXX: I don't trust the -1's and +1's... they must come
                 * from somewhere! */
                timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
                                      tUNK_18 << 16 |
                                      (tUNK_1 + tUNK_19 + 1) << 8 |
                                      (tUNK_2 - 1));

                timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
                if (recordlen > 19) {
                        timing->reg_100228 += (tUNK_19 - 1) << 24;
                } /* I cannot back-up this else-statement right now
                else {
                        timing->reg_100228 += tUNK_12 << 24;
                } */

                /* XXX: reg_10022c */
                timing->reg_10022c = tUNK_2 - 1;

                timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
                                      tUNK_13 << 8 | tUNK_13);

                /* XXX: +6? */
                timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
                timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;

                /* XXX; reg_100238, reg_10023c
                 * reg: 0x00??????
                 * reg_10023c:
                 *      0 for pre-NV50 cards
                 *      0x????0202 for NV50+ cards (empirical evidence) */
                if (dev_priv->card_type >= NV_50) {
                        timing->reg_10023c = 0x202;
                }

                NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
                         timing->reg_100220, timing->reg_100224,
                         timing->reg_100228, timing->reg_10022c);
                NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
                         timing->reg_100230, timing->reg_100234,
                         timing->reg_100238, timing->reg_10023c);
        }

        memtimings->nr_timing = entries;
        memtimings->supported = true;
}

void
nouveau_mem_timing_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;

        kfree(mem->timing);
}

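/* TTM memory-type manager backend for VRAM, built on top of nouveau_mm. */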
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct nouveau_mm *mm;
        u64 size, block, rsvd;
        int ret;

        rsvd  = (256 * 1024); /* vga memory */
        size  = (p_size << PAGE_SHIFT) - rsvd;
        block = dev_priv->vram_rblock_size;

        ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
        if (ret)
                return ret;

        man->priv = mm;
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        struct nouveau_mm *mm = man->priv;
        int ret;

        ret = nouveau_mm_fini(&mm);
        if (ret)
                return ret;

        man->priv = NULL;
        return 0;
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
        struct nouveau_mem *node = mem->mm_node;
        struct drm_device *dev = dev_priv->dev;

        if (node->tmp_vma.node) {
                nouveau_vm_unmap(&node->tmp_vma);
                nouveau_vm_put(&node->tmp_vma);
        }

        vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;
        u32 size_nc = 0;
        int ret;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->vma.node->type;

        ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
                        mem->page_alignment << PAGE_SHIFT, size_nc,
                        (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = 12;
        if (nvbo->vma.node)
                node->page_shift = nvbo->vma.node->type;

        mem->mm_node = node;
        mem->start   = node->offset >> PAGE_SHIFT;
        return 0;
}

void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        struct nouveau_mm *mm = man->priv;
        struct nouveau_mm_node *r;
        u32 total = 0, free = 0;

        mutex_lock(&mm->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
                       prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));

                total += r->length;
                if (!r->type)
                        free += r->length;
        }
        mutex_unlock(&mm->mutex);

        printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
               prefix, (u64)total << 12, (u64)free << 12);
        printk(KERN_DEBUG "%s block: 0x%08x\n",
               prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        nouveau_vram_manager_init,
        nouveau_vram_manager_fini,
        nouveau_vram_manager_new,
        nouveau_vram_manager_del,
        nouveau_vram_manager_debug
};

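/* TTM memory-type manager backend for the GART aperture. */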
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_mem *node = mem->mm_node;

        if (node->tmp_vma.node) {
                nouveau_vm_unmap(&node->tmp_vma);
                nouveau_vm_put(&node->tmp_vma);
        }
        mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma = &nvbo->vma;
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_mem *node;
        int ret;

        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
                     dev_priv->gart_info.aper_size))
                return -ENOMEM;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        /* This node must be for evicting large-paged VRAM
         * to system memory. Due to a nv50 limitation of
         * not being able to mix large/small pages within
         * the same PDE, we need to create a temporary
         * small-paged VMA for the eviction.
         */
        if (vma->node->type != vm->spg_shift) {
                ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
                                     vm->spg_shift, NV_MEM_ACCESS_RW,
                                     &node->tmp_vma);
                if (ret) {
                        kfree(node);
                        return ret;
                }
        }

        node->page_shift = nvbo->vma.node->type;
        mem->mm_node = node;
        mem->start = 0;
        return 0;
}

void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        nouveau_gart_manager_init,
        nouveau_gart_manager_fini,
        nouveau_gart_manager_new,
        nouveau_gart_manager_del,
        nouveau_gart_manager_debug
};