drivers/gpu/drm/nouveau/nouveau_sgdma.c
#include <linux/pagemap.h>
#include <linux/slab.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise we have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;
	struct nouveau_mem *node;
};
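/* Common destructor for both backends: finalise the ttm_dma_tt and free
 * the nouveau_sgdma_be wrapper allocated in nouveau_sgdma_create_ttm().
 */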
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}
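/* Pre-NV50 path: binding maps the backing pages (or an imported sg table)
 * straight into the GART address space via nouveau_vm_map_sg() /
 * nouveau_vm_map_sg_table().
 */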
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;
	u64 size = mem->num_pages << 12;

	if (ttm->sg) {
		node->sg = ttm->sg;
		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
	} else {
		node->pages = nvbe->ttm.dma_address;
		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
	}

	nvbe->node = node;
	return 0;
}
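/* Undo nv04_sgdma_bind(): unmap the pages from the GART VM. */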
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};
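/* NV50 and later: the VM mapping is set up and torn down from the buffer
 * object's move_notify() hook, so bind/unbind only record (or ignore) the
 * page and sg-table pointers.
 */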
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		node->sg = ttm->sg;
	} else {
		node->pages = nvbe->ttm.dma_address;
	}
	return 0;
}
static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
	.bind = nv50_sgdma_bind,
	.unbind = nv50_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};
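/* Allocate and initialise the sgdma backend for a new ttm_tt, selecting the
 * nv04 or nv50 backend ops based on the card generation.
 */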
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = drm->dev;
	if (nv_device(drm->device)->card_type < NV_50)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(nvbe);	/* don't leak nvbe if ttm_dma_tt_init() fails */
		return NULL;
	}
	return &nvbe->ttm.ttm;
}