/* drivers/gpu/drm/nouveau/nouveau_sgdma.c */

#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	bool *ttm_alloced;
	unsigned nr_pages;

	u64 offset;
	bool bound;
};

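/*
 * populate() builds this backend's list of bus addresses for a buffer's
 * pages.  Addresses that TTM already provides (dma_addrs[] entries that
 * are not DMA_ERROR_CODE) are reused and flagged in ttm_alloced[] so we
 * never unmap them ourselves; everything else is mapped here with
 * pci_map_page().
 */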
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
	if (!nvbe->ttm_alloced)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
			nvbe->pages[nvbe->nr_pages] =
					dma_addrs[nvbe->nr_pages];
			nvbe->ttm_alloced[nvbe->nr_pages] = true;
		} else {
			nvbe->pages[nvbe->nr_pages] =
				pci_map_page(dev->pdev, pages[nvbe->nr_pages],
					     0, PAGE_SIZE,
					     PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev,
						  nvbe->pages[nvbe->nr_pages])) {
				be->func->clear(be);
				return -EFAULT;
			}
		}

		nvbe->nr_pages++;
	}

	return 0;
}

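/*
 * clear() undoes populate(): unbind if still bound, unmap only the pages
 * this backend mapped itself (entries flagged in ttm_alloced[] belong to
 * TTM and are left alone), then free the bookkeeping arrays.
 */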
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			if (!nvbe->ttm_alloced[nvbe->nr_pages])
				pci_unmap_page(dev->pdev,
					       nvbe->pages[nvbe->nr_pages],
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}

		kfree(nvbe->pages);
		kfree(nvbe->ttm_alloced);
		nvbe->pages = NULL;
		nvbe->ttm_alloced = NULL;
		nvbe->nr_pages = 0;
	}
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (be) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe) {
			if (nvbe->pages)
				be->func->clear(be);
			kfree(nvbe);
		}
	}
}

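/*
 * NV04-class GART: the page table lives inside the NV_CLASS_DMA_IN_MEMORY
 * object created in nouveau_sgdma_init().  Each PTE is one 32-bit word and
 * the table starts two words past the object header (hence the "+ 2").
 * The low bits OR'd into each entry (| 3) presumably mark the page present
 * and writable.
 */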
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			/* one PTE per 4KiB chunk of the (possibly larger)
			 * system page; take the low 32 bits of the current
			 * chunk's address each time around */
			nv_wo32(gpuobj, (pte * 4) + 0,
				lower_32_bits(dma_offset) | 3);
			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.populate	= nouveau_sgdma_populate,
	.clear		= nouveau_sgdma_clear,
	.bind		= nv04_sgdma_bind,
	.unbind		= nv04_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};

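/*
 * NV41-class GART: a flat page table in a GPU object, with a separate
 * flush register.  The values written to 0x100810 below are
 * reverse-engineered constants; the wait polls until what is presumably a
 * completion bit (0x100) is set before the register is cleared again.
 */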
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100810, 0x00000022);
	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100810));
	nv_wr32(dev, 0x100810, 0x00000000);
}

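/*
 * Each NV41 PTE is one 32-bit word holding the bus address shifted right
 * by 7, with bit 0 apparently acting as the present bit.  mem->start is
 * in GART pages, so "<< 2" converts it into a byte offset into the table
 * (4 bytes per PTE).
 */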
static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2;
	u32 cnt = nvbe->nr_pages;

	nvbe->offset = mem->start << PAGE_SHIFT;

	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
	.populate	= nouveau_sgdma_populate,
	.clear		= nouveau_sgdma_clear,
	.bind		= nv41_sgdma_bind,
	.unbind		= nv41_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};

static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100808));
	nv_wr32(dev, 0x100808, 0x00000000);
}

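/*
 * NV44 packs four PTEs into each aligned 128-bit group: four 27-bit page
 * addresses (bus address >> 12) straddle the four 32-bit words, as the
 * shift/mask pairs in the switch below show.  Because entries are not
 * word-aligned, updating fewer than four PTEs means reading the whole
 * group back, splicing in the new addresses, and writing it out again.
 * Slots with no backing page (list == NULL) are pointed at the dummy
 * scratch page rather than cleared, since NV44 lacks a per-PTE present
 * bit (see the comment in nouveau_sgdma_init()).
 */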
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
	u32 pte, tmp[4];

	pte   = base >> 2;
	base &= ~0x0000000f;

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	tmp[3] |= 0x40000000;

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3]);
}

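/*
 * bind()/unbind() write the table in three phases: an unaligned head via
 * nv44_sgdma_fill(), a fast path that composes whole 128-bit groups four
 * PTEs at a time, and an unaligned tail that goes through fill() again.
 * nv44_sgdma_flush() is then called so the GPU picks up the new mappings.
 */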
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2, tmp[4];
	u32 cnt = nvbe->nr_pages;
	int i;

	nvbe->offset = mem->start << PAGE_SHIFT;

	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, list, pte, part);
		pte  += (part << 2);
		list += part;
		cnt  -= part;
	}

	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
		nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, NULL, pte, part);
		pte += (part << 2);
		cnt -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte + 0x0, 0x00000000);
		nv_wo32(pgt, pte + 0x4, 0x00000000);
		nv_wo32(pgt, pte + 0x8, 0x00000000);
		nv_wo32(pgt, pte + 0xc, 0x00000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
	.populate	= nouveau_sgdma_populate,
	.clear		= nouveau_sgdma_clear,
	.bind		= nv44_sgdma_bind,
	.unbind		= nv44_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};

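/*
 * On NV50 and later the real page-table update happens in the driver's
 * move_notify() hook.  bind()/unbind() only stash the page list in the
 * nouveau_mem node (and restore it afterwards) so move_notify() can find
 * it; nvbe->pages is temporarily repurposed to hold the node pointer.
 */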
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = mem->mm_node;
	/* noop: bound in move_notify() */
	node->pages = nvbe->pages;
	nvbe->pages = (dma_addr_t *)node;
	nvbe->bound = true;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
	/* noop: unbound in move_notify() */
	nvbe->pages = node->pages;
	node->pages = NULL;
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.populate	= nouveau_sgdma_populate,
	.clear		= nouveau_sgdma_clear,
	.bind		= nv50_sgdma_bind,
	.unbind		= nv50_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};

struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = dev_priv->gart_info.func;
	return &nvbe->backend;
}

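/*
 * nouveau_sgdma_init() picks the GART backend and builds its page table:
 * NV50+ uses the nv50 backend (the table itself is filled in via
 * move_notify()), PCIe NV41/NV44-class chips get a bare page-table object,
 * and everything else falls back to an NV04-style DMA-in-memory ctxdma
 * with inline PTEs.  A dummy scratch page is always allocated and mapped
 * so that unbound NV44 PTEs have somewhere harmless to point.
 */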
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	u32 aper_size, align;
	int ret;

	if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 64 * 1024 * 1024;

	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
	 * christmas.  The cards before it have them, the cards after
	 * it have them, why is NV44 so unloved?
	 */
	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
	if (!dev_priv->gart_info.dummy.page)
		return -ENOMEM;

	dev_priv->gart_info.dummy.addr =
		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
		NV_ERROR(dev, "error mapping dummy page\n");
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
		return -ENOMEM;
	}

	if (dev_priv->card_type >= NV_50) {
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
	if (drm_pci_device_is_pcie(dev) &&
	    dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
		} else {
			dev_priv->gart_info.func = &nv41_sgdma_backend;
			align = 16;
		}

		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
	} else {
		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
		dev_priv->gart_info.func = &nv04_sgdma_backend;
	}

	return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

	if (dev_priv->gart_info.dummy.page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
	}
}

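/*
 * Translate an offset within the GART aperture back to the bus address it
 * currently maps to, by reading the corresponding entry out of the
 * sg_ctxdma object (NV04-style layout: two header words, then one 32-bit
 * PTE per 4 KiB page).  Not valid on NV50+, hence the BUG_ON().
 */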
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}