#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

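/*
 * The SGDMA page tables below always operate on 4KiB GPU pages
 * (NV_CTXDMA_PAGE_SIZE), independent of the CPU's PAGE_SIZE; larger CPU
 * pages are split into several consecutive PTEs by the bind routines.
 */
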
struct nouveau_sgdma_be {
        struct ttm_backend backend;
        struct drm_device *dev;

        /* fields referenced by the backends below */
        dma_addr_t *pages;
        bool *ttm_alloced;
        unsigned nr_pages;

        u64 offset;
};

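/*
 * Allocate the DMA address array for a buffer's backing pages.  If TTM
 * already provided a DMA address for a page (dma_addrs[]), reuse it and
 * remember that in ttm_alloced[]; otherwise map the page ourselves with
 * pci_map_page().
 */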
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                       struct page **pages, struct page *dummy_read_page,
                       dma_addr_t *dma_addrs)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;

        NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

        if (nvbe->pages)
                return -EINVAL;

        nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
        if (!nvbe->pages)
                return -ENOMEM;

        nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
        if (!nvbe->ttm_alloced)
                return -ENOMEM;

        nvbe->nr_pages = 0;
        while (num_pages--) {
                if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
                        /* TTM already mapped this page for us */
                        nvbe->pages[nvbe->nr_pages] =
                                        dma_addrs[nvbe->nr_pages];
                        nvbe->ttm_alloced[nvbe->nr_pages] = true;
                } else {
                        nvbe->pages[nvbe->nr_pages] =
                                pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
                                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(dev->pdev,
                                                  nvbe->pages[nvbe->nr_pages])) {
                                be->func->clear(be);
                                return -EFAULT;
                        }
                        /* we own this mapping, so clear() must unmap it */
                        nvbe->ttm_alloced[nvbe->nr_pages] = false;
                }

                nvbe->nr_pages++;
        }

        return 0;
}

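/*
 * Undo populate(): unmap every page we mapped ourselves (ttm_alloced[i]
 * is false) and release the bookkeeping arrays.
 */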
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev;

        if (nvbe && nvbe->pages) {
                dev = nvbe->dev;

                while (nvbe->nr_pages--) {
                        if (!nvbe->ttm_alloced[nvbe->nr_pages])
                                pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }

                kfree(nvbe->pages);
                nvbe->pages = NULL;
                kfree(nvbe->ttm_alloced);
                nvbe->ttm_alloced = NULL;
                nvbe->nr_pages = 0;
        }
}

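/* Final teardown of a backend instance allocated by nouveau_sgdma_init_ttm(). */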
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

        if (!nvbe)
                return;

        NV_DEBUG(nvbe->dev, "\n");
        kfree(nvbe);
}

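/*
 * NV04-style GART: the DMA context object created in nouveau_sgdma_init()
 * holds one 32-bit PTE per 4KiB page, starting at the third word (hence
 * the "+ 2" below); the low PTE bits ("| 3") mark the entry valid.
 */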
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];

                /* one PTE per 4KiB chunk of the (possibly larger) CPU page */
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        uint32_t offset_l = lower_32_bits(dma_offset);

                        nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
                        dma_offset += NV_CTXDMA_PAGE_SIZE;
                }
        }

        return 0;
}

static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }

        return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv04_sgdma_bind,
        .unbind   = nv04_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};

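/*
 * NV41-style GART: a flat page table in a gpuobj, one 32-bit PTE per page
 * ((address >> 7) | valid bit).  Writes are made visible to the GPU by
 * poking the flush register at 0x100810.
 */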
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100810, 0x00000022);
        if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
                NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100810));
        nv_wr32(dev, 0x100810, 0x00000000);
}

static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->pages;
        u32 pte = mem->start << 2;
        u32 cnt = nvbe->nr_pages;

        nvbe->offset = mem->start << PAGE_SHIFT;

        while (cnt--) {
                nv_wo32(pgt, pte, (*list++ >> 7) | 1);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        return 0;
}

static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = nvbe->nr_pages;

        while (cnt--) {
                nv_wo32(pgt, pte, 0x00000000);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv41_sgdma_bind,
        .unbind   = nv41_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};

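/*
 * NV44-style GART: PTEs are packed 27-bit page frame numbers, four entries
 * per 128-bit group, and the hardware has no per-PTE present bit, so unused
 * entries must point at a dummy page (see nouveau_sgdma_init()).  The flush
 * below invalidates the range just rewritten via registers 0x100814/0x100808.
 */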
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
        nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
        if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
                NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100808));
        nv_wr32(dev, 0x100808, 0x00000000);
}

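/*
 * Read-modify-write an unaligned part of a 4-PTE group.  Each PTE is a
 * 27-bit page frame number, so consecutive entries straddle the 32-bit
 * words tmp[0..3]; entries not covered by 'list' are pointed at the dummy
 * page.  The 0x40000000 flag in the last word is always set, matching the
 * fast path in nv44_sgdma_bind().
 */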
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
        dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
        u32 pte, tmp[4];

        pte   = base >> 2;
        base &= ~0x0000000f;

        tmp[0] = nv_ro32(pgt, base + 0x0);
        tmp[1] = nv_ro32(pgt, base + 0x4);
        tmp[2] = nv_ro32(pgt, base + 0x8);
        tmp[3] = nv_ro32(pgt, base + 0xc);

        while (cnt--) {
                u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
                switch (pte++ & 0x3) {
                case 0:
                        tmp[0] &= ~0x07ffffff;
                        tmp[0] |= addr;
                        break;
                case 1:
                        tmp[0] &= ~0xf8000000;
                        tmp[0] |= addr << 27;
                        tmp[1] &= ~0x003fffff;
                        tmp[1] |= addr >> 5;
                        break;
                case 2:
                        tmp[1] &= ~0xffc00000;
                        tmp[1] |= addr << 22;
                        tmp[2] &= ~0x0001ffff;
                        tmp[2] |= addr >> 10;
                        break;
                case 3:
                        tmp[2] &= ~0xfffe0000;
                        tmp[2] |= addr << 17;
                        tmp[3] &= ~0x00000fff;
                        tmp[3] |= addr >> 15;
                        break;
                }
        }

        tmp[3] |= 0x40000000;

        nv_wo32(pgt, base + 0x0, tmp[0]);
        nv_wo32(pgt, base + 0x4, tmp[1]);
        nv_wo32(pgt, base + 0x8, tmp[2]);
        nv_wo32(pgt, base + 0xc, tmp[3]);
}

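/*
 * Bind: handle a leading unaligned chunk with nv44_sgdma_fill(), write all
 * fully aligned 4-PTE groups directly, then fill any trailing remainder,
 * and finally flush the GART.
 */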
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->pages;
        u32 pte = mem->start << 2, tmp[4];
        u32 cnt = nvbe->nr_pages;
        int i;

        nvbe->offset = mem->start << PAGE_SHIFT;

        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, list, pte, part);
                pte  += (part << 2);
                list += part;
                cnt  -= part;
        }

        while (cnt >= 4) {
                for (i = 0; i < 4; i++)
                        tmp[i] = *list++ >> 12;
                nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
                nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
                nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
                nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
                pte += 0x10;
                cnt -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, list, pte, cnt);

        nv44_sgdma_flush(nvbe);
        return 0;
}

static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = nvbe->nr_pages;

        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, NULL, pte, part);
                pte += (part << 2);
                cnt -= part;
        }

        while (cnt >= 4) {
                nv_wo32(pgt, pte + 0x0, 0x00000000);
                nv_wo32(pgt, pte + 0x4, 0x00000000);
                nv_wo32(pgt, pte + 0x8, 0x00000000);
                nv_wo32(pgt, pte + 0xc, 0x00000000);
                pte += 0x10;
                cnt -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, NULL, pte, cnt);

        nv44_sgdma_flush(nvbe);
        return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv44_sgdma_bind,
        .unbind   = nv44_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};

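/*
 * On NV50+ the actual VM mapping is done from move_notify(), so bind and
 * unbind only stash the DMA address list in the nouveau_mem node (and
 * retrieve it again on unbind).
 */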
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct nouveau_mem *node = mem->mm_node;

        /* noop: bound in move_notify() */
        node->pages = nvbe->pages;
        nvbe->pages = (dma_addr_t *)node;
        return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;

        /* noop: unbound in move_notify() */
        nvbe->pages = node->pages;
        node->pages = NULL;
        return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv50_sgdma_bind,
        .unbind   = nv50_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};

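/*
 * Allocate a TTM backend instance for a buffer object, wired up to the
 * per-chipset function table selected in nouveau_sgdma_init().
 */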
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;
        nvbe->backend.func = dev_priv->gart_info.func;
        return &nvbe->backend;
}

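/*
 * Pick the GART implementation for this card and, for the pre-NV50 paths,
 * create the page table / DMA context object that backs it.
 */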
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        u32 aper_size, align;
        int ret;

        if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
                aper_size = 512 * 1024 * 1024;
        else
                aper_size = 64 * 1024 * 1024;

        /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
         * christmas.  The cards before it have them, the cards after
         * it have them, why is NV44 so unloved?
         */
        dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
        if (!dev_priv->gart_info.dummy.page)
                return -ENOMEM;

        dev_priv->gart_info.dummy.addr =
                pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
                             0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
                NV_ERROR(dev, "error mapping dummy page\n");
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
                return -ENOMEM;
        }

        if (dev_priv->card_type >= NV_50) {
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
                dev_priv->gart_info.func = &nv50_sgdma_backend;
        } else
        if (drm_pci_device_is_pcie(dev) &&
            dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
                if (nv44_graph_class(dev)) {
                        dev_priv->gart_info.func = &nv44_sgdma_backend;
                        align = 512 * 1024;
                } else {
                        dev_priv->gart_info.func = &nv41_sgdma_backend;
                        align = 16;
                }

                ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
        } else {
                ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
                                   (2 << 16) /* PCI */);
                nv_wo32(gpuobj, 4, aper_size - 1);

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
                dev_priv->gart_info.func = &nv04_sgdma_backend;
        }

        return 0;
}

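/* Release the SGDMA context object and the NV44 dummy page mapping. */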
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

        if (dev_priv->gart_info.dummy.page) {
                pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
        }
}

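/*
 * Translate an offset inside the GART aperture back to the bus address it
 * maps to, by reading the NV04-style ctxdma PTE.  Only valid on pre-NV50
 * cards, hence the BUG_ON.
 */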
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

        BUG_ON(dev_priv->card_type >= NV_50);

        return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
                (offset & NV_CTXDMA_PAGE_MASK);
}