#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

struct nouveau_sgdma_be {
        struct ttm_backend backend;
        struct drm_device *dev;

        dma_addr_t *pages;
        unsigned nr_pages;
        bool unmap_pages;

        u64 offset;
};
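
/*
 * Generation-independent TTM backend hooks.  populate() is handed the
 * buffer's CPU pages, maps them for device DMA and keeps the resulting
 * bus addresses around for the per-chipset bind() hooks below.
 */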
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                       struct page **pages, struct page *dummy_read_page,
                       dma_addr_t *dma_addrs)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        int i;

        NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

        nvbe->pages = dma_addrs;
        nvbe->nr_pages = num_pages;
        nvbe->unmap_pages = true;

        /* this code path isn't called and is incorrect anyways */
        if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
                nvbe->unmap_pages = false;
                return 0;
        }

        for (i = 0; i < num_pages; i++) {
                nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
                        nvbe->nr_pages = --i;
                        be->func->clear(be);
                        return -EFAULT;
                }
        }

        return 0;
}
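
/* clear() undoes populate(): unmap every page that was mapped for DMA. */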
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;

        if (nvbe->unmap_pages) {
                while (nvbe->nr_pages--) {
                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

        if (be) {
                NV_DEBUG(nvbe->dev, "\n");

                if (nvbe) {
                        if (nvbe->pages)
                                be->func->clear(be);
                        kfree(nvbe);
                }
        }
}
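
/*
 * NV04-style GART: the sg ctxdma is a plain DMA object whose PTEs are one
 * 32-bit word per 4KiB page.  The "+ 2" skips the two-word object header,
 * and the low bits or'ed in ("| 3") appear to be present/RW flags.
 */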
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];
                uint32_t offset_l = lower_32_bits(dma_offset);

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
                        offset_l += NV_CTXDMA_PAGE_SIZE;
                }
        }

        return 0;
}

static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }

        return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
        .populate       = nouveau_sgdma_populate,
        .clear          = nouveau_sgdma_clear,
        .bind           = nv04_sgdma_bind,
        .unbind         = nv04_sgdma_unbind,
        .destroy        = nouveau_sgdma_destroy
};
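
/*
 * NV41-style PCIE GART: one 32-bit PTE per page holding the bus address
 * shifted right by 7, with bit 0 set (presumably a present bit); updates
 * are made visible by poking the flush register at 0x100810.
 */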
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100810, 0x00000022);
        if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
                NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100810));
        nv_wr32(dev, 0x100810, 0x00000000);
}

static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->pages;
        u32 pte = mem->start << 2;
        u32 cnt = nvbe->nr_pages;

        nvbe->offset = mem->start << PAGE_SHIFT;

        while (cnt--) {
                nv_wo32(pgt, pte, (*list++ >> 7) | 1);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        return 0;
}

static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = nvbe->nr_pages;

        while (cnt--) {
                nv_wo32(pgt, pte, 0x00000000);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
        .populate       = nouveau_sgdma_populate,
        .clear          = nouveau_sgdma_clear,
        .bind           = nv41_sgdma_bind,
        .unbind         = nv41_sgdma_unbind,
        .destroy        = nouveau_sgdma_destroy
};

static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
        nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
        if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
                NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100808));
        nv_wr32(dev, 0x100808, 0x00000000);
}
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
        dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
        u32 pte, tmp[4];

        pte   = base >> 2;
        base &= ~0x0000000f;

        tmp[0] = nv_ro32(pgt, base + 0x0);
        tmp[1] = nv_ro32(pgt, base + 0x4);
        tmp[2] = nv_ro32(pgt, base + 0x8);
        tmp[3] = nv_ro32(pgt, base + 0xc);
        while (cnt--) {
                u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
                switch (pte++ & 0x3) {
                case 0:
                        tmp[0] &= ~0x07ffffff;
                        tmp[0] |= addr;
                        break;
                case 1:
                        tmp[0] &= ~0xf8000000;
                        tmp[0] |= addr << 27;
                        tmp[1] &= ~0x003fffff;
                        tmp[1] |= addr >> 5;
                        break;
                case 2:
                        tmp[1] &= ~0xffc00000;
                        tmp[1] |= addr << 22;
                        tmp[2] &= ~0x0001ffff;
                        tmp[2] |= addr >> 10;
                        break;
                case 3:
                        tmp[2] &= ~0xfffe0000;
                        tmp[2] |= addr << 17;
                        tmp[3] &= ~0x00000fff;
                        tmp[3] |= addr >> 15;
                        break;
                }
        }

        tmp[3] |= 0x40000000;

        nv_wo32(pgt, base + 0x0, tmp[0]);
        nv_wo32(pgt, base + 0x4, tmp[1]);
        nv_wo32(pgt, base + 0x8, tmp[2]);
        nv_wo32(pgt, base + 0xc, tmp[3]);
}
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->pages;
        u32 pte = mem->start << 2, tmp[4];
        u32 cnt = nvbe->nr_pages;
        int i;

        nvbe->offset = mem->start << PAGE_SHIFT;

        /* leading partial group */
        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, list, pte, part);
                pte  += (part << 2);
                list += part;
                cnt  -= part;
        }

        /* whole groups of four PTEs */
        while (cnt >= 4) {
                for (i = 0; i < 4; i++)
                        tmp[i] = *list++ >> 12;
                nv_wo32(pgt, pte + 0x0, tmp[0] >>  0 | tmp[1] << 27);
                nv_wo32(pgt, pte + 0x4, tmp[1] >>  5 | tmp[2] << 22);
                nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
                nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
                pte += 0x10;
                cnt -= 4;
        }

        /* trailing partial group */
        if (cnt)
                nv44_sgdma_fill(pgt, list, pte, cnt);

        nv44_sgdma_flush(nvbe);
        return 0;
}

static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = nvbe->nr_pages;

        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, NULL, pte, part);
                pte += (part << 2);
                cnt -= part;
        }

        while (cnt >= 4) {
                nv_wo32(pgt, pte + 0x0, 0x00000000);
                nv_wo32(pgt, pte + 0x4, 0x00000000);
                nv_wo32(pgt, pte + 0x8, 0x00000000);
                nv_wo32(pgt, pte + 0xc, 0x00000000);
                pte += 0x10;
                cnt -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, NULL, pte, cnt);

        nv44_sgdma_flush(nvbe);
        return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
        .populate       = nouveau_sgdma_populate,
        .clear          = nouveau_sgdma_clear,
        .bind           = nv44_sgdma_bind,
        .unbind         = nv44_sgdma_unbind,
        .destroy        = nouveau_sgdma_destroy
};
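
/*
 * On NV50 and up the real (un)binding happens through the VM code from
 * move_notify(); these hooks only stash the page list in the memory node
 * so it can be found there.
 */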
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct nouveau_mem *node = mem->mm_node;

        /* noop: bound in move_notify() */
        node->pages = nvbe->pages;
        nvbe->pages = (dma_addr_t *)node;
        return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;

        /* noop: unbound in move_notify() */
        nvbe->pages = node->pages;
        return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
        .populate       = nouveau_sgdma_populate,
        .clear          = nouveau_sgdma_clear,
        .bind           = nv50_sgdma_bind,
        .unbind         = nv50_sgdma_unbind,
        .destroy        = nouveau_sgdma_destroy
};
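
/* Create a backend instance; the method table was chosen in nouveau_sgdma_init(). */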
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;

        nvbe->backend.func = dev_priv->gart_info.func;
        return &nvbe->backend;
}
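
/*
 * Pick the GART implementation for this chipset, allocate the dummy page
 * used by the NV44 path, and (for pre-NV50) create the sg ctxdma object
 * that backs the page table.
 */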
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        u32 aper_size, align;
        int ret;

        if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
                aper_size = 512 * 1024 * 1024;
        else
                aper_size = 64 * 1024 * 1024;

        /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
         * christmas.  The cards before it have them, the cards after
         * it have them, why is NV44 so unloved?
         */
        dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
        if (!dev_priv->gart_info.dummy.page)
                return -ENOMEM;

        dev_priv->gart_info.dummy.addr =
                pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
                             0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
                NV_ERROR(dev, "error mapping dummy page\n");
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
                return -ENOMEM;
        }

        if (dev_priv->card_type >= NV_50) {
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
                dev_priv->gart_info.func = &nv50_sgdma_backend;
        } else
        if (0 && pci_is_pcie(dev->pdev) &&
            dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
                if (nv44_graph_class(dev)) {
                        dev_priv->gart_info.func = &nv44_sgdma_backend;
                        align = 512 * 1024;
                } else {
                        dev_priv->gart_info.func = &nv41_sgdma_backend;
                        align = 16;
                }

                ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
        } else {
                ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
                                   (0 << 14) /* RW */ |
                                   (2 << 16) /* PCI */);
                nv_wo32(gpuobj, 4, aper_size - 1);

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
                dev_priv->gart_info.func = &nv04_sgdma_backend;
        }

        return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

        if (dev_priv->gart_info.dummy.page) {
                pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
        }
}
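
/*
 * Translate an offset inside the GART aperture to the bus address stored
 * in the NV04-style ctxdma; only valid for pre-NV50 chipsets, hence the
 * BUG_ON().
 */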
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

        BUG_ON(dev_priv->card_type >= NV_50);

        return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
                (offset & NV_CTXDMA_PAGE_MASK);
}