#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
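/*
 * The GART page tables always work in 4 KiB units, independent of the CPU
 * PAGE_SIZE; the NV04 bind/unbind paths below therefore write
 * PAGE_SIZE / NV_CTXDMA_PAGE_SIZE entries per TTM page.
 */
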
struct nouveau_sgdma_be {
        /* this has to be the first field so populate/unpopulate in
         * nouveau_bo.c works properly, otherwise have to move them here
         */
        struct ttm_dma_tt ttm;
        struct drm_device *dev;
        u64 offset;
};
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

        if (ttm) {
                NV_DEBUG(nvbe->dev, "\n");
                ttm_dma_tt_fini(&nvbe->ttm);
                kfree(nvbe);
        }
}
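
/*
 * NV04-style GART: the scatter/gather ctxdma is a linear table of 32-bit
 * PTEs starting 8 bytes into the object (hence the "+ 2" below).  Each PTE
 * holds the bus address of one 4 KiB page; ORing 3 into the low bits
 * presumably marks the entry present/writable per the NV04 DMA-object PTE
 * format (an interpretation of this code, not taken from documentation).
 */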
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < ttm->num_pages; i++) {
                dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
                uint32_t offset_l = lower_32_bits(dma_offset);

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
                        offset_l += NV_CTXDMA_PAGE_SIZE;
                }
        }

        return 0;
}
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        if (ttm->state != tt_bound)
                return 0;

        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < ttm->num_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }

        return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
        .bind    = nv04_sgdma_bind,
        .unbind  = nv04_sgdma_unbind,
        .destroy = nouveau_sgdma_destroy
};
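
/*
 * NV41-family GART: one 32-bit PTE per 4 KiB page in a page table read
 * directly by the chip.  After rewriting PTEs the engine TLB has to be
 * flushed; judging from the error string below, register 0x100810 triggers
 * and reports that flush (register semantics inferred from this code only).
 */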
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100810, 0x00000022);
        if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
                NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100810));
        nv_wr32(dev, 0x100810, 0x00000000);
}
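
/*
 * Each NV41 PTE is the page's bus address shifted right by 7 with bit 0
 * set, presumably as a present/valid flag (the pages are 4 KiB aligned, so
 * no address bits are lost by the shift).
 */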
static int
nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2;
        u32 cnt = ttm->num_pages;

        nvbe->offset = mem->start << PAGE_SHIFT;

        while (cnt--) {
                nv_wo32(pgt, pte, (*list++ >> 7) | 1);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        return 0;
}
static int
nv41_sgdma_unbind(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = ttm->num_pages;

        while (cnt--) {
                nv_wo32(pgt, pte, 0x00000000);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        return 0;
}
static struct ttm_backend_func nv41_sgdma_backend = {
        .bind    = nv41_sgdma_bind,
        .unbind  = nv41_sgdma_unbind,
        .destroy = nouveau_sgdma_destroy
};
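
/*
 * NV44 GART flush: again inferred from this code rather than documentation,
 * 0x100814 appears to take the size of the range being flushed and 0x100808
 * the base offset together with a trigger bit.
 */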
static void
nv44_sgdma_flush(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
        nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
        if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
                NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100808));
        nv_wr32(dev, 0x100808, 0x00000000);
}
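
/*
 * NV44 packs four 27-bit page addresses (bus address >> 12) into each
 * 16-byte group of the page table, so updating fewer than four consecutive
 * PTEs means read-modify-writing the whole group.  NV44 PTEs have no
 * present bit (see the comment in nouveau_sgdma_init()), so unused slots
 * are pointed at the dummy page instead of being cleared.
 */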
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
        dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
        u32 pte, tmp[4];

        pte   = base >> 2;
        base &= ~0x0000000f;

        tmp[0] = nv_ro32(pgt, base + 0x0);
        tmp[1] = nv_ro32(pgt, base + 0x4);
        tmp[2] = nv_ro32(pgt, base + 0x8);
        tmp[3] = nv_ro32(pgt, base + 0xc);
        while (cnt--) {
                u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
                switch (pte++ & 0x3) {
                case 0:
                        tmp[0] &= ~0x07ffffff;
                        tmp[0] |= addr;
                        break;
                case 1:
                        tmp[0] &= ~0xf8000000;
                        tmp[0] |= addr << 27;
                        tmp[1] &= ~0x003fffff;
                        tmp[1] |= addr >> 5;
                        break;
                case 2:
                        tmp[1] &= ~0xffc00000;
                        tmp[1] |= addr << 22;
                        tmp[2] &= ~0x0001ffff;
                        tmp[2] |= addr >> 10;
                        break;
                case 3:
                        tmp[2] &= ~0xfffe0000;
                        tmp[2] |= addr << 17;
                        tmp[3] &= ~0x00000fff;
                        tmp[3] |= addr >> 15;
                        break;
                }
        }

        tmp[3] |= 0x40000000;

        nv_wo32(pgt, base + 0x0, tmp[0]);
        nv_wo32(pgt, base + 0x4, tmp[1]);
        nv_wo32(pgt, base + 0x8, tmp[2]);
        nv_wo32(pgt, base + 0xc, tmp[3]);
}
static int
nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2, tmp[4];
        u32 cnt = ttm->num_pages;
        int i;

        nvbe->offset = mem->start << PAGE_SHIFT;

        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, list, pte, part);
                pte  += (part << 2);
                list += part;
                cnt  -= part;
        }

        while (cnt >= 4) {
                for (i = 0; i < 4; i++)
                        tmp[i] = *list++ >> 12;
                nv_wo32(pgt, pte + 0x0, tmp[0] >>  0 | tmp[1] << 27);
                nv_wo32(pgt, pte + 0x4, tmp[1] >>  5 | tmp[2] << 22);
                nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
                nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
                pte  += 0x10;
                cnt  -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, list, pte, cnt);

        nv44_sgdma_flush(ttm);
        return 0;
}
static int
nv44_sgdma_unbind(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = ttm->num_pages;

        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, NULL, pte, part);
                pte  += (part << 2);
                cnt  -= part;
        }

        while (cnt >= 4) {
                nv_wo32(pgt, pte + 0x0, 0x00000000);
                nv_wo32(pgt, pte + 0x4, 0x00000000);
                nv_wo32(pgt, pte + 0x8, 0x00000000);
                nv_wo32(pgt, pte + 0xc, 0x00000000);
                pte  += 0x10;
                cnt  -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, NULL, pte, cnt);

        nv44_sgdma_flush(ttm);
        return 0;
}
static struct ttm_backend_func nv44_sgdma_backend = {
        .bind    = nv44_sgdma_bind,
        .unbind  = nv44_sgdma_unbind,
        .destroy = nouveau_sgdma_destroy
};
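
/*
 * On NV50 and later the GART is handled by the real VM code, so bind and
 * unbind are no-ops here; the DMA address list is only handed over so that
 * move_notify() can map and unmap the pages.
 */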
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *node = mem->mm_node;

        /* noop: bound in move_notify() */
        node->pages = nvbe->ttm.dma_address;
        return 0;
}
static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
        /* noop: unbound in move_notify() */
        return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
        .bind    = nv50_sgdma_bind,
        .unbind  = nv50_sgdma_unbind,
        .destroy = nouveau_sgdma_destroy
};
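
/*
 * Allocate the driver-private TTM backend object.  The embedded ttm_dma_tt
 * is the first member of struct nouveau_sgdma_be, which is what makes the
 * (struct nouveau_sgdma_be *)ttm casts used throughout this file valid.
 */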
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
                         unsigned long size, uint32_t page_flags,
                         struct page *dummy_read_page)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;
        nvbe->ttm.ttm.func = dev_priv->gart_info.func;

        if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
                kfree(nvbe);
                return NULL;
        }
        return &nvbe->ttm.ttm;
}
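
/*
 * Pick the aperture size and backend for the chipset and build the
 * scatter/gather ctxdma where one is needed.  Note that the PCIe NV41/NV44
 * paths are currently disabled by the "if (0 && ...)" below, so pre-NV50
 * cards fall back to the NV04-style backend.
 */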
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        u32 aper_size, align;
        int ret;

        if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
                aper_size = 512 * 1024 * 1024;
        else
                aper_size = 64 * 1024 * 1024;

        /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
         * Christmas.  The cards before it have them, the cards after
         * it have them, why is NV44 so unloved?
         */
        dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
        if (!dev_priv->gart_info.dummy.page)
                return -ENOMEM;

        dev_priv->gart_info.dummy.addr =
                pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
                             0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
                NV_ERROR(dev, "error mapping dummy page\n");
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
                return -ENOMEM;
        }

        if (dev_priv->card_type >= NV_50) {
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
                dev_priv->gart_info.func = &nv50_sgdma_backend;
        } else
        if (0 && pci_is_pcie(dev->pdev) &&
            dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
                if (nv44_graph_class(dev)) {
                        dev_priv->gart_info.func = &nv44_sgdma_backend;
                        align = 512 * 1024;
                } else {
                        dev_priv->gart_info.func = &nv41_sgdma_backend;
                        align = 16;
                }

                ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
        } else {
                ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
                                   (0 << 14) /* RW */ |
                                   (2 << 16) /* PCI */);
                nv_wo32(gpuobj, 4, aper_size - 1);

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
                dev_priv->gart_info.func = &nv04_sgdma_backend;
        }

        return 0;
}
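
/* Drop the ctxdma reference and release the NV44 dummy page, if any. */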
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

        if (dev_priv->gart_info.dummy.page) {
                pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
        }
}
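
/*
 * Translate an offset within the GART aperture back to a bus address by
 * reading the corresponding PTE out of the NV04-style ctxdma.  Only valid
 * for the pre-NV50 backends, hence the BUG_ON().
 */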
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

        BUG_ON(dev_priv->card_type >= NV_50);

        return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
                (offset & NV_CTXDMA_PAGE_MASK);
}