/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"
/*
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
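/*
 * Illustrative sketch, not part of the driver proper: how a caller inside
 * amdgpu would typically use the helpers below to bind a set of already
 * DMA-mapped system pages into the GART and release them again.  The
 * offset/npages values, the AMDGPU_PTE_* flag choice and the
 * pagelist/dma_addr arrays are placeholders for illustration only.
 *
 *	uint64_t flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
 *	uint64_t offset = 0;		// byte offset into the GART aperture
 *	int npages = 4;			// number of CPU pages to map
 *	int r;
 *
 *	r = amdgpu_gart_bind(adev, offset, npages, pagelist, dma_addr, flags);
 *	if (r)
 *		return r;
 *	// ... the GPU can now reach the pages through the GART aperture ...
 *	r = amdgpu_gart_unbind(adev, offset, npages);
 */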
/*
 * Common GART table functions.
 */
/**
 * amdgpu_gart_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for GART entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
	struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;

	/* Nothing to do if the dummy page is already DMA mapped. */
	if (adev->dummy_page_addr)
		return 0;
	adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		adev->dummy_page_addr = 0;
		return -ENOMEM;
	}
	return 0;
}
/**
 * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
	if (!adev->dummy_page_addr)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page_addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	adev->dummy_page_addr = 0;
}
/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		struct amdgpu_bo_param bp;

		memset(&bp, 0, sizeof(bp));
		bp.size = adev->gart.table_size;
		bp.byte_align = PAGE_SIZE;
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
		bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		bp.type = ttm_bo_type_kernel;
		r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
		if (r)
			return r;
	}
	return 0;
}
/**
 * amdgpu_gart_table_vram_pin - pin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gart.bo, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r) {
		amdgpu_bo_unreserve(adev->gart.bo);
		return r;
	}
	/* Map the table so the driver can update GART entries via gart.ptr. */
	r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
	if (r)
		amdgpu_bo_unpin(adev->gart.bo);
	amdgpu_bo_unreserve(adev->gart.bo);
	return r;
}
/**
 * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->gart.bo, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->gart.bo);
		amdgpu_bo_unpin(adev->gart.bo);
		amdgpu_bo_unreserve(adev->gart.bo);
		adev->gart.ptr = NULL;
	}
}
/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
	if (adev->gart.bo == NULL) {
		return;
	}
	amdgpu_bo_unref(&adev->gart.bo);
}
/*
 * Common gart functions.
 */
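/*
 * Note on indexing (illustration only): the GART page table is managed in
 * GPU pages of AMDGPU_GPU_PAGE_SIZE bytes, while callers hand in CPU pages
 * of PAGE_SIZE bytes, so one CPU page covers AMDGPU_GPU_PAGES_IN_CPU_PAGE
 * consecutive GART entries.  Assuming 4K GPU pages on a 64K-page kernel,
 * for example, CPU page index p fills the 16 entries
 * t = 16 * p .. 16 * p + 15, which is what the inner j-loops in
 * amdgpu_gart_map() and amdgpu_gart_unbind() below iterate over.
 */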
/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
		       int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[p] = NULL;
#endif
		page_base = adev->dummy_page_addr;
		if (!adev->gart.ptr)
			continue;

		/* Point every GPU page of this CPU page at the dummy page. */
		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
					       t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
	return 0;
}
/**
 * amdgpu_gart_map - map dma_addresses into GART entries
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 * @dst: CPU address of the gart table
 *
 * Map the dma_addresses into GART entries (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
		    int pages, dma_addr_t *dma_addr, uint64_t flags,
		    void *dst)
{
	uint64_t page_base;
	unsigned i, j, t;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < pages; i++) {
		page_base = dma_addr[i];
		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	return 0;
}
/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 *
 * Binds the requested pages to the gart page table.
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	unsigned t, p;
#endif
	int r, i;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Remember the backing pages for the debugfs GART dump. */
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++)
		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif

	if (!adev->gart.ptr)
		return 0;

	r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
			    adev->gart.ptr);
	if (r)
		return r;

	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
	return 0;
}
/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->dummy_page_addr)
		return 0;

	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = amdgpu_gart_dummy_page_init(adev);
	if (r)
		return r;
	/* Compute table size */
	adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
	adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Allocate pages table */
	adev->gart.pages = vzalloc(array_size(sizeof(void *),
					      adev->gart.num_cpu_pages));
	if (adev->gart.pages == NULL)
		return -ENOMEM;
#endif

	return 0;
}
/**
 * amdgpu_gart_fini - tear down the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	vfree(adev->gart.pages);
	adev->gart.pages = NULL;
#endif
	amdgpu_gart_dummy_page_fini(adev);
}