/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_list_entry;
/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
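/* Worked example (illustrative, assuming the common default block_size of 9):
 * AMDGPU_VM_PTE_COUNT(adev) then evaluates to 1 << 9 = 512 entries, so one
 * page table covers 512 * 4KB = 2MB of virtual address space.
 */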
#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)
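/* Usage sketch (assumption: the FRAG field holds log2(fragment size / 4KB),
 * matching the amdgpu.vm_fragment_size convention where 4 means 64KB):
 *
 *	pte |= AMDGPU_PTE_FRAG(4);	// 4KB << 4 == 64KB fragment
 *
 * Larger fragments let the TLB cache a single entry for the whole range.
 */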
/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)
#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)
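/* Illustrative sketch of how the MTYPE macros pair with their masks: to
 * change the memory type of an existing PTE value, clear the field with the
 * mask before OR-ing in the new type, e.g. on gfx10:
 *
 *	pte &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
 *	pte |= AMDGPU_PTE_MTYPE_NV10(AMDGPU_MTYPE_NC);
 */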
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
/* Reserve 4MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(4ULL << 20)

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS		3
#define AMDGPU_GFXHUB_0			0
#define AMDGPU_MMHUB_0			1
#define AMDGPU_MMHUB_1			2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE		(1ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX		0
#define AMDGPU_VM_CONTEXT_COMPUTE	1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)
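/* Example (sketch, not a verbatim copy of the driver code): these bits are
 * tested against the vm_update_mode field in struct amdgpu_vm_manager below,
 * roughly:
 *
 *	if (adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE)
 *		vm->use_cpu_for_update = true;
 */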
/* VMPT level enumerate, and the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};
struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base	base;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt		*entries;
};
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};
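/* Illustrative sketch (hypothetical backend, not taken from a real IP block):
 * a hardware block that can write PTEs fills in this table and installs it
 * during its sw_init, e.g.:
 *
 *	static const struct amdgpu_vm_pte_funcs foo_vm_pte_funcs = {
 *		.copy_pte_num_dw = 7,
 *		.copy_pte	 = foo_vm_copy_pte,
 *		.write_pte	 = foo_vm_write_pte,
 *		.set_pte_pde	 = foo_vm_set_pte_pde,
 *	};
 *
 *	adev->vm_manager.vm_pte_funcs = &foo_vm_pte_funcs;
 *
 * The amdgpu_vm_copy_pte()/amdgpu_vm_write_pte()/amdgpu_vm_set_pte_pde()
 * wrapper macros further down dispatch through this table.
 */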
struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};
/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @direct: if changes should be made directly
	 */
	bool direct;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to be used for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;
};
struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
		       struct dma_fence *exclusive);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
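/* Sketch of the expected calling sequence (an assumption inferred from the
 * callback layout above, not a verbatim copy of the driver code):
 *
 *	struct amdgpu_vm_update_params params = {
 *		.adev = adev,
 *		.vm = vm,
 *	};
 *
 *	r = vm->update_funcs->map_table(bo);	// make the PT BO accessible
 *	if (!r)
 *		r = vm->update_funcs->prepare(&params, owner, exclusive);
 *	if (!r)
 *		r = vm->update_funcs->update(&params, bo, pe, addr,
 *					     count, incr, flags);
 *	if (!r)
 *		r = vm->update_funcs->commit(&params, &fence);
 */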
struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* Lock to prevent eviction while we are updating page tables
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct mutex		eviction_lock;
	bool			evicting;
	unsigned int		saved_flags;

	/* BOs which need a validation */
	struct list_head	evicted;

	/* PT BOs which relocated and their parent need an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;
	spinlock_t		invalidated_lock;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt	root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	direct;
	struct drm_sched_entity	delayed;

	/* Last submission to the scheduler entities */
	struct dma_fence	*last_direct;
	struct dma_fence	*last_delayed;

	unsigned int		pasid;
	/* dedicated to vm */
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* mark whether we can do the bulk move */
	bool			bulk_moveable;
	/* Flag to indicate if VM is used for compute */
	bool			is_compute_context;
};
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_scheds;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	bool					has_prt;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up VM of a page fault
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;

	/* counter of mapped memory through xgmi */
	uint32_t				xgmi_map_counter;
	struct mutex				lock_pstate;
};
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
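/* Selection sketch (assumption, mirroring the use_cpu_for_update flag in
 * struct amdgpu_vm): a VM picks one of the two implementations declared
 * above, roughly:
 *
 *	vm->update_funcs = vm->use_cpu_for_update ? &amdgpu_vm_cpu_funcs
 *						  : &amdgpu_vm_sdma_funcs;
 */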
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
			    uint64_t addr);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

#endif