drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h

/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>

struct nvkm_vma {
	struct list_head head;
	struct rb_node tree;
	u64 addr;
	u64 size:50;
	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
	u8 page:3; /* Requested page type (index, or NONE for automatic). */
	u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
	bool used:1; /* Region allocated. */
	bool part:1; /* Region was split from an allocated region by map(). */
	bool user:1; /* Region user-allocated. */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	bool mapped:1; /* Region contains valid pages. */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
	struct nvkm_tags *tags; /* Compression tag reference. */
};

struct nvkm_vmm {
	const struct nvkm_vmm_func *func;
	struct nvkm_mmu *mmu;
	const char *name;
	u32 debug;
	struct kref kref;
	struct mutex mutex;

	u64 start;
	u64 limit;

	struct nvkm_vmm_pt *pd;
	struct list_head join;

	struct list_head list;
	struct rb_root free;
	struct rb_root root;

	bool bootstrapped;
	atomic_t engref[NVKM_SUBDEV_NR];

	dma_addr_t null;
	void *nullp;

	bool replay;
};

int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
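
/*
 * Usage sketch (an assumption about typical in-driver usage, not taken from
 * this header): address-space allocations pair nvkm_vmm_get() with
 * nvkm_vmm_put(), while VMM lifetime is handled by nvkm_vmm_ref() and
 * nvkm_vmm_unref().  "vmm", "page" and "size" below are hypothetical
 * caller-provided values.
 *
 *	struct nvkm_vma *vma = NULL;
 *	int ret;
 *
 *	ret = nvkm_vmm_get(vmm, page, size, &vma);
 *	if (ret)
 *		return ret;
 *
 *	// ... vma->addr / vma->size now describe the reserved range ...
 *
 *	nvkm_vmm_put(vmm, &vma);	// releases the region, NULLs the pointer
 */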

struct nvkm_vmm_map {
	struct nvkm_memory *memory;
	u64 offset;

	struct nvkm_mm_node *mem;
	struct scatterlist *sgl;
	dma_addr_t *dma;
	u64 *pfn;
	u64 off;

	const struct nvkm_vmm_page *page;

	struct nvkm_tags *tags;
	u64 next;
	u64 type;
	u64 ctag;
};

int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
		 struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
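
/*
 * Mapping sketch (an assumption about a typical caller; the names below are
 * hypothetical): the backing memory is described by filling one of the
 * struct nvkm_vmm_map descriptors (mem/sgl/dma/pfn) alongside memory and
 * offset, then handed to nvkm_vmm_map() for a VMA obtained from
 * nvkm_vmm_get().  nvkm_vmm_unmap() tears the mapping down again.
 *
 *	struct nvkm_vmm_map map = {
 *		.memory = memory,	// nvkm_memory backing the pages
 *		.offset = 0,
 *		.dma    = dma_addrs,	// e.g. an array of DMA addresses
 *	};
 *	int ret = nvkm_vmm_map(vmm, vma, argv, argc, &map);
 *	if (ret)
 *		return ret;
 *	// ...
 *	nvkm_vmm_unmap(vmm, vma);
 */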

struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);

struct nvkm_mmu {
	const struct nvkm_mmu_func *func;
	struct nvkm_subdev subdev;

	u8 dma_bits;

	int heap_nr;
	struct {
#define NVKM_MEM_VRAM 0x01
#define NVKM_MEM_HOST 0x02
#define NVKM_MEM_COMP 0x04
#define NVKM_MEM_DISP 0x08
		u8 type;
		u64 size;
	} heap[4];

	int type_nr;
	struct {
#define NVKM_MEM_KIND 0x10
#define NVKM_MEM_MAPPABLE 0x20
#define NVKM_MEM_COHERENT 0x40
#define NVKM_MEM_UNCACHED 0x80
		u8 type;
		u8 heap;
	} type[16];

	struct nvkm_vmm *vmm;

	struct {
		struct mutex mutex;
		struct list_head list;
	} ptc, ptp;

	struct nvkm_device_oclass user;
};
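
/*
 * Sketch (an assumed use of the flag bits above): each type[] entry combines
 * NVKM_MEM_* capability flags with a heap index, so a caller can scan for a
 * memory type with the properties it needs.  "mmu" is a hypothetical pointer.
 *
 *	int i;
 *
 *	for (i = 0; i < mmu->type_nr; i++) {
 *		const u8 want = NVKM_MEM_HOST | NVKM_MEM_COHERENT;
 *		if ((mmu->type[i].type & want) == want)
 *			break;	// mmu->type[i].heap indexes mmu->heap[]
 *	}
 */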

int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif