treewide: remove redundant IS_ERR() before error code check
drivers/gpu/drm/lima/lima_vm.c (linux/fpc-iii.git, blob 840e2350d87263377da246bede8f3b767ac3f308)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "lima_device.h"
#include "lima_vm.h"
#include "lima_gem.h"
#include "lima_regs.h"
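
/*
 * Per-(VM, BO) mapping record: one lima_bo_va exists for each VM a buffer
 * object is mapped into, refcounted so that repeated lima_vm_bo_add()
 * calls for the same pair are cheap. The drm_mm_node holds the VA range
 * reserved for the BO in that VM.
 */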
struct lima_bo_va {
	struct list_head list;
	unsigned int ref_count;

	struct drm_mm_node node;

	struct lima_vm *vm;
};
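
/*
 * GPU VA layout: a two-level table where bits [31:22] of a VA select the
 * page directory (PD) entry and bits [21:12] select the page table (PT)
 * entry. PTs are allocated in contiguous blocks of LIMA_VM_NUM_PT_PER_BT
 * (defined in lima_vm.h), and the PBE/BTE macros index those blocks. As a
 * worked example, va = 0x00c01000 gives LIMA_PDE(va) = 0x3 and
 * LIMA_PTE(va) = 0x1.
 */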
#define LIMA_VM_PD_SHIFT 22
#define LIMA_VM_PT_SHIFT 12
#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT

#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)

#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
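
/*
 * Clear the PTEs covering [start, end] (end inclusive). Called with
 * vm->lock held, and only for ranges that were previously mapped, so the
 * block tables being indexed are assumed to exist.
 */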
static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
	u32 addr;

	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
		u32 pbe = LIMA_PBE(addr);
		u32 bte = LIMA_BTE(addr);

		vm->bts[pbe].cpu[bte] = 0;
	}
}
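
/*
 * Map a single page: on first touch of a block table, allocate it and
 * point all of its PD entries at the new PTs in one pass, then fill in
 * the one PTE. Returns -ENOMEM if the block-table allocation fails.
 * Callers either hold vm->lock or exclusively own a not-yet-published vm
 * (lima_vm_create()).
 */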
static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
	u32 pbe = LIMA_PBE(va);
	u32 bte = LIMA_BTE(va);

	if (!vm->bts[pbe].cpu) {
		dma_addr_t pts;
		u32 *pd;
		int j;

		vm->bts[pbe].cpu = dma_alloc_wc(
			vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
			&vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
		if (!vm->bts[pbe].cpu)
			return -ENOMEM;

		pts = vm->bts[pbe].dma;
		pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			pd[j] = pts | LIMA_VM_FLAG_PRESENT;
			pts += LIMA_PAGE_SIZE;
		}
	}

	vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;

	return 0;
}
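
/*
 * Look up the bo_va linking @bo to @vm, or NULL if the BO is not mapped
 * in this VM. bo->lock must be held to walk bo->va safely.
 */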
static struct lima_bo_va *
lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va, *ret = NULL;

	list_for_each_entry(bo_va, &bo->va, list) {
		if (bo_va->vm == vm) {
			ret = bo_va;
			break;
		}
	}

	return ret;
}
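
/*
 * Map @bo into @vm, or just take another reference if it is already
 * mapped. With @create false this only succeeds for an existing mapping
 * (-ENOENT otherwise); with @create true a VA range is reserved from
 * vm->mm and every sg page of the BO is mapped into it. On failure the
 * partial mapping and the reserved node are rolled back.
 */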
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (bo_va) {
		bo_va->ref_count++;
		mutex_unlock(&bo->lock);
		return 0;
	}

	/* should not create new bo_va if not asked by caller */
	if (!create) {
		mutex_unlock(&bo->lock);
		return -ENOENT;
	}

	bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
	if (!bo_va) {
		err = -ENOMEM;
		goto err_out0;
	}

	bo_va->vm = vm;
	bo_va->ref_count = 1;

	mutex_lock(&vm->lock);

	err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
	if (err)
		goto err_out1;

	for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       bo_va->node.start + offset);
		if (err)
			goto err_out2;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	list_add_tail(&bo_va->list, &bo->va);

	mutex_unlock(&bo->lock);
	return 0;

err_out2:
	if (offset)
		lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1);
	drm_mm_remove_node(&bo_va->node);
err_out1:
	mutex_unlock(&vm->lock);
	kfree(bo_va);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}
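
/*
 * Drop one reference on the @vm mapping of @bo; on the last reference,
 * unmap the VA range, release the drm_mm node and free the bo_va.
 * Assumes the BO was previously added to this VM by lima_vm_bo_add().
 */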
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (--bo_va->ref_count > 0) {
		mutex_unlock(&bo->lock);
		return;
	}

	mutex_lock(&vm->lock);

	lima_vm_unmap_range(vm, bo_va->node.start,
			    bo_va->node.start + bo_va->node.size - 1);

	drm_mm_remove_node(&bo_va->node);

	mutex_unlock(&vm->lock);

	list_del(&bo_va->list);

	mutex_unlock(&bo->lock);

	kfree(bo_va);
}
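
/*
 * Return the GPU VA at which @bo is mapped in @vm. Like lima_vm_bo_del(),
 * this assumes the mapping exists; the narrowing of node.start from u64
 * to u32 is fine here since the GPU address space is 32-bit.
 */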
u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 ret;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	ret = bo_va->node.start;

	mutex_unlock(&bo->lock);

	return ret;
}
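
/*
 * Allocate a VM: a zeroed write-combined page directory, an optional
 * mapping for the shared DLBU page, and a drm_mm range manager covering
 * [dev->va_start, dev->va_end). Returns NULL on allocation failure.
 */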
struct lima_vm *lima_vm_create(struct lima_device *dev)
{
	struct lima_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return NULL;

	vm->dev = dev;
	mutex_init(&vm->lock);
	kref_init(&vm->refcount);

	vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
				  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!vm->pd.cpu)
		goto err_out0;

	if (dev->dlbu_cpu) {
		int err = lima_vm_map_page(
			vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
		if (err)
			goto err_out1;
	}

	drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);

	return vm;

err_out1:
	dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
err_out0:
	kfree(vm);
	return NULL;
}
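
/*
 * kref release callback: tear down the range manager and free the page
 * directory and any allocated block tables. By the time the last
 * reference drops, all BO mappings must already have been removed
 * (drm_mm_takedown() warns if nodes remain).
 */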
void lima_vm_release(struct kref *kref)
{
	struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
	int i;

	drm_mm_takedown(&vm->mm);

	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (vm->bts[i].cpu)
			dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
				    vm->bts[i].cpu, vm->bts[i].dma);
	}

	if (vm->pd.cpu)
		dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);

	kfree(vm);
}
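
/*
 * Debug helper: dump every PD entry with a backing block table, and every
 * non-zero PTE under it, to the kernel log. Output can be large on a
 * heavily mapped VM.
 */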
void lima_vm_print(struct lima_vm *vm)
{
	int i, j, k;
	u32 *pd, *pt;

	if (!vm->pd.cpu)
		return;

	pd = vm->pd.cpu;
	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (!vm->bts[i].cpu)
			continue;

		pt = vm->bts[i].cpu;
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;

			printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);

			for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
				u32 pte = *pt++;

				if (pte)
					printk(KERN_INFO "  pt %03x:%08x\n", k, pte);
			}
		}
	}
}