// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "lima_device.h"
#include "lima_vm.h"
#include "lima_gem.h"
#include "lima_regs.h"
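
/* Per-(vm, bo) link: a lima_bo_va lives on the BO's va list for each
 * VM the BO is mapped into, ref-counted so repeated lima_vm_bo_add()
 * calls are balanced by lima_vm_bo_del() before the VA is released.
 */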
struct lima_bo_va {
	struct list_head list;
	unsigned int ref_count;

	struct drm_mm_node node;

	struct lima_vm *vm;
};
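
/* The Mali MMU uses a two-level page table: the top 10 bits of a
 * virtual address select a page directory entry (one per 4MB), the
 * next 10 bits a 4KB page table entry. Page tables are allocated in
 * blocks of LIMA_VM_NUM_PT_PER_BT, so an address also splits into a
 * block index (PBE) and an entry within that block (BTE).
 */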
#define LIMA_VM_PD_SHIFT 22
#define LIMA_VM_PT_SHIFT 12
#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT

#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)

#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
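
/* Clear the block table entries covering [start, end]; page-table
 * blocks themselves stay allocated until lima_vm_release().
 */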
static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
	u32 addr;

	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
		u32 pbe = LIMA_PBE(addr);
		u32 bte = LIMA_BTE(addr);

		vm->bts[pbe].cpu[bte] = 0;
	}
}
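
/* Install a single PTE for physical page pa at virtual address va,
 * allocating the containing page-table block on demand and wiring all
 * of its page directory entries before filling the entry itself.
 */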
static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
	u32 pbe = LIMA_PBE(va);
	u32 bte = LIMA_BTE(va);

	if (!vm->bts[pbe].cpu) {
		dma_addr_t pts;
		u32 *pd;
		int j;

		vm->bts[pbe].cpu = dma_alloc_wc(
			vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
			&vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
		if (!vm->bts[pbe].cpu)
			return -ENOMEM;

		pts = vm->bts[pbe].dma;
		pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			pd[j] = pts | LIMA_VM_FLAG_PRESENT;
			pts += LIMA_PAGE_SIZE;
		}
	}

	vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;

	return 0;
}
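
/* Find the bo_va linking this BO to the given VM, or NULL if the BO
 * is not mapped there; caller must hold bo->lock.
 */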
static struct lima_bo_va *
lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va, *ret = NULL;

	list_for_each_entry(bo_va, &bo->va, list) {
		if (bo_va->vm == vm) {
			ret = bo_va;
			break;
		}
	}

	return ret;
}
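
/* Take a mapping reference on bo in vm: bump the ref count if the BO
 * is already mapped, otherwise (only when create is set) reserve a VA
 * range from the VM's drm_mm and map every page of the BO's sg table.
 */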
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (bo_va) {
		bo_va->ref_count++;
		mutex_unlock(&bo->lock);
		return 0;
	}

	/* should not create new bo_va if not asked by caller */
	if (!create) {
		mutex_unlock(&bo->lock);
		return -ENOENT;
	}

	bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
	if (!bo_va) {
		err = -ENOMEM;
		goto err_out0;
	}

	bo_va->vm = vm;
	bo_va->ref_count = 1;

	mutex_lock(&vm->lock);

	err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
	if (err)
		goto err_out1;

	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       bo_va->node.start + offset);
		if (err)
			goto err_out2;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	list_add_tail(&bo_va->list, &bo->va);

	mutex_unlock(&bo->lock);
	return 0;

err_out2:
	lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1);
	drm_mm_remove_node(&bo_va->node);
err_out1:
	mutex_unlock(&vm->lock);
	kfree(bo_va);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}
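
/* Drop a mapping reference; on the last drop unmap the used range
 * (heap_size for heap BOs, which may be less than the reserved node),
 * remove the drm_mm node and free the bo_va.
 */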
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 size;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (--bo_va->ref_count > 0) {
		mutex_unlock(&bo->lock);
		return;
	}

	mutex_lock(&vm->lock);

	size = bo->heap_size ? bo->heap_size : bo_va->node.size;
	lima_vm_unmap_range(vm, bo_va->node.start,
			    bo_va->node.start + size - 1);

	drm_mm_remove_node(&bo_va->node);

	mutex_unlock(&vm->lock);

	list_del(&bo_va->list);

	mutex_unlock(&bo->lock);

	kfree(bo_va);
}
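
/* Return the GPU VA the BO is mapped at in this VM. No NULL check on
 * bo_va: callers are expected to already hold a lima_vm_bo_add()
 * reference.
 */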
u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 ret;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	ret = bo_va->node.start;

	mutex_unlock(&bo->lock);

	return ret;
}
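
/* Create a per-context VM: allocate the page directory, pre-map the
 * DLBU page for multi-PP load balancing when the device has one, and
 * init the drm_mm VA allocator over the device's VA window.
 */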
struct lima_vm *lima_vm_create(struct lima_device *dev)
{
	struct lima_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return NULL;

	vm->dev = dev;
	mutex_init(&vm->lock);
	kref_init(&vm->refcount);

	vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
				  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!vm->pd.cpu)
		goto err_out0;

	if (dev->dlbu_cpu) {
		int err = lima_vm_map_page(
			vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
		if (err)
			goto err_out1;
	}

	drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);

	return vm;

err_out1:
	dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
err_out0:
	kfree(vm);
	return NULL;
}
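
/* kref release callback: tear down the VA allocator, then free every
 * on-demand page-table block and finally the page directory.
 */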
void lima_vm_release(struct kref *kref)
{
	struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
	int i;

	drm_mm_takedown(&vm->mm);

	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (vm->bts[i].cpu)
			dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
				    vm->bts[i].cpu, vm->bts[i].dma);
	}

	if (vm->pd.cpu)
		dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);

	kfree(vm);
}
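
/* Debug helper: dump each present page directory entry and every
 * non-zero PTE beneath it.
 */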
void lima_vm_print(struct lima_vm *vm)
{
	int i, j, k;
	u32 *pd, *pt;

	if (!vm->pd.cpu)
		return;

	pd = vm->pd.cpu;
	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (!vm->bts[i].cpu)
			continue;

		pt = vm->bts[i].cpu;
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;

			printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);

			for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
				u32 pte = *pt++;

				if (pte)
					printk(KERN_INFO "  pt %03x:%08x\n", k, pte);
			}
		}
	}
}
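
/* Map further pages of an already-added BO starting at page offset
 * pageoff into its reserved node, e.g. when a heap BO gains backing
 * pages after lima_vm_bo_add().
 */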
int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;
	u32 base;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (!bo_va) {
		err = -ENOENT;
		goto err_out0;
	}

	mutex_lock(&vm->lock);

	base = bo_va->node.start + (pageoff << PAGE_SHIFT);
	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       base + offset);
		if (err)
			goto err_out1;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	mutex_unlock(&bo->lock);
	return 0;

err_out1:
	lima_vm_unmap_range(vm, base, base + offset - 1);
	mutex_unlock(&vm->lock);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}