1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
4 #include <linux/slab.h>
5 #include <linux/dma-mapping.h>
7 #include "lima_device.h"
10 #include "lima_regs.h"
13 struct list_head list
;
14 unsigned int ref_count
;
16 struct drm_mm_node node
;
/*
 * Two-level page table layout of a 32-bit GPU virtual address:
 * bits [31:22] index the page directory (PD), bits [21:12] the page
 * table (PT); 4K pages.  PTs are allocated in bigger "BT" chunks of
 * LIMA_VM_NUM_PT_PER_BT tables at once, so a VA is also addressable
 * as a (PBE, BTE) pair into the bts[] array.
 */
#define LIMA_VM_PD_SHIFT 22
#define LIMA_VM_PT_SHIFT 12
#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT

#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)

/* index extraction helpers: PD/PT entry and PT-block/entry-in-block */
#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
35 static void lima_vm_unmap_range(struct lima_vm
*vm
, u32 start
, u32 end
)
39 for (addr
= start
; addr
<= end
; addr
+= LIMA_PAGE_SIZE
) {
40 u32 pbe
= LIMA_PBE(addr
);
41 u32 bte
= LIMA_BTE(addr
);
43 vm
->bts
[pbe
].cpu
[bte
] = 0;
47 static int lima_vm_map_page(struct lima_vm
*vm
, dma_addr_t pa
, u32 va
)
49 u32 pbe
= LIMA_PBE(va
);
50 u32 bte
= LIMA_BTE(va
);
52 if (!vm
->bts
[pbe
].cpu
) {
57 vm
->bts
[pbe
].cpu
= dma_alloc_wc(
58 vm
->dev
->dev
, LIMA_PAGE_SIZE
<< LIMA_VM_NUM_PT_PER_BT_SHIFT
,
59 &vm
->bts
[pbe
].dma
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_ZERO
);
60 if (!vm
->bts
[pbe
].cpu
)
63 pts
= vm
->bts
[pbe
].dma
;
64 pd
= vm
->pd
.cpu
+ (pbe
<< LIMA_VM_NUM_PT_PER_BT_SHIFT
);
65 for (j
= 0; j
< LIMA_VM_NUM_PT_PER_BT
; j
++) {
66 pd
[j
] = pts
| LIMA_VM_FLAG_PRESENT
;
67 pts
+= LIMA_PAGE_SIZE
;
71 vm
->bts
[pbe
].cpu
[bte
] = pa
| LIMA_VM_FLAGS_CACHE
;
76 static struct lima_bo_va
*
77 lima_vm_bo_find(struct lima_vm
*vm
, struct lima_bo
*bo
)
79 struct lima_bo_va
*bo_va
, *ret
= NULL
;
81 list_for_each_entry(bo_va
, &bo
->va
, list
) {
82 if (bo_va
->vm
== vm
) {
91 int lima_vm_bo_add(struct lima_vm
*vm
, struct lima_bo
*bo
, bool create
)
93 struct lima_bo_va
*bo_va
;
94 struct sg_dma_page_iter sg_iter
;
97 mutex_lock(&bo
->lock
);
99 bo_va
= lima_vm_bo_find(vm
, bo
);
102 mutex_unlock(&bo
->lock
);
106 /* should not create new bo_va if not asked by caller */
108 mutex_unlock(&bo
->lock
);
112 bo_va
= kzalloc(sizeof(*bo_va
), GFP_KERNEL
);
119 bo_va
->ref_count
= 1;
121 mutex_lock(&vm
->lock
);
123 err
= drm_mm_insert_node(&vm
->mm
, &bo_va
->node
, lima_bo_size(bo
));
127 for_each_sg_dma_page(bo
->base
.sgt
->sgl
, &sg_iter
, bo
->base
.sgt
->nents
, 0) {
128 err
= lima_vm_map_page(vm
, sg_page_iter_dma_address(&sg_iter
),
129 bo_va
->node
.start
+ offset
);
136 mutex_unlock(&vm
->lock
);
138 list_add_tail(&bo_va
->list
, &bo
->va
);
140 mutex_unlock(&bo
->lock
);
145 lima_vm_unmap_range(vm
, bo_va
->node
.start
, bo_va
->node
.start
+ offset
- 1);
146 drm_mm_remove_node(&bo_va
->node
);
148 mutex_unlock(&vm
->lock
);
151 mutex_unlock(&bo
->lock
);
155 void lima_vm_bo_del(struct lima_vm
*vm
, struct lima_bo
*bo
)
157 struct lima_bo_va
*bo_va
;
159 mutex_lock(&bo
->lock
);
161 bo_va
= lima_vm_bo_find(vm
, bo
);
162 if (--bo_va
->ref_count
> 0) {
163 mutex_unlock(&bo
->lock
);
167 mutex_lock(&vm
->lock
);
169 lima_vm_unmap_range(vm
, bo_va
->node
.start
,
170 bo_va
->node
.start
+ bo_va
->node
.size
- 1);
172 drm_mm_remove_node(&bo_va
->node
);
174 mutex_unlock(&vm
->lock
);
176 list_del(&bo_va
->list
);
178 mutex_unlock(&bo
->lock
);
183 u32
lima_vm_get_va(struct lima_vm
*vm
, struct lima_bo
*bo
)
185 struct lima_bo_va
*bo_va
;
188 mutex_lock(&bo
->lock
);
190 bo_va
= lima_vm_bo_find(vm
, bo
);
191 ret
= bo_va
->node
.start
;
193 mutex_unlock(&bo
->lock
);
198 struct lima_vm
*lima_vm_create(struct lima_device
*dev
)
202 vm
= kzalloc(sizeof(*vm
), GFP_KERNEL
);
207 mutex_init(&vm
->lock
);
208 kref_init(&vm
->refcount
);
210 vm
->pd
.cpu
= dma_alloc_wc(dev
->dev
, LIMA_PAGE_SIZE
, &vm
->pd
.dma
,
211 GFP_KERNEL
| __GFP_NOWARN
| __GFP_ZERO
);
216 int err
= lima_vm_map_page(
217 vm
, dev
->dlbu_dma
, LIMA_VA_RESERVE_DLBU
);
222 drm_mm_init(&vm
->mm
, dev
->va_start
, dev
->va_end
- dev
->va_start
);
227 dma_free_wc(dev
->dev
, LIMA_PAGE_SIZE
, vm
->pd
.cpu
, vm
->pd
.dma
);
233 void lima_vm_release(struct kref
*kref
)
235 struct lima_vm
*vm
= container_of(kref
, struct lima_vm
, refcount
);
238 drm_mm_takedown(&vm
->mm
);
240 for (i
= 0; i
< LIMA_VM_NUM_BT
; i
++) {
242 dma_free_wc(vm
->dev
->dev
, LIMA_PAGE_SIZE
<< LIMA_VM_NUM_PT_PER_BT_SHIFT
,
243 vm
->bts
[i
].cpu
, vm
->bts
[i
].dma
);
247 dma_free_wc(vm
->dev
->dev
, LIMA_PAGE_SIZE
, vm
->pd
.cpu
, vm
->pd
.dma
);
252 void lima_vm_print(struct lima_vm
*vm
)
261 for (i
= 0; i
< LIMA_VM_NUM_BT
; i
++) {
266 for (j
= 0; j
< LIMA_VM_NUM_PT_PER_BT
; j
++) {
267 int idx
= (i
<< LIMA_VM_NUM_PT_PER_BT_SHIFT
) + j
;
269 printk(KERN_INFO
"lima vm pd %03x:%08x\n", idx
, pd
[idx
]);
271 for (k
= 0; k
< LIMA_PAGE_ENT_NUM
; k
++) {
275 printk(KERN_INFO
" pt %03x:%08x\n", k
, pte
);