// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/* Called by DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

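	/*
	 * Note: for heap BOs the MMU code keeps one sg_table per 2MB chunk
	 * of the object (heap sizes are rounded up to 2MB at creation), so
	 * dividing the BO size by SZ_2M below bounds the walk over bo->sgts.
	 */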
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free_object(obj);
}

struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == &priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}
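
/*
 * Note: the mapping returned by panfrost_gem_mapping_get() holds an extra
 * reference taken under bo->mappings.lock; callers must balance it with
 * panfrost_gem_mapping_put() once they are done with the mapping.
 */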

static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_file_priv *priv;

	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
	spin_lock(&priv->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&priv->mm_lock);
}

static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	kfree(mapping);
}

void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24 bits. We assume executable buffers will be less than
	 * 16MB, so aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
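	/*
	 * Worked example: a 4MB executable BO gets align = 4MB >> PAGE_SHIFT,
	 * making its GPU VA a multiple of its own size; since 4MB divides
	 * 16MB evenly, [va, va + 4MB) can never straddle a 16MB boundary.
	 */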

	mapping->mmu = &priv->mmu;
	spin_lock(&priv->mm_lock);
	ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&priv->mm_lock);
	if (ret)
		goto err;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}
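
/*
 * Note on the exit path above: on success execution falls through the err
 * label with ret == 0, so the initial reference from kref_init() stays with
 * the entry on the bo->mappings list.
 */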

void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == &priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	if (to_panfrost_bo(obj)->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin(obj);
}
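
/*
 * Heap BOs are populated on demand by the GPU fault handler and have no
 * stable backing pages to pin, so panfrost_gem_pin() rejects them; this
 * also prevents dma-buf export of heap objects.
 */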

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;
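	/*
	 * Note on map_wc above: when the GPU is not IO-coherent, CPU mappings
	 * are made write-combined so CPU writes are not stranded in cache
	 * lines the GPU cannot snoop; coherent devices keep cached mappings.
	 */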

	return &obj->base.base;
}

struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *dev, size_t size,
				u32 flags,
				uint32_t *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	/*
	 * Allocate an id in the idr table where the obj is registered,
	 * and return it via @handle so userspace can refer to the BO.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return bo;
}
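
/*
 * Reference flow above: drm_gem_shmem_create() returns the BO with one
 * reference, drm_gem_handle_create() takes its own for the handle, and the
 * initial reference is dropped, leaving the userspace handle as the owner.
 */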

struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

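	/*
	 * Imported buffers are mapped non-executable: presumably because the
	 * executable-placement constraints in panfrost_gem_open() cannot be
	 * guaranteed for memory the driver did not allocate itself.
	 */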
	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	return obj;
}