// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free(&bo->base);
}
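
/*
 * Look up the mapping of @bo in the address space of @priv. On success the
 * mapping is returned with an extra reference held; callers drop it with
 * panfrost_gem_mapping_put(). Returns NULL if this file has no mapping of
 * the BO.
 */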
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}
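
/*
 * Tear down a single mapping: unmap it from the GPU address space if it is
 * currently mapped, then give its VA range back to the MMU context's
 * drm_mm allocator.
 */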
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}
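
/*
 * kref release handler: called once the last reference to a mapping is
 * dropped. Tears the mapping down and releases the BO and MMU context
 * references taken in panfrost_gem_open().
 */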
static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	panfrost_mmu_ctx_put(mapping->mmu);
	kfree(mapping);
}
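
/* Drop a reference on @mapping; a NULL mapping is a no-op. */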
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}
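
/*
 * Tear down every mapping of @bo. As the _locked suffix suggests, the
 * caller is expected to hold bo->mappings.lock; the mappings are left on
 * the list so their refcounts are not affected.
 */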
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}
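
/*
 * Called for each drm_file that obtains a handle to the BO: create a
 * per-file mapping, reserve a GPU VA range in that file's MMU context and,
 * unless the BO is a heap (which is populated on demand by the fault
 * handler), map its pages right away.
 */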
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24-bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}
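
/*
 * Called when a drm_file drops its handle to the BO: detach that file's
 * mapping from the list and drop the reference taken in panfrost_gem_open().
 */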
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}
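
/* Heap BOs are populated on demand and thus cannot be pinned for export. */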
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin_locked(&bo->base);
}
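
/*
 * Report residency and purgeability for memory accounting (fdinfo stats):
 * a BO is resident once it has pages (or is an import), and purgeable when
 * userspace has marked it DONTNEED.
 */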
static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	enum drm_gem_object_status res = 0;

	if (bo->base.base.import_attach || bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	if (bo->base.madv == PANFROST_MADV_DONTNEED)
		res |= DRM_GEM_OBJECT_PURGEABLE;

	return res;
}
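
/*
 * Resident set size of the BO: heap BOs only count the pages faulted in so
 * far, other BOs count their full backing storage once pages are allocated.
 */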
static size_t panfrost_gem_rss(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap) {
		return bo->heap_rss_size;
	} else if (bo->base.pages) {
		WARN_ON(bo->heap_rss_size);
		return bo->base.base.size;
	}

	return 0;
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = panfrost_gem_status,
	.rss = panfrost_gem_rss,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;

	return &obj->base.base;
}
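
/*
 * Create a panfrost BO backed by a shmem GEM object and record the
 * NOEXEC/HEAP flags, which drive the mapping decisions made in
 * panfrost_gem_open().
 */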
struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	return bo;
}
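
/*
 * dma-buf import path: wrap the imported sg_table in a shmem-backed GEM
 * object so the rest of the driver can treat it like a native BO.
 */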
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);