/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		DRM_INFO("%30s: %6dkb BOs (%d)\n",
			 vc4->bo_labels[i].name,
			 vc4->bo_labels[i].size_allocated / 1024,
			 vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			 vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
			 vc4->purgeable.purged_size / 1024,
			 vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	mutex_lock(&vc4->bo_lock);
	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		seq_printf(m, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}
	mutex_unlock(&vc4->bo_lock);

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);

	return 0;
}
#endif

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing.  However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;
	}

	return free_slot;
}
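
/* A minimal usage sketch (illustrative only, not part of the driver): this is
 * the calling pattern used by vc4_label_bo_ioctl() at the bottom of this
 * file. The helper takes ownership of the kmalloc'd name in every outcome, so
 * the caller never frees it; the caller only maps the -1 return to an errno.
 */
#if 0	/* example only */
static int vc4_apply_user_label(struct vc4_dev *vc4,
				struct drm_gem_object *gem_obj, char *name)
{
	int label;

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);	/* consumes "name" */
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	mutex_unlock(&vc4->bo_lock);

	return label != -1 ? 0 : -ENOMEM;
}
#endif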

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}
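
/* The BO cache below is bucketed by size in whole pages: with 4 KiB pages a
 * 4 KiB BO maps to bucket 0, an 8 KiB BO to bucket 1, and so on. Callers are
 * expected to pass page-aligned, non-zero sizes (see the roundup() in
 * vc4_bo_create()).
 */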
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	reservation_object_fini(&bo->_resv);

	drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * heads.
		 */
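		/* A plain copy of the old head into new_list[i] would leave
		 * the first and last entries of a non-empty list still
		 * pointing back at the old (soon to be freed) head, which is
		 * why list_replace() is used to rewire those back-pointers.
		 */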
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}

		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    size_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);
	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use the BO to
		 * restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_dump(vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;
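
	/* Worked example (illustrative): a 1920x1080 dumb buffer at 32 bpp
	 * gets min_pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes, so the
	 * BO below ends up at least 7680 * 1080 bytes (about 7.9 MiB) before
	 * page rounding in vc4_bo_create().
	 */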
	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
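
	/* Anything that has sat in the cache for more than a second is
	 * returned to CMA; newer entries stay cached and the timer is
	 * re-armed below to check again roughly a second from now.
	 */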

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO.  Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}
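
/* A minimal pairing sketch (illustrative only, not part of the driver): code
 * that needs the BO contents to stay resident brackets the access with the
 * usecnt helpers above; while the count is non-zero the BO cannot be purged,
 * and dropping the last reference may move it back onto the purgeable pool.
 */
#if 0	/* example only */
static int vc4_with_bo_contents(struct vc4_bo *bo)
{
	int ret;

	ret = vc4_bo_inc_usecnt(bo);	/* fails for purged/invalid madv */
	if (ret)
		return ret;

	/* ... safely access bo->base.vaddr here ... */

	vc4_bo_dec_usecnt(bo);
	return 0;
}
#endif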

static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}
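
/* The timer callback runs in atomic context where vc4->bo_lock (a mutex)
 * cannot be taken, so it only kicks the work item; the actual cache trimming
 * happens in vc4_bo_cache_time_work().
 */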

struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	return bo->resv;
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

int vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}
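
/* Userspace-side sketch (illustrative only, not part of the driver): before
 * touching a mapping of a BO it previously marked VC4_MADV_DONTNEED, a client
 * marks it VC4_MADV_WILLNEED again and checks "retained"; if the kernel
 * purged the BO in the meantime its old contents are gone, and touching the
 * stale mapping would fault as implemented in vc4_fault() above. Assumes the
 * vc4 UAPI header, <errno.h> and a libdrm drmIoctl() wrapper.
 */
#if 0	/* example only */
static int vc4_reuse_purgeable_bo(int fd, uint32_t handle)
{
	struct drm_vc4_gem_madvise arg = {
		.handle = handle,
		.madv = VC4_MADV_WILLNEED,
	};

	if (drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg))
		return -errno;

	/* arg.retained == 0 means the backing pages were reclaimed and the
	 * BO must be reinitialized (or reallocated) before use.
	 */
	return arg.retained ? 0 : -ENODATA;
}
#endif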

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	unsigned long vm_pgoff;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmaping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	/* This ->vm_pgoff dance is needed to make all parties happy:
	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
	 *   mem-region, hence the need to set it to zero (the value set by
	 *   the DRM core is a virtual offset encoding the GEM object-id)
	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
	 *   initial value before returning from this function because it
	 *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
	 *   and this information will be used when we invalidate userspace
	 *   mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
	 */
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = 0;
	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	vma->vm_pgoff = vm_pgoff;

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("mmaping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct vc4_bo *bo;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	bo = to_vc4_bo(obj);
	bo->resv = attach->dmabuf->resv;

	return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}
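
/* Userspace-side sketch (illustrative only, not part of the driver): the
 * "offset" returned by DRM_IOCTL_VC4_MMAP_BO is only a lookup token into the
 * DRM file, not a physical address; it is passed straight to mmap() on the
 * same fd. Assumes <sys/mman.h> and a libdrm drmIoctl() wrapper.
 */
#if 0	/* example only */
static void *vc4_map_bo(int fd, uint32_t handle, size_t size)
{
	struct drm_vc4_mmap_bo map = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map))
		return NULL;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}
#endif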

static int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory left over from allocating out of the
	 * BO cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users do things like mmap the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}
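
/* Userspace-side sketch (illustrative only, not part of the driver): marking
 * a BO as T-format tiled so that framebuffers created from it default to the
 * tiled modifier described above. Assumes the vc4 UAPI header, <errno.h> and
 * a libdrm drmIoctl() wrapper.
 */
#if 0	/* example only */
static int vc4_mark_bo_t_tiled(int fd, uint32_t handle)
{
	struct drm_vc4_set_tiling set = {
		.handle = handle,
		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
	};

	return drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING, &set) ? -errno : 0;
}
#endif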

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}