linux-2.6.32.60-moxart.git: drivers/gpu/drm/ttm/ttm_bo.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
		kfree(bo);
	}
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		int ret = 0;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return -ERESTART;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
		    (sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

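/*
 * Usage sketch (illustrative, not taken from this file): a caller that
 * needs exclusive access to a buffer object brackets the access with a
 * reserve/unreserve pair, e.g.
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...	(inspect or validate bo while it is off the LRU lists)
 *	ttm_bo_unreserve(bo);
 *
 * A successful ttm_bo_reserve() removes the buffer from the LRU lists, so it
 * must always be paired with ttm_bo_unreserve(), which puts it back and wakes
 * waiters on bo->event_queue.
 */
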
/*
 * Call bo->mutex locked.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through: device buffers are backed like kernel ones */
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0)) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {

			struct ttm_mem_reg *old_mem = &bo->mem;
			uint32_t save_flags = old_mem->placement;

			*old_mem = *mem;
			mem->mm_node = NULL;
			ttm_flag_masked(&save_flags, mem->placement,
					TTM_PL_MASK_MEMTYPE);
			goto moved;
		}

	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	}

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			kref_put(&bo->list_kref, ttm_bo_ref_bug);
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_release_list);

		return 0;
	}

	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

	spin_lock(&glob->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;

		/*
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */

		if (next != &bdev->ddestroy) {
			nentry = list_entry(next, struct ttm_buffer_object,
					    ddestroy);
			kref_get(&nentry->list_kref);
		}
		kref_get(&entry->list_kref);

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&glob->lru_lock);
		if (nentry) {
			bool next_onlist = !list_empty(next);
			spin_unlock(&glob->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */

			if (!next_onlist)
				break;
		}
		if (ret)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
			bool interruptible, bool no_wait)
{
	int ret = 0;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_reg evict_mem;
	uint32_t proposed_placement;

	if (bo->mem.mem_type != mem_type)
		goto out;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTART) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	proposed_placement = bdev->driver->evict_flags(bo);

	ret = ttm_bo_mem_space(bo, proposed_placement,
			       &evict_mem, interruptible, no_wait);
	if (unlikely(ret != 0 && ret != -ERESTART))
		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
				       &evict_mem, interruptible, no_wait);

	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		goto out;
	}

	spin_lock(&glob->lru_lock);
	if (evict_mem.mm_node) {
		drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	spin_unlock(&glob->lru_lock);
	bo->evicted = true;
out:
	return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem,
				  uint32_t mem_type,
				  bool interruptible, bool no_wait)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct drm_mm_node *node;
	struct ttm_buffer_object *entry;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int put_count = 0;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&man->manager);
	if (unlikely(ret != 0))
		return ret;

	spin_lock(&glob->lru_lock);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (list_empty(lru))
			break;

		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);

		ret =
		    ttm_bo_reserve_locked(entry, interruptible, no_wait,
					  false, 0);

		if (likely(ret == 0))
			put_count = ttm_bo_del_from_lru(entry);

		spin_unlock(&glob->lru_lock);

		if (unlikely(ret != 0))
			return ret;

		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);

		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

		ttm_bo_unreserve(entry);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		if (ret)
			return ret;

		spin_lock(&glob->lru_lock);
	} while (1);

	if (!node) {
		spin_unlock(&glob->lru_lock);
		return -ENOMEM;
	}

	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
	if (unlikely(!node)) {
		spin_unlock(&glob->lru_lock);
		goto retry_pre_get;
	}

	spin_unlock(&glob->lru_lock);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     uint32_t proposed_placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_type_manager *man;

	uint32_t num_prios = bdev->driver->num_mem_type_prio;
	const uint32_t *prios = bdev->driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_eagain = false;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       bo->type == ttm_bo_type_user,
					       mem_type, proposed_placement,
					       &cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			do {
				ret = drm_mm_pre_get(&man->manager);
				if (unlikely(ret))
					return ret;

				spin_lock(&glob->lru_lock);
				node = drm_mm_search_free(&man->manager,
							  mem->num_pages,
							  mem->page_alignment,
							  1);
				if (unlikely(!node)) {
					spin_unlock(&glob->lru_lock);
					break;
				}
				node = drm_mm_get_block_atomic(node,
							       mem->num_pages,
							       mem->page_alignment);
				spin_unlock(&glob->lru_lock);
			} while (!node);
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = bdev->driver->num_mem_busy_prio;
	prios = bdev->driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		if (!man->has_type)
			continue;

		if (!ttm_bo_mt_compatible(man,
					  bo->type == ttm_bo_type_user,
					  mem_type,
					  proposed_placement, &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);

		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
					     interruptible, no_wait);

		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}

		if (ret == -ERESTART)
			has_eagain = true;
	}

	ret = (has_eagain) ? -ERESTART : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

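/*
 * Sketch of how a driver feeds this search (hypothetical values, not part of
 * this file): the priority arrays in struct ttm_bo_driver decide the order in
 * which ttm_bo_mem_space() tries placements, e.g.
 *
 *	static uint32_t drv_mem_prios[]  = { TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };
 *	static uint32_t drv_busy_prios[] = { TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };
 *
 *	driver.mem_type_prio      = drv_mem_prios;
 *	driver.num_mem_type_prio  = ARRAY_SIZE(drv_mem_prios);
 *	driver.mem_busy_prio      = drv_busy_prios;
 *	driver.num_mem_busy_prio  = ARRAY_SIZE(drv_busy_prios);
 *
 * The first array is walked looking for free space; the second is walked when
 * eviction via ttm_bo_mem_force_space() is needed.
 */
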
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	ret = wait_event_interruptible(bo->event_queue,
				       atomic_read(&bo->cpu_writers) == 0);

	if (ret == -ERESTARTSYS)
		ret = -ERESTART;

	return ret;
}

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       uint32_t proposed_placement,
		       bool interruptible, bool no_wait)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret)
		return ret;

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;

	/*
	 * Determine where to move the buffer.
	 */

	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
			       interruptible, no_wait);
	if (ret)
		goto out_unlock;

	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&glob->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}

static int ttm_bo_mem_compat(uint32_t proposed_placement,
			     struct ttm_mem_reg *mem)
{
	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
		return 0;
	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
		return 0;

	return 1;
}

int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
			       uint32_t proposed_placement,
			       bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	bo->proposed_placement = proposed_placement;

	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
		  (unsigned long)proposed_placement,
		  (unsigned long)bo->mem.placement);

	/*
	 * Check whether we need to move buffer.
	 */

	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
					 interruptible, no_wait);
		if (ret) {
			if (ret != -ERESTART)
				printk(KERN_ERR TTM_PFX
				       "Failed moving buffer. "
				       "Proposed placement 0x%08x\n",
				       bo->proposed_placement);
			if (ret == -ENOMEM)
				printk(KERN_ERR TTM_PFX
				       "Out of aperture space or "
				       "DRM memory quota.\n");
			return ret;
		}
	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	/*
	 * Validation has succeeded, move the access and other
	 * non-mapping-related flag bits from the proposed flags to
	 * the active flags
	 */

	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
			~TTM_PL_MASK_MEMTYPE);

	return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);

int
ttm_bo_check_placement(struct ttm_buffer_object *bo,
		       uint32_t set_flags, uint32_t clr_flags)
{
	uint32_t new_mask = set_flags | clr_flags;

	if ((bo->type == ttm_bo_type_user) &&
	    (clr_flags & TTM_PL_FLAG_CACHED)) {
		printk(KERN_ERR TTM_PFX
		       "User buffers require cache-coherent memory.\n");
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
			printk(KERN_ERR TTM_PFX "Need to be root to modify"
			       " NO_EVICT status.\n");
			return -EINVAL;
		}

		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
			printk(KERN_ERR TTM_PFX
			       "Incompatible memory specification"
			       " for NO_EVICT buffer.\n");
			return -EINVAL;
		}
	}
	return 0;
}

int ttm_buffer_object_init(struct ttm_bo_device *bdev,
			   struct ttm_buffer_object *bo,
			   unsigned long size,
			   enum ttm_bo_type type,
			   uint32_t flags,
			   uint32_t page_alignment,
			   unsigned long buffer_start,
			   bool interruptible,
			   struct file *persistant_swap_storage,
			   size_t acc_size,
			   void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, flags, 0ULL);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * If no caching attributes are set, accept any form of caching.
	 */

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_MASK_CACHING;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */

	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_buffer_object_create(struct ttm_bo_device *bdev,
			     unsigned long size,
			     enum ttm_bo_type type,
			     uint32_t flags,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     bool interruptible,
			     struct file *persistant_swap_storage,
			     struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	int ret;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	size_t acc_size =
	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
				     page_alignment, buffer_start,
				     interruptible,
				     persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}

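/*
 * Usage sketch (illustrative only; the size, placement flags and error
 * handling are assumptions, not taken from this file): a driver creating a
 * 64 KiB kernel buffer in cached system memory might call
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_buffer_object_create(bdev, 65536, ttm_bo_type_kernel,
 *				       TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 *				       0, 0, false, NULL, &bo);
 *	if (ret)
 *		return ret;
 *
 * On success the object comes back validated and unreserved, and is released
 * with ttm_bo_unref(&bo).
 */
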
static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
			     uint32_t mem_type, bool allow_errors)
{
	int ret;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (ret && allow_errors)
		goto out;

	if (bo->mem.mem_type == mem_type)
		ret = ttm_bo_evict(bo, mem_type, false, false);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
		}
	}

out:
	return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   struct list_head *head,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry;
	int ret;
	int put_count;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);

	while (!list_empty(head)) {
		entry = list_first_entry(head, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);
		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
		put_count = ttm_bo_del_from_lru(entry);
		spin_unlock(&glob->lru_lock);
		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);
		BUG_ON(ret);
		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
		ttm_bo_unreserve(entry);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		spin_lock(&glob->lru_lock);
	}

	spin_unlock(&glob->lru_lock);

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
		       "memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

		spin_lock(&glob->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

		spin_unlock(&glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
		       "Illegal memory manager memory type %u.\n",
		       mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory type %u has not been initialized.\n",
		       mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_offset, unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory manager already initialized for type %d\n",
		       type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
			       "Zero size memory manager type %d\n",
			       type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

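/*
 * Caller sketch (hypothetical size; not from this file): after
 * ttm_bo_device_init() has set up the mandatory TTM_PL_SYSTEM type, a driver
 * registers its own managed ranges, e.g. 256 MiB of VRAM starting at offset 0:
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0,
 *			     (256 * 1024 * 1024) >> PAGE_SHIFT);
 *
 * p_offset and p_size are in pages; the driver's init_mem_type() hook fills
 * in the ttm_mem_type_manager flags, caching masks and gpu_offset.
 */
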
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct ttm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct ttm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_no_shrink;
	}

	glob->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	glob->ttm_bo_size = glob->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	atomic_set(&glob->bo_count, 0);

	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;

	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

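/*
 * Initialization-order sketch (names and sizes are hypothetical, not from
 * this file): a driver first acquires the ttm_bo_global object through a
 * ttm_global_reference, then initializes the device, then its own memory
 * types:
 *
 *	ret = ttm_bo_device_init(&drv->bdev, drv->bo_global_ref.ref.object,
 *				 &drv_bo_driver, drv_file_page_offset, false);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_init_mm(&drv->bdev, TTM_PL_VRAM, 0,
 *			     drv_vram_size >> PAGE_SHIFT);
 *
 * ttm_bo_device_release() undoes this in reverse, draining the delayed
 * destroy list before tearing down the address-space manager.
 */
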
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
		      struct ttm_mem_reg *mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	if (ttm_mem_reg_is_pci(bdev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;

	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		} else {
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
			     bool no_wait)
{
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (no_wait)
			return -EBUSY;
		else if (interruptible) {
			ret = wait_event_interruptible
			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
			if (unlikely(ret != 0))
				return -ERESTART;
		} else {
			wait_event(bo->event_queue,
				   atomic_read(&bo->reserved) == 0);
		}
	}
	return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}

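/*
 * Usage sketch (illustrative; error handling trimmed): code that wants to
 * write to a buffer with the CPU while keeping the GPU off it brackets the
 * access with a grab/release pair:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...	(CPU writes through a kmap or user mapping)
 *	ttm_bo_synccpu_write_release(bo);
 *
 * While cpu_writers is elevated, callers of ttm_bo_wait_cpu() block, or get
 * -EBUSY if they passed no_wait, until the matching release.
 */
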
/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}