/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>

#include "ttm_bo_api.h"
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"

#define TTM_MAX_BO_PRIORITY	4U
struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy(),
	 * so don't call ttm_tt_destroy() from within the callback or it
	 * will recurse forever.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};
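/*
 * Example: a minimal, hypothetical driver backend. The my_gart_*()
 * helpers stand in for driver-specific aperture programming and are not
 * part of this header; this is an illustrative sketch only.
 */
#if 0
static int my_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	/* Make ttm->pages visible in the aperture at bo_mem->start. */
	return my_gart_bind(ttm->pages, ttm->num_pages, bo_mem->start);
}

static int my_ttm_unbind(struct ttm_tt *ttm)
{
	my_gart_unbind(ttm->num_pages);
	return 0;
}

static void my_ttm_destroy(struct ttm_tt *ttm)
{
	/* Note: ttm_tt_fini(), not ttm_tt_destroy(), to avoid recursion. */
	ttm_tt_fini(ttm);
	kfree(ttm);
}

static struct ttm_backend_func my_backend_func = {
	.bind	 = my_ttm_bind,
	.unbind	 = my_ttm_unbind,
	.destroy = my_ttm_destroy,
};
#endif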
#define TTM_PAGE_FLAG_WRITE		(1 << 3)
#define TTM_PAGE_FLAG_SWAPPED		(1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP	(1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC	(1 << 6)
#define TTM_PAGE_FLAG_DMA32		(1 << 7)
#define TTM_PAGE_FLAG_SG		(1 << 8)
enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};
/**
 * struct ttm_tt
 *
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @num_pages: Number of pages in the page array.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct page *dummy_read_page;
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg; /* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};
/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};
#define TTM_MEMTYPE_FLAG_FIXED		(1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE	(1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA		(1 << 3) /* Can't map aperture */
struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int  (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @place: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by @place.
	 * If successful, @mem::mm_node should be set to a non-null value,
	 * @mem::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
	 * If the memory region cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
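/*
 * Example: sketch of a get_node() implementation backed by drm_mm, in
 * the spirit of the stock range manager (ttm_bo_manager_func, declared
 * near the end of this header). "struct my_mgr" and its spinlock are
 * hypothetical driver-private state kept behind man->priv.
 */
#if 0
static int my_get_node(struct ttm_mem_type_manager *man,
		       struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_mem_reg *mem)
{
	struct my_mgr *mgr = man->priv;
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	spin_lock(&mgr->lock);
	ret = drm_mm_insert_node(&mgr->mm, node, mem->num_pages);
	spin_unlock(&mgr->lock);

	if (ret) {
		/* No space: report mm_node == NULL with a zero return. */
		kfree(node);
		mem->mm_node = NULL;
		return 0;
	}

	mem->mm_node = node;
	mem->start = node->start;
	return 0;
}
#endif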
/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @move_lock: Lock protecting the move fence.
 * @lru: The lru list for this memory type.
 * @move: The fence of the last pipelined move operation.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */
struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct mutex io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;
	spinlock_t move_lock;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru[TTM_MAX_BO_PRIORITY];

	/*
	 * Protected by @move_lock.
	 */
	struct dma_fence *move;
};
/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */
struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: pointer to a struct ttm_bo_device.
	 * @size: Size of the data needed backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);

	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    bool interruptible, bool no_wait_gpu,
		    struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access)(struct ttm_buffer_object *bo,
			     struct file *filp);

	/**
	 * Hook to notify driver about a driver move so it
	 * can do tiling things and book-keeping.
	 *
	 * @evict: whether this move is evicting the buffer from the graphics
	 * address space
	 */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *new_mem);

	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback invoked when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);
};
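/*
 * Example: skeleton hookup for a hypothetical driver. All my_* symbols
 * are illustrative stand-ins; ttm_pool_populate()/ttm_pool_unpopulate()
 * are the generic page-pool helpers from drm/ttm/ttm_page_alloc.h.
 */
#if 0
static struct ttm_bo_driver my_bo_driver = {
	.ttm_tt_create	  = my_ttm_tt_create,
	.ttm_tt_populate  = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.init_mem_type	  = my_init_mem_type,
	.evict_flags	  = my_evict_flags,
	.move		  = NULL, /* fall back to memcpy moves */
	.verify_access	  = my_verify_access,
	.io_mem_reserve	  = my_io_mem_reserve,
	.io_mem_free	  = my_io_mem_free,
};
#endif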
/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};
/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct mutex device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru[TTM_MAX_BO_PRIORITY];

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};
#define TTM_NUM_MEM_TYPES 8

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @vma_manager: Address space manager
 * @glob: Pointer to the device's struct ttm_bo_global, whose lru_lock
 * protects the buffer + device lru lists and ddestroy lists.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];

	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager vma_manager;

	/*
	 * Protected by the global::lru_lock.
	 */
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */
	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */
	struct delayed_work wq;

	bool need_dma32;
};
/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
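/*
 * Example: the XOR trick above copies exactly the bits selected by @mask
 * from @new into *@old and leaves the remaining bits untouched. With
 * *old = 0b1010, new = 0b0101 and mask = 0b0011, the result is 0b1001:
 * the low two bits come from @new, the high two bits from *@old.
 */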
/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * -ENOMEM: Out of memory.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		       unsigned long size, uint32_t page_flags,
		       struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct page *dummy_read_page);
/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free memory of ttm_tt structure
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);
/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);
/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change caching policy of any default kernel mappings of
 * the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct file *persistent_swap_storage);
/**
 * ttm_tt_unpopulate - free pages from a ttm
 *
 * @ttm: Pointer to the ttm_tt structure
 *
 * Calls the driver method to free all pages from a ttm
 */
extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);
/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if @no_wait_gpu is true).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible,
			    bool no_wait_gpu);
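/*
 * Example: hypothetical use in a driver path that needs new space for
 * @bo. On success the caller owns the allocation described by @mem and
 * must give it back with ttm_bo_mem_put() if the subsequent move fails.
 * my_move_to() is an illustrative stand-in for a driver move.
 */
#if 0
	struct ttm_mem_reg mem = bo->mem;
	int ret;

	ret = ttm_bo_mem_space(bo, &placement, &mem, true, false);
	if (ret)
		return ret;

	ret = my_move_to(bo, &mem);
	if (ret)
		ttm_bo_mem_put(bo, &mem);
#endif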
extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device.
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      struct address_space *mapping,
			      uint64_t file_page_offset, bool need_dma32);
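/*
 * Example: typical device bring-up, loosely following drivers of this
 * era. my_dev and my_bo_driver are hypothetical; DRM_FILE_PAGE_OFFSET
 * is a per-driver constant marking where mmap offsets for buffers start.
 */
#if 0
	ret = ttm_bo_device_init(&my_dev->bdev,
				 my_dev->bo_global_ref.ref.object,
				 &my_bo_driver,
				 my_dev->drm->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 true /* need_dma32 */);
#endif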
/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if a @ticket is provided).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a @ticket is provided.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
				   bool interruptible, bool no_wait,
				   struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = ww_mutex_trylock(&bo->resv->lock);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
	else
		ret = ww_mutex_lock(&bo->resv->lock, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}
/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction,
 * (typically execbuf), should first obtain a unique validation sequence
 * number, and call this function with a @ticket whose stamp is set to that
 * sequence number. If, upon call of this function, the buffer object is
 * already reserved, the validation sequence is checked against the
 * validation sequence of the process currently reserving the buffer, and if
 * the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EDEADLK. Otherwise it
 * sleeps waiting for the buffer to become unreserved, after which it
 * retries reserving.
 * The caller should, when receiving an -EDEADLK error, release all its
 * buffer reservations, wait for @bo to become unreserved, and then rerun
 * the validation with the same validation sequence. This procedure will
 * always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if a @ticket is provided).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a @ticket is provided.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret;

	WARN_ON(!kref_read(&bo->kref));

	ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);

	return ret;
}
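/*
 * Example: reserving two buffers under one ww_acquire_ctx and backing
 * off on -EDEADLK, as described above. Illustrative sketch only; real
 * code would also handle -ERESTARTSYS and retry the whole sequence.
 */
#if 0
	struct ww_acquire_ctx ticket;
	int ret;

	ww_acquire_init(&ticket, &reservation_ww_class);

	ret = ttm_bo_reserve(bo_a, true, false, &ticket);
	if (ret)
		goto out;

	ret = ttm_bo_reserve(bo_b, true, false, &ticket);
	if (ret == -EDEADLK) {
		/*
		 * Back off: drop bo_a, sleep on bo_b's lock, then retry
		 * bo_a (the retry loop is elided in this sketch).
		 */
		ttm_bo_unreserve(bo_a);
		ret = ttm_bo_reserve_slowpath(bo_b, true, &ticket);
	}
	if (ret)
		goto out;

	/* ... validate and submit ... */

	ttm_bo_unreserve(bo_a);
	ttm_bo_unreserve(bo_b);
out:
	ww_acquire_fini(&ticket);
#endif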
/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	WARN_ON(!kref_read(&bo->kref));

	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       ticket);
	else
		ww_mutex_lock_slow(&bo->resv->lock, ticket);

	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);
	else if (ret == -EINTR)
		ret = -ERESTARTSYS;

	return ret;
}
/**
 * __ttm_bo_unreserve
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo where the buffer object is
 * already on lru lists.
 */
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ww_mutex_unlock(&bo->resv->lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}
	__ttm_bo_unreserve(bo);
}
/**
 * ttm_bo_unreserve_ticket
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ticket: ww_acquire_ctx used for reserving; unused here, kept for symmetry.
 *
 * Unreserve a previous reservation of @bo made with @ticket.
 */
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
					   struct ww_acquire_ctx *ticket)
{
	ttm_bo_unreserve(bo);
}
/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);
/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool interruptible, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);
/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool interruptible, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);
/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     struct dma_fence *fence, bool evict,
				     struct ttm_mem_reg *new_mem);
/**
 * ttm_bo_pipeline_move
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Function for pipelining accelerated moves. Either free the memory
 * immediately or hang it on a temporary buffer object.
 */
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags (TTM_PL_FLAG_XX) of the map.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
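/*
 * Example: typical use when setting up PTEs in a fault handler, deriving
 * the final protection from the BO's current placement. Illustrative
 * sketch of the calling pattern, not a complete handler.
 */
#if 0
	pgprot_t prot = ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
#endif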
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

#endif