/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
#define VMWGFX_DRIVER_DATE "20150810"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 9
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
};
struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
};
/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};
struct vmw_resource {
	struct vmw_private *dev_priv;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};
/*
 * Resources that are managed using ioctls.
 */

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
};

struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
};

struct vmw_framebuffer;
struct vmw_surface_offset;
struct vmw_surface {
	struct vmw_resource res;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;

	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};
struct vmw_marker_queue {
	struct list_head head;
};
struct vmw_fifo_state {
	unsigned long reserved_size;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};
struct vmw_relocation {
	SVGAGuestPtr *location;
};
/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and that it is placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};
/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
};
/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};
/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays.
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};
/**
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_screen_object,
};
struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};
struct vmw_legacy_display;

struct vmw_master {
	struct ttm_lock lock;
};
struct vmw_vga_topology_state {
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
	uint32_t width;
	uint32_t height;
};
/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:       Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
};
struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};
struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t prim_bb_mem;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_pitchlock;
	uint32_t num_displays;

	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */
	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * Block lastclose from racing with firstopen.
	 */
	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * framebuffers.
	 */
	struct ttm_object_device *tdev;

	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/*
	 * Protected by the cmdbuf mutex.
	 */
	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	spinlock_t svga_lock;

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool refuse_hibernation;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */
	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */
	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	enum vmw_dma_map_mode map_mode;

	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}
/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing-critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}
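
/*
 * Example (illustrative sketch only, not part of this header's API): the
 * typical way the helpers above are used for register access. The SVGA_REG_*
 * names are assumed to come from the device headers; only the
 * vmw_read()/vmw_write() pattern is the point here.
 *
 *	u32 caps = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 *
 *	vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 */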
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
 * Resource utilities - vmwgfx_resource.c
 */

struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size, bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf,
				 struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out,
				  struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */

extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       struct ttm_placement *placement,
				       bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
					 struct vmw_dma_buffer *buf,
					 bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
					   struct vmw_dma_buffer *bo,
					   bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);
/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);
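
/*
 * A minimal sketch of the reserve/commit pattern used with the FIFO helpers
 * above (illustrative only; the command struct pointed to by cmd is a
 * placeholder for whatever command is being emitted):
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	... fill in *cmd here ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */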
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);
/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
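
/*
 * A minimal usage sketch for the page iterator (illustrative only): walk all
 * pages of a buffer object's scatter/gather table and look at each DMA
 * address. vmw_bo_sg_table() and vmw_piter_start() are declared above; the
 * loop body is a placeholder.
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter iter;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (vmw_piter_next(&iter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		... use addr for one page ...
 *	}
 */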
/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
			     struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);
/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);
/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);
/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);
/*
 * MemoryOBject management - vmwgfx_mob.c
 */

extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);
/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out);
/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);
extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);
/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);
/*
 * COTable management - vmwgfx_cotable.c
 */

extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);
/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */

struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;

	*srf = NULL;
	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}
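
/*
 * Typical use of the reference helpers above (sketch only; vbo is a
 * hypothetical already-referenced buffer): take a reference while handing
 * the buffer to another holder, then drop it with the matching unreference,
 * which also clears the caller's pointer.
 *
 *	struct vmw_dma_buffer *buf = vmw_dmabuf_reference(vbo);
 *
 *	if (buf) {
 *		... use buf ...
 *		vmw_dmabuf_unreference(&buf);
 *	}
 */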
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}
/**
 * vmw_mmio_read - Perform an MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform an MMIO write to volatile memory
 *
 * @value: The value to write
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32() on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}
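
/*
 * Example (sketch only): these helpers are meant for the memremap'd FIFO
 * memory. Assuming a u32 *fifo_mem mapping of the FIFO (for instance the
 * driver's mmio mapping) and the SVGA_FIFO_* offsets from the device
 * headers:
 *
 *	u32 fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 *
 *	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
 */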