/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#ifndef FREEDRENO_PRIV_H_
#define FREEDRENO_PRIV_H_

#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include <stdio.h>
#include <assert.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "xf86atomic.h"

#include "util_double_list.h"
#include "util_math.h"

#include "freedreno_drmif.h"
#include "freedreno_ringbuffer.h"
#include "drm.h"

#ifndef TRUE
# define TRUE 1
#endif
#ifndef FALSE
# define FALSE 0
#endif
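
/* backend vtable for device-level operations; the concrete implementation
 * is provided by the kernel-interface specific backend (msm or kgsl) when
 * the device is created:
 */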
struct fd_device_funcs {
	int (*bo_new_handle)(struct fd_device *dev, uint32_t size,
			uint32_t flags, uint32_t *handle);
	struct fd_bo * (*bo_from_handle)(struct fd_device *dev,
			uint32_t size, uint32_t handle);
	struct fd_pipe * (*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
			unsigned prio);
	void (*destroy)(struct fd_device *dev);
};
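
/* size-bucketed cache of free buffer objects, so recently freed buffers
 * can be recycled without a round-trip to the kernel:
 */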
struct fd_bo_bucket {
	uint32_t size;
	struct list_head list;
};

struct fd_bo_cache {
	struct fd_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;
};
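
/* the device object wraps the drm device fd, and is shared by all pipes
 * and buffer objects created from it:
 */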
struct fd_device {
	int fd;
	enum fd_version version;
	atomic_t refcnt;

	/* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
	 *
	 *   handle_table: maps handle to fd_bo
	 *   name_table: maps flink name to fd_bo
	 *
	 * We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
	 * returns a new handle.  So we need to figure out if the bo is already
	 * open in the process first, before calling gem-open.
	 */
	void *handle_table, *name_table;

	const struct fd_device_funcs *funcs;

	struct fd_bo_cache bo_cache;
	struct fd_bo_cache ring_cache;

	int closefd;        /* call close(fd) upon destruction */

	/* just for valgrind: */
	int bo_size;
};

drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
drm_private void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
drm_private struct fd_bo * fd_bo_cache_alloc(struct fd_bo_cache *cache,
		uint32_t *size, uint32_t flags);
drm_private int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);

/* for where @table_lock is already held: */
drm_private void fd_device_del_locked(struct fd_device *dev);
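
/* backend vtable for per-pipe operations (ringbuffer creation, parameter
 * queries, fence waits):
 */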
struct fd_pipe_funcs {
	struct fd_ringbuffer * (*ringbuffer_new)(struct fd_pipe *pipe, uint32_t size,
			enum fd_ringbuffer_flags flags);
	int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value);
	int (*wait)(struct fd_pipe *pipe, uint32_t timestamp, uint64_t timeout);
	void (*destroy)(struct fd_pipe *pipe);
};

struct fd_pipe {
	struct fd_device *dev;
	enum fd_pipe_id id;
	uint32_t gpu_id;
	atomic_t refcnt;
	const struct fd_pipe_funcs *funcs;
};
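
/* backend vtable for ringbuffer (command stream) operations: growing and
 * flushing the buffer, and emitting relocs to other buffers/rings:
 */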
struct fd_ringbuffer_funcs {
	void * (*hostptr)(struct fd_ringbuffer *ring);
	int (*flush)(struct fd_ringbuffer *ring, uint32_t *last_start,
			int in_fence_fd, int *out_fence_fd);
	void (*grow)(struct fd_ringbuffer *ring, uint32_t size);
	void (*reset)(struct fd_ringbuffer *ring);
	void (*emit_reloc)(struct fd_ringbuffer *ring,
			const struct fd_reloc *reloc);
	uint32_t (*emit_reloc_ring)(struct fd_ringbuffer *ring,
			struct fd_ringbuffer *target, uint32_t cmd_idx);
	uint32_t (*cmd_count)(struct fd_ringbuffer *ring);
	void (*destroy)(struct fd_ringbuffer *ring);
};
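
/* backend vtable for buffer-object operations (mmap offset lookup, cpu
 * access synchronization, madvise, iova query):
 */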
struct fd_bo_funcs {
	int (*offset)(struct fd_bo *bo, uint64_t *offset);
	int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
	void (*cpu_fini)(struct fd_bo *bo);
	int (*madvise)(struct fd_bo *bo, int willneed);
	uint64_t (*iova)(struct fd_bo *bo);
	void (*destroy)(struct fd_bo *bo);
};

struct fd_bo {
	struct fd_device *dev;
	uint32_t size;
	uint32_t handle;
	uint32_t name;
	void *map;
	atomic_t refcnt;
	const struct fd_bo_funcs *funcs;

	enum {
		NO_CACHE = 0,
		BO_CACHE = 1,
		RING_CACHE = 2,
	} bo_reuse;

	struct list_head list;   /* bucket-list entry */
	time_t free_time;        /* time when added to bucket-list */
};
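
/* allocate a bo for ringbuffer storage (recycled via the device's separate
 * ring_cache rather than the regular bo_cache):
 */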
drm_private struct fd_bo *fd_bo_new_ring(struct fd_device *dev,
		uint32_t size, uint32_t flags);

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

#define enable_debug 0  /* TODO make dynamic */

#define INFO_MSG(fmt, ...) \
		do { drmMsg("[I] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define DEBUG_MSG(fmt, ...) \
		do if (enable_debug) { drmMsg("[D] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define WARN_MSG(fmt, ...) \
		do { drmMsg("[W] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define ERROR_MSG(fmt, ...) \
		do { drmMsg("[E] " fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
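
/* convert between pointers and the 64-bit fields used in drm ioctl structs: */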
#define U642VOID(x) ((void *)(unsigned long)(x))
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))

static inline uint32_t
offset_bytes(void *end, void *start)
{
	return ((char *)end) - ((char *)start);
}

#if HAVE_VALGRIND
# include <memcheck.h>

/*
 * For tracking the backing memory (if valgrind enabled, we force a mmap
 * for the purposes of tracking)
 */

static inline void VG_BO_ALLOC(struct fd_bo *bo)
{
	if (bo && RUNNING_ON_VALGRIND) {
		VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
	}
}

static inline void VG_BO_FREE(struct fd_bo *bo)
{
	VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}

/*
 * For tracking bo structs that are in the buffer-cache, so that valgrind
 * doesn't attribute ownership to the first one to allocate the recycled
 * bo.
 *
 * Note that the list_head in fd_bo is used to track the buffers in cache
 * so disable error reporting on the range while they are in cache so
 * valgrind doesn't squawk about list traversal.
 */
static inline void VG_BO_RELEASE(struct fd_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
		VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
		VALGRIND_FREELIKE_BLOCK(bo->map, 0);
	}
}

static inline void VG_BO_OBTAIN(struct fd_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
		VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
		VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
	}
}
#else
static inline void VG_BO_ALLOC(struct fd_bo *bo) {}
static inline void VG_BO_FREE(struct fd_bo *bo) {}
static inline void VG_BO_RELEASE(struct fd_bo *bo) {}
static inline void VG_BO_OBTAIN(struct fd_bo *bo) {}
#endif

#endif /* FREEDRENO_PRIV_H_ */