/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
 */

#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-fence.h>
#include <linux/spinlock.h>
#include <linux/types.h>

enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_NVJPG1 = 0x7,
	HOST1X_CLASS_NVENC = 0x21,
	HOST1X_CLASS_NVENC1 = 0x22,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVJPG = 0xC0,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
	HOST1X_CLASS_OFA = 0xF8,
};

struct host1x;
struct host1x_client;
struct iommu_group;

u64 host1x_get_dma_mask(struct host1x *host1x);

/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 *
 * Note that entries are not periodically evicted from this cache and instead need to be
 * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
 * released when the last reference to a buffer object represented by a mapping in this
 * cache is dropped.
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};

static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
{
	INIT_LIST_HEAD(&cache->mappings);
	mutex_init(&cache->lock);
}

static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/* XXX warn if not empty? */
	mutex_destroy(&cache->lock);
}
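
/*
 * Example (illustrative sketch, not part of the API above): a driver that
 * owns a cache typically embeds it in its private structure and pairs the
 * init/destroy helpers in its probe/remove paths. The "foo" names here are
 * hypothetical.
 *
 *	struct foo_drm {
 *		struct host1x_bo_cache cache;
 *	};
 *
 *	static int foo_probe(struct foo_drm *foo)
 *	{
 *		host1x_bo_cache_init(&foo->cache);
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct foo_drm *foo)
 *	{
 *		host1x_bo_cache_destroy(&foo->cache);
 *	}
 */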

/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};
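
/*
 * Example (illustrative sketch): an engine driver usually provides a static
 * ops table; ->init commonly acquires per-client resources such as a channel
 * and ->exit releases them. The "foo" names are hypothetical.
 *
 *	static int foo_client_init(struct host1x_client *client)
 *	{
 *		client->channel = host1x_channel_request(client);
 *		if (!client->channel)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 *
 *	static int foo_client_exit(struct host1x_client *client)
 *	{
 *		host1x_channel_put(client->channel);
 *		return 0;
 *	}
 *
 *	static const struct host1x_client_ops foo_client_ops = {
 *		.init = foo_client_init,
 *		.exit = foo_client_exit,
 *	};
 */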

/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex for mutually exclusive concurrency
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};

/*
 * host1x buffer objects
 */

struct host1x_bo;
struct sg_table;

struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};

static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}

struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};

static inline void host1x_bo_init(struct host1x_bo *bo,
				  const struct host1x_bo_ops *ops)
{
	INIT_LIST_HEAD(&bo->mappings);
	spin_lock_init(&bo->lock);
	bo->ops = ops;
}

static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}

static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}

struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);

static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}

static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}
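
/*
 * Example (illustrative sketch): pinning a buffer for device DMA and mapping
 * it into the kernel. "dev", "bo" and "cache" are assumed to exist; error
 * handling is abbreviated.
 *
 *	struct host1x_bo_mapping *map;
 *	void *virt;
 *
 *	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, cache);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	virt = host1x_bo_mmap(bo);
 *	... fill in the command stream via virt ...
 *	host1x_bo_munmap(bo, virt);
 *
 *	host1x_bo_unpin(map);
 */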

/*
 * host1x syncpoints
 */

#define HOST1X_SYNCPT_CLIENT_MANAGED	(1 << 0)
#define HOST1X_SYNCPT_HAS_BASE		(1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;

struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				      bool timeout);
void host1x_fence_cancel(struct dma_fence *fence);
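
/*
 * Example (illustrative sketch): requesting a client-managed syncpoint,
 * reserving one increment for a submission and waiting for completion.
 * "client" is assumed to be a registered host1x client.
 *
 *	struct host1x_syncpt *sp;
 *	u32 threshold, value;
 *	int err;
 *
 *	sp = host1x_syncpt_request(client, HOST1X_SYNCPT_CLIENT_MANAGED);
 *	if (!sp)
 *		return -ENOMEM;
 *
 *	threshold = host1x_syncpt_incr_max(sp, 1);
 *	... submit work that increments the syncpoint once ...
 *	err = host1x_syncpt_wait(sp, threshold, msecs_to_jiffies(100), &value);
 *
 *	host1x_syncpt_put(sp);
 */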

/*
 * host1x channel
 */

struct host1x_channel;
struct host1x_job;

struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
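
/*
 * Example (illustrative sketch): channels are reference counted; request one
 * per client, stop it to idle the hardware before teardown, then drop the
 * reference.
 *
 *	struct host1x_channel *channel;
 *
 *	channel = host1x_channel_request(client);
 *	if (!channel)
 *		return -EBUSY;
 *
 *	... submit jobs ...
 *
 *	host1x_channel_stop(channel);
 *	host1x_channel_put(channel);
 */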

/*
 * host1x job
 */

#define HOST1X_RELOC_READ	(1 << 0)
#define HOST1X_RELOC_WRITE	(1 << 1)

struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};

struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* Client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion fence for job tracking */
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether host1x-side firewall should be run for this job or not */
	bool enable_firewall;

	/* Options for configuring engine data stream ID */
	/* Context device to use for job */
	struct host1x_memory_context *memory_context;
	/* Stream ID to use if context isolation is disabled (!memory_context) */
	u32 engine_fallback_streamid;
	/* Engine offset to program stream ID to */
	u32 engine_streamid_offset;
};

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
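
/*
 * Example (illustrative sketch): allocating a job with one gather and no
 * relocations, then pinning and submitting it. "channel", "bo", "sp", "dev"
 * and "num_words" are assumed to exist; a real driver checks every error
 * path.
 *
 *	struct host1x_job *job;
 *	int err;
 *
 *	job = host1x_job_alloc(channel, 1, 0, false);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	host1x_job_add_gather(job, bo, num_words, 0);
 *	job->syncpt = host1x_syncpt_get(sp);
 *	job->syncpt_incrs = 1;
 *
 *	err = host1x_job_pin(job, dev);
 *	if (!err) {
 *		err = host1x_job_submit(job);
 *		if (err)
 *			host1x_job_unpin(job);
 *	}
 *
 *	host1x_job_put(job);
 */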

/*
 * subdevice probe infrastructure
 */

struct host1x_device;

/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};

static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}

int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
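
/*
 * Example (illustrative sketch): a logical host1x driver declares the OF
 * compatibles of the subdevices it binds against and registers itself. The
 * "foo" names and the compatible string are hypothetical.
 *
 *	static const struct of_device_id foo_subdevs[] = {
 *		{ .compatible = "nvidia,tegra124-vic" },
 *		{ }
 *	};
 *
 *	static struct host1x_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.subdevs = foo_subdevs,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *
 *	err = host1x_driver_register(&foo_driver);
 */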

struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};

static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}

int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})

int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})
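
/*
 * Example (illustrative sketch of the two-step form described above): the
 * "foo" names are hypothetical.
 *
 *	host1x_client_init(&foo->client);
 *
 *	... setup that must complete before the client becomes visible ...
 *
 *	err = __host1x_client_register(&foo->client);
 *	if (err)
 *		host1x_client_exit(&foo->client);
 */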

void host1x_client_unregister(struct host1x_client *client);

int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);

struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
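
/*
 * Example (illustrative sketch): a DSI driver typically requests the MIPI
 * device at probe time and brackets link bring-up with the calibration
 * calls. Error handling is abbreviated.
 *
 *	mipi = tegra_mipi_request(dev, dev->of_node);
 *	if (IS_ERR(mipi))
 *		return PTR_ERR(mipi);
 *
 *	err = tegra_mipi_enable(mipi);
 *	if (!err) {
 *		err = tegra_mipi_start_calibration(mipi);
 *		... enable the transmitter ...
 *		err = tegra_mipi_finish_calibration(mipi);
 *		tegra_mipi_disable(mipi);
 *	}
 *
 *	tegra_mipi_free(mipi);
 */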

/* host1x memory contexts */

struct host1x_memory_context {
	struct host1x *host;

	refcount_t ref;
	struct pid *owner;

	struct device_dma_parameters dma_parms;
	struct device dev;
	u64 dma_mask;
	u32 stream_id;
};

#ifdef CONFIG_IOMMU_API
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							   struct device *dev,
							   struct pid *pid);
void host1x_memory_context_get(struct host1x_memory_context *cd);
void host1x_memory_context_put(struct host1x_memory_context *cd);
#else
static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
									 struct device *dev,
									 struct pid *pid)
{
	return NULL;
}

static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
{
}

static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
{
}
#endif

#endif