/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QAIC_H_
#define _QAIC_H_

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>

#define QAIC_DBC_BASE		SZ_128K
#define QAIC_DBC_SIZE		SZ_4K

#define QAIC_NO_PARTITION	-1

#define QAIC_DBC_OFF(i)		((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
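
/*
 * Illustrative sketch, not part of the driver: per the macros above, the
 * register window for DBC i sits QAIC_DBC_SIZE * i bytes past QAIC_DBC_BASE.
 * The helper name below is hypothetical; it only restates QAIC_DBC_OFF().
 */
static inline unsigned long qaic_example_dbc_off(unsigned int i)
{
	/* e.g. i == 2 -> SZ_128K + 2 * SZ_4K == 0x22000 */
	return QAIC_DBC_OFF(i);
}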

#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)

enum __packed dev_states {
	/* Device is offline or will be very soon */
	QAIC_OFFLINE,
	/* Device is booting, not clear if it's in a usable state */
	QAIC_BOOT,
	/* Device is fully operational */
	QAIC_ONLINE,
};

extern bool datapath_polling;

struct qaic_user {
	/* Uniquely identifies this user for the device */
	int handle;
	struct kref ref_count;
	/* Char device opened by this user */
	struct qaic_drm_device *qddev;
	/* Node in list of users that opened this drm device */
	struct list_head node;
	/* SRCU used to synchronize this user during cleanup */
	struct srcu_struct qddev_lock;
	atomic_t chunk_id;
};
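
/*
 * Illustrative sketch, not part of the driver: per the comment on qddev_lock
 * above, readers enter the user's SRCU section before touching usr->qddev so
 * that cleanup can synchronize_srcu() against them. The helper name below is
 * hypothetical; the caller must srcu_read_unlock(&usr->qddev_lock, *rcu_id)
 * when done.
 */
static inline struct qaic_drm_device *qaic_example_get_qddev(struct qaic_user *usr,
							     int *rcu_id)
{
	*rcu_id = srcu_read_lock(&usr->qddev_lock);
	return usr->qddev;
}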

struct dma_bridge_chan {
	/* Pointer to device struct maintained by driver */
	struct qaic_device *qdev;
	/* ID of this DMA bridge channel (DBC) */
	unsigned int id;
	/* Synchronizes access to xfer_list */
	spinlock_t xfer_lock;
	/* Base address of request queue */
	void *req_q_base;
	/* Base address of response queue */
	void *rsp_q_base;
	/*
	 * Base bus address of request queue. Response queue bus address can be
	 * calculated by adding request queue size to this variable
	 */
	dma_addr_t dma_addr;
	/* Total size of request and response queue in bytes */
	u32 total_size;
	/* Capacity of request/response queue */
	u32 nelem;
	/* The user that opened this DBC */
	struct qaic_user *usr;
	/*
	 * Request ID of next memory handle that goes in request queue. One
	 * memory handle can enqueue more than one request element; all
	 * requests that belong to the same memory handle share the same
	 * request ID.
	 */
	u16 next_req_id;
	/* true: DBC is in use; false: DBC not in use */
	bool in_use;
	/*
	 * Base address of device registers. Used to read/write request and
	 * response queue's head and tail pointer of this DBC.
	 */
	void __iomem *dbc_base;
	/* Head of list where each node is a memory handle queued in request queue */
	struct list_head xfer_list;
	/* Synchronizes DBC readers during cleanup */
	struct srcu_struct ch_lock;
	/*
	 * When this DBC is released, any thread waiting on this wait queue is
	 * woken up
	 */
	wait_queue_head_t dbc_release;
	/* Head of list where each node is a bo associated with this DBC */
	struct list_head bo_lists;
	/* The irq line for this DBC. Used for polling */
	unsigned int irq;
	/* Polling work item to simulate interrupts */
	struct work_struct poll_work;
};
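
/*
 * Illustrative sketch, not part of the driver: the comment on dma_addr above
 * says the response queue's bus address follows the request queue. Assuming
 * the request queue occupies nelem elements of req_elem_size bytes each (the
 * helper name and the req_elem_size parameter are hypothetical), the
 * derivation would be:
 */
static inline dma_addr_t qaic_example_rsp_q_addr(struct dma_bridge_chan *dbc,
						 size_t req_elem_size)
{
	/* Response queue starts right after nelem request elements. */
	return dbc->dma_addr + (dma_addr_t)dbc->nelem * req_elem_size;
}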

struct qaic_device {
	/* Pointer to base PCI device struct of our physical device */
	struct pci_dev *pdev;
	/* Req. ID of request that will be queued next in MHI control device */
	u32 next_seq_num;
	/* Base address of bar 0 */
	void __iomem *bar_0;
	/* Base address of bar 2 */
	void __iomem *bar_2;
	/* Controller structure for MHI devices */
	struct mhi_controller *mhi_cntrl;
	/* MHI control channel device */
	struct mhi_device *cntl_ch;
	/* List of requests queued in MHI control device */
	struct list_head cntl_xfer_list;
	/* Synchronizes MHI control device transactions and its xfer list */
	struct mutex cntl_mutex;
	/* Array of DBC structs of this device */
	struct dma_bridge_chan *dbc;
	/* Work queue for tasks related to MHI control device */
	struct workqueue_struct *cntl_wq;
	/* Synchronizes all the users of device during cleanup */
	struct srcu_struct dev_lock;
	/* Track the state of the device during resets */
	enum dev_states dev_state;
	/* true: single MSI is used to operate device */
	bool single_msi;
	/*
	 * true: A tx MHI transaction has failed and an rx buffer is still
	 * queued in control device. Such a buffer is considered a lost rx
	 * buffer.
	 * false: No rx buffer is lost in control device.
	 */
	bool cntl_lost_buf;
	/* Maximum number of DBCs supported by this device */
	u32 num_dbc;
	/* Reference to the drm_device for this device when it is created */
	struct qaic_drm_device *qddev;
	/* Generate the CRC of a control message */
	u32 (*gen_crc)(void *msg);
	/* Validate the CRC of a control message */
	bool (*valid_crc)(void *msg);
	/* MHI "QAIC_TIMESYNC" channel device */
	struct mhi_device *qts_ch;
	/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
	struct workqueue_struct *qts_wq;
	/* Head of list of pages allocated by MHI bootlog device */
	struct list_head bootlog;
	/* MHI bootlog channel device */
	struct mhi_device *bootlog_ch;
	/* Work queue for tasks related to MHI bootlog device */
	struct workqueue_struct *bootlog_wq;
	/* Synchronizes access of pages in MHI bootlog device */
	struct mutex bootlog_mutex;
};
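
/*
 * Illustrative sketch, not part of the driver: dbc above is an array of
 * num_dbc channels indexed by DBC ID, so a bounds-checked lookup (the helper
 * name is hypothetical) would look like this.
 */
static inline struct dma_bridge_chan *qaic_example_get_dbc(struct qaic_device *qdev,
							   u32 dbc_id)
{
	if (dbc_id >= qdev->num_dbc)
		return NULL;
	return &qdev->dbc[dbc_id];
}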

struct qaic_drm_device {
	/* The drm device struct of this drm device */
	struct drm_device drm;
	/* Pointer to the root device struct driven by this driver */
	struct qaic_device *qdev;
	/*
	 * The physical device can be partitioned into a number of logical
	 * devices, and each logical device is given a partition id. This
	 * member stores that id. QAIC_NO_PARTITION is a sentinel used to mark
	 * that this drm device is the actual physical device.
	 */
	s32 partition_id;
	/* Head in list of users who have opened this drm device */
	struct list_head users;
	/* Synchronizes access to users list */
	struct mutex users_mutex;
};
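
/*
 * Illustrative sketch, not part of the driver: the to_qaic_*() conversion
 * macros defined near the top of this header recover driver-private state
 * from the embedded struct drm_device that DRM callbacks receive. The
 * hypothetical helper below spells out the chain that to_qaic_device()
 * performs in one step.
 */
static inline struct qaic_device *qaic_example_from_drm(struct drm_device *dev)
{
	struct qaic_drm_device *qddev = to_qaic_drm_device(dev);

	return qddev->qdev; /* same result as to_qaic_device(dev) */
}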

struct qaic_bo {
	struct drm_gem_object base;
	/* Scatter/gather table for allocated/imported BO */
	struct sg_table *sgt;
	/* Head in list of slices of this BO */
	struct list_head slices;
	/* Total nents, for all slices of this BO */
	int total_slice_nents;
	/*
	 * Direction of transfer. It can assume only two values,
	 * DMA_TO_DEVICE and DMA_FROM_DEVICE.
	 */
	int dir;
	/* The pointer of the DBC which operates on this BO */
	struct dma_bridge_chan *dbc;
	/* Number of slices that belong to this buffer */
	u32 nr_slice;
	/* Number of slices that have been transferred by DMA engine */
	u32 nr_slice_xfer_done;
	/*
	 * If true then user has attached slicing information to this BO by
	 * calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
	 */
	bool sliced;
	/* Request ID of this BO if it is queued for execution */
	u16 req_id;
	/* Handle assigned to this BO */
	u32 handle;
	/* Wait on this for completion of DMA transfer of this BO */
	struct completion xfer_done;
	/*
	 * Node in linked list where head is dbc->xfer_list.
	 * This linked list contains BOs that are queued for DMA transfer.
	 */
	struct list_head xfer_list;
	/*
	 * Node in linked list where head is dbc->bo_lists.
	 * This linked list contains BOs that are associated with the DBC it
	 * is linked to.
	 */
	struct list_head bo_list;
	struct {
		/*
		 * Latest timestamp(ns) at which kernel received a request to
		 * execute this BO
		 */
		u64 req_received_ts;
		/*
		 * Latest timestamp(ns) at which kernel enqueued requests of
		 * this BO for execution in DMA queue
		 */
		u64 req_submit_ts;
		/*
		 * Latest timestamp(ns) at which kernel received a completion
		 * interrupt for requests of this BO
		 */
		u64 req_processed_ts;
		/*
		 * Number of elements already enqueued in DMA queue before
		 * enqueuing requests of this BO
		 */
		u32 queue_level_before;
	} perf_stats;
	/* Synchronizes BO operations */
	struct mutex lock;
};
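
/*
 * Illustrative sketch, not part of the driver: the perf_stats timestamps
 * above bracket a BO's trip through the driver, so latency can be derived by
 * simple subtraction (all values in nanoseconds). The helper name is
 * hypothetical.
 */
static inline u64 qaic_example_bo_hw_latency_ns(const struct qaic_bo *bo)
{
	/* Time from DMA queue submission to completion interrupt. */
	return bo->perf_stats.req_processed_ts - bo->perf_stats.req_submit_ts;
}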

struct bo_slice {
	/* Mapped pages */
	struct sg_table *sgt;
	/* Number of requests required to queue in DMA queue */
	int nents;
	/* See enum dma_data_direction */
	int dir;
	/* Actual requests that will be copied in DMA queue */
	struct dbc_req *reqs;
	struct kref ref_count;
	/* true: No DMA transfer required */
	bool no_xfer;
	/* Pointer to the parent BO handle */
	struct qaic_bo *bo;
	/* Node in list of slices maintained by parent BO */
	struct list_head slice;
	/* Size of this slice in bytes */
	u64 size;
	/* Offset of this slice in buffer */
	u64 offset;
};
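
/*
 * Illustrative sketch, not part of the driver: each BO keeps its slices on
 * bo->slices, linked through bo_slice.slice, so a hypothetical walk totalling
 * the sliced bytes looks like this (list_for_each_entry() comes from
 * <linux/list.h>, pulled in transitively by the includes above).
 */
static inline u64 qaic_example_sliced_bytes(struct qaic_bo *bo)
{
	struct bo_slice *slice;
	u64 total = 0;

	list_for_each_entry(slice, &bo->slices, slice)
		total += slice->size;

	return total;
}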

int get_dbc_req_elem_size(void);
int get_dbc_rsp_elem_size(void);
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

int qaic_control_open(struct qaic_device *qdev);
void qaic_control_close(struct qaic_device *qdev);
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);

irqreturn_t dbc_irq_threaded_fn(int irq, void *data);
irqreturn_t dbc_irq_handler(int irq, void *data);
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail);

void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void irq_polling_work(struct work_struct *work);

#endif /* _QAIC_H_ */