/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128

struct idxd_device_driver {
	struct device_driver drv;
};

struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between the irq thread processing
	 * completed descriptors and the irq thread processing error
	 * descriptors.
	 */
	spinlock_t list_lock;
};

struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct cdev cdev;
	struct device *dev;
	int minor;
	struct wait_queue_head err_queue;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE	1024
#define WQ_TYPE_SIZE	10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_wq {
	void __iomem *portal;
	struct device conf_dev;
	struct idxd_cdev idxd_cdev;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	void *compls_raw;
	dma_addr_t compls_addr;
	dma_addr_t compls_addr_raw;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct dma_chan dma_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
	bool ats_dis;
};

struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};

struct idxd_device {
	enum idxd_type type;
	struct device conf_dev;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u8 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	struct completion *cmd_done;
	struct idxd_group *groups;
	struct idxd_wq *wqs;
	struct idxd_engine *engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;
	int compl_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	struct msix_entry *msix_entries;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct dma_device dma_dev;
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};

#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

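/*
 * Illustrative only (not part of this header's API): sysfs attribute
 * callbacks receive the embedded conf_dev and can recover the owning
 * object with the macros above, e.g.
 *
 *	static ssize_t wq_size_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		struct idxd_wq *wq = confdev_to_wq(dev);
 *
 *		return sysfs_emit(buf, "%u\n", wq->size);
 *	}
 */
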
extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;

extern bool support_enqcmd;

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return (support_enqcmd && device_pasid_enabled(idxd));
}

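/*
 * Note (summary, not normative): a dedicated WQ is owned by a single
 * client and can be written with MOVDIR64B, while a shared WQ is
 * submitted to with ENQCMD/ENQCMDS and therefore needs a valid PASID;
 * device_swq_supported() captures that shared WQs are only usable when
 * the CPU supports ENQCMD and the device has PASID enabled.
 */
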
enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}

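/*
 * Worked example (assuming 4 KB pages, i.e. PAGE_SHIFT == 12): each WQ
 * is allotted four portal pages starting at wq_id * 4 pages into the
 * portal BAR, and the limited portal sits one page in.  So wq_id 2 with
 * IDXD_PORTAL_LIMITED resolves to ((2 * 4) << 12) + 0x1000 = 0x9000.
 */
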
static inline void idxd_set_type(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
		idxd->type = IDXD_TYPE_DSA;
	else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
		idxd->type = IDXD_TYPE_IAX;
	else
		idxd->type = IDXD_TYPE_UNKNOWN;
}

static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

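/*
 * client_count is a plain int rather than an atomic or kref; callers
 * (e.g. the cdev open/release paths) are expected to serialize the
 * get/put helpers with wq->wq_lock.
 */
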
const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);

/* device interrupt control */
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
void idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);

/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);

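/*
 * Typical submission flow (simplified sketch of the dmaengine path, for
 * orientation only):
 *
 *	struct idxd_desc *desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	// fill in desc->hw: opcode, source/destination, completion addr
 *	rc = idxd_submit_desc(wq, desc);
 *	if (rc < 0)
 *		idxd_free_desc(wq, desc);
 *	// completion is then reported via the WQ's interrupt thread
 */
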
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

#endif