/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "registers.h"
#define IDXD_DRIVER_VERSION "1.00"

extern struct kmem_cache *idxd_desc_pool;

#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
enum idxd_type {
        IDXD_TYPE_UNKNOWN = -1,
        IDXD_TYPE_DSA = 0,
        IDXD_TYPE_IAX,
        IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE 128
struct idxd_device_driver {
        struct device_driver drv;
};
struct idxd_irq_entry {
        struct idxd_device *idxd;
        int id;
        struct llist_head pending_llist;
        struct list_head work_list;
        /*
         * Lock to protect access between the irq thread processing
         * pending descriptors and the irq thread processing error
         * descriptors.
         */
        spinlock_t list_lock;
};
struct idxd_group {
        struct device conf_dev;
        struct idxd_device *idxd;
};
#define IDXD_MAX_PRIORITY 0xf

enum idxd_wq_flag {
        WQ_FLAG_DEDICATED = 0,
        WQ_FLAG_BLOCK_ON_FAULT,
};

struct idxd_cdev {
        struct cdev cdev;
        struct device *dev;
        int minor;
        struct wait_queue_head err_queue;
};
#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10

enum idxd_complete_type {
        IDXD_COMPLETE_NORMAL = 0,
        IDXD_COMPLETE_ABORT,
        IDXD_COMPLETE_DEV_FAIL,
};
struct idxd_wq {
        void __iomem *portal;
        struct device conf_dev;
        struct idxd_cdev idxd_cdev;
        struct idxd_device *idxd;
        int id;
        enum idxd_wq_type type;
        struct idxd_group *group;
        int client_count;
        struct mutex wq_lock;   /* mutex for workqueue */
        enum idxd_wq_state state;
        unsigned long flags;
        u32 vec_ptr;            /* interrupt steering */
        struct dsa_hw_desc **hw_descs;
        union {
                struct dsa_completion_record *compls;
                struct iax_completion_record *iax_compls;
        };
        dma_addr_t compls_addr;
        dma_addr_t compls_addr_raw;
        struct idxd_desc **descs;
        struct sbitmap_queue sbq;
        struct dma_chan dma_chan;
        char name[WQ_NAME_SIZE + 1];
};
struct idxd_engine {
        struct device conf_dev;
        struct idxd_group *group;
        struct idxd_device *idxd;
};
/* shadow registers */
struct idxd_hw {
        union gen_cap_reg gen_cap;
        union wq_cap_reg wq_cap;
        union group_cap_reg group_cap;
        union engine_cap_reg engine_cap;
};
enum idxd_device_state {
        IDXD_DEV_HALTED = -1,
        IDXD_DEV_DISABLED = 0,
};
enum idxd_device_flag {
        IDXD_FLAG_CONFIGURABLE = 0,
        IDXD_FLAG_CMD_RUNNING,
        IDXD_FLAG_PASID_ENABLED,
};
struct idxd_device {
        struct device conf_dev;
        struct list_head list;
        enum idxd_device_state state;
        unsigned long flags;
        enum idxd_type type;

        struct pci_dev *pdev;
        void __iomem *reg_base;

        spinlock_t dev_lock;    /* spinlock for device */
        struct completion *cmd_done;
        struct idxd_group *groups;
        struct idxd_engine *engines;

        struct iommu_sva *sva;

        u32 msix_perm_offset;

        int nr_tokens;          /* non-reserved tokens */
        unsigned int wqcfg_size;

        union sw_err_reg sw_err;
        wait_queue_head_t cmd_waitq;
        struct msix_entry *msix_entries;
        struct idxd_irq_entry *irq_entries;

        struct dma_device dma_dev;
        struct workqueue_struct *wq;
        struct work_struct work;
};
/* IDXD software descriptor */
struct idxd_desc {
        union {
                struct dsa_hw_desc *hw;
                struct iax_hw_desc *iax_hw;
        };
        union {
                struct dsa_completion_record *completion;
                struct iax_completion_record *iax_completion;
        };
        dma_addr_t compl_dma;
        struct dma_async_tx_descriptor txd;
        struct llist_node llnode;
        struct list_head list;
};
#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
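/*
 * Example (hypothetical sysfs callback, sketch): attributes hang off the
 * embedded conf_dev, so callbacks recover the owning object with these
 * macros. The nr_tokens_show() helper below is illustrative only:
 *
 *      static ssize_t nr_tokens_show(struct device *dev,
 *                                    struct device_attribute *attr, char *buf)
 *      {
 *              struct idxd_device *idxd = confdev_to_idxd(dev);
 *
 *              return sprintf(buf, "%d\n", idxd->nr_tokens);
 *      }
 */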
extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;

extern bool support_enqcmd;
static inline bool wq_dedicated(struct idxd_wq *wq)
{
        return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
        return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}
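/*
 * A dedicated WQ serves a single client and is written with MOVDIR64B,
 * while a shared WQ multiplexes clients via ENQCMD(S). Hypothetical
 * submission-path dispatch (sketch, assuming the iosubmit_cmds512() and
 * enqcmds() helpers are available on this kernel):
 *
 *      if (wq_dedicated(wq))
 *              iosubmit_cmds512(portal, desc->hw, 1);
 *      else
 *              rc = enqcmds(portal, desc->hw);
 */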
static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
        return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
        return (support_enqcmd && device_pasid_enabled(idxd));
}
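/*
 * Note: shared WQ submission relies on ENQCMD(S), which carries the PASID
 * in the descriptor payload; without ENQCMD on the CPU and PASID enabled
 * on the device, only dedicated WQs are usable. Hypothetical guard in a
 * setup path (sketch):
 *
 *      if (wq_shared(wq) && !device_swq_supported(idxd))
 *              return -ENXIO;
 */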
enum idxd_portal_prot {
        IDXD_PORTAL_UNLIMITED = 0,
        IDXD_PORTAL_LIMITED,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
        return prot * 0x1000;
}
static inline int idxd_get_wq_portal_full_offset(int wq_id,
                                                 enum idxd_portal_prot prot)
{
        return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
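/*
 * Each WQ owns four portal pages (hence the wq_id * 4 page scaling), with
 * the protection variants at 0x1000 strides within them. Sketch of mapping
 * the limited portal for a WQ, assuming the IDXD_WQ_BAR and
 * IDXD_PORTAL_SIZE definitions from registers.h:
 *
 *      wq->portal = devm_ioremap(&idxd->pdev->dev,
 *                      pci_resource_start(idxd->pdev, IDXD_WQ_BAR) +
 *                      idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED),
 *                      IDXD_PORTAL_SIZE);
 */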
static inline void idxd_set_type(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;

        if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
                idxd->type = IDXD_TYPE_DSA;
        else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
                idxd->type = IDXD_TYPE_IAX;
        else
                idxd->type = IDXD_TYPE_UNKNOWN;
}
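/*
 * Example (sketch): a probe path would classify the part before touching
 * any type-specific state, bailing out on unrecognized devices:
 *
 *      idxd_set_type(idxd);
 *      if (idxd->type == IDXD_TYPE_UNKNOWN)
 *              return -ENODEV;
 */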
static inline void idxd_wq_get(struct idxd_wq *wq)
{
        wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
        wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
        return wq->client_count;
}
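/*
 * client_count is a bare int, so callers are expected to hold wq->wq_lock
 * around idxd_wq_get()/idxd_wq_put(). Example pairing for an open/release
 * path (sketch):
 *
 *      mutex_lock(&wq->wq_lock);
 *      idxd_wq_get(wq);
 *      mutex_unlock(&wq->wq_lock);
 *      ...
 *      mutex_lock(&wq->wq_lock);
 *      idxd_wq_put(wq);
 *      mutex_unlock(&wq->wq_lock);
 */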
const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
/* device interrupt control */
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
void idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
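/*
 * Typical device bring-up order (sketch of what an enable path would do):
 * reset the device, program the shadow configuration into the hardware,
 * then issue the device enable command:
 *
 *      idxd_device_init_reset(idxd);
 *      rc = idxd_device_config(idxd);
 *      if (rc < 0)
 *              return rc;
 *      rc = idxd_device_enable(idxd);
 *      if (rc < 0)
 *              return rc;
 */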
/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
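/*
 * A WQ becomes usable only after its descriptor resources are allocated,
 * the WQ is enabled, and its portal is mapped; teardown runs in reverse
 * (unmap, disable, free). Sketch of the enable path:
 *
 *      rc = idxd_wq_alloc_resources(wq);
 *      if (rc < 0)
 *              return rc;
 *      rc = idxd_wq_enable(wq);
 *      if (rc < 0)
 *              goto err_free_resources;
 *      rc = idxd_wq_map_portal(wq);
 *      if (rc < 0)
 *              goto err_disable;
 */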
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
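/*
 * Descriptor lifecycle (sketch): allocate a software descriptor from the
 * WQ's pool, fill in the hardware descriptor it wraps, submit it, and free
 * it once the completion has been consumed. IDXD_OP_BLOCK waits for a free
 * slot; IDXD_OP_NONBLOCK fails fast when the pool is exhausted:
 *
 *      desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *      if (IS_ERR(desc))
 *              return PTR_ERR(desc);
 *      desc->hw->opcode = DSA_OPCODE_MEMMOVE;
 *      ...
 *      rc = idxd_submit_desc(wq, desc);
 *      if (rc < 0)
 *              idxd_free_desc(wq, desc);
 */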
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
                           enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
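/*
 * dmaengine glue (sketch): the device registers a single struct dma_device,
 * and each kernel-type WQ is then exposed as a DMA channel on it:
 *
 *      rc = idxd_register_dma_device(idxd);
 *      if (rc < 0)
 *              return rc;
 *      rc = idxd_register_dma_channel(wq);
 *      if (rc < 0)
 *              idxd_unregister_dma_device(idxd);
 */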
/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

#endif