/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128

struct idxd_device_driver {
	struct device_driver drv;
};

struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	struct llist_head pending_llist;	/* lock-free landing list for completed descs */
	struct list_head work_list;		/* descs spliced here for threaded processing */
};

struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	int id;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct cdev cdev;
	struct device *dev;
	int minor;
	struct wait_queue_head err_queue;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE		1024
#define WQ_TYPE_SIZE		10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
};

struct idxd_wq {
	void __iomem *dportal;
	struct device conf_dev;
	struct idxd_cdev idxd_cdev;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	enum idxd_wq_state state;
	unsigned long flags;
	atomic_t dq_count;	/* dedicated queue flow control */
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	struct dsa_completion_record *compls;
	dma_addr_t compls_addr;
	struct idxd_desc **descs;
	struct sbitmap sbmap;	/* allocator bitmap over the descriptor pool */
	struct dma_chan dma_chan;
	struct percpu_rw_semaphore submit_lock;
	wait_queue_head_t submit_waitq;
	char name[WQ_NAME_SIZE + 1];
};

struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
};

struct idxd_device {
	struct device conf_dev;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	enum idxd_type type;
	unsigned long flags;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	struct idxd_group *groups;
	struct idxd_wq *wqs;
	struct idxd_engine *engines;

	u32 msix_perm_offset;

	int nr_tokens;		/* non-reserved tokens */

	union sw_err_reg sw_err;

	struct msix_entry *msix_entries;
	struct idxd_irq_entry *irq_entries;

	struct dma_device dma_dev;
};

/* IDXD software descriptor */
struct idxd_desc {
	struct dsa_hw_desc *hw;
	dma_addr_t desc_dma;
	struct dsa_completion_record *completion;
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;	/* entry on irq_entry->pending_llist */
	struct list_head list;		/* entry on irq_entry->work_list */
	int id;
	struct idxd_wq *wq;
};
#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

extern struct bus_type dsa_bus_type;

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}
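
/*
 * Submission-mode note (illustrative sketch, not code from this header):
 * a dedicated WQ accepts plain MOVDIR64B writes while a shared WQ requires
 * ENQCMD(S), so a submit path would typically branch on this predicate:
 *
 *	if (wq_dedicated(wq))
 *		iosubmit_cmds512(portal, desc->hw, 1);
 */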
enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
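
/*
 * Worked example: each WQ owns four pages of portal MMIO space, with the
 * unlimited and limited portals one page (0x1000) apart. Assuming the
 * common PAGE_SHIFT of 12, wq_id 1 with IDXD_PORTAL_LIMITED yields
 * ((1 * 4) << 12) + (1 * 0x1000) = 0x5000 from the portal BAR base.
 */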
static inline void idxd_set_type(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
		idxd->type = IDXD_TYPE_DSA;
	else
		idxd->type = IDXD_TYPE_UNKNOWN;
}

static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}
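
/*
 * Usage sketch (illustrative): these helpers do no locking of their own,
 * so a caller such as a char device open/release path would be expected
 * to serialize them, e.g. under wq->wq_lock:
 *
 *	mutex_lock(&wq->wq_lock);
 *	idxd_wq_get(wq);
 *	mutex_unlock(&wq->wq_lock);
 */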
const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
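
/*
 * Module bring-up sketch (illustrative ordering only; the authoritative
 * sequence lives in the driver's init code, not in this header):
 *
 *	rc = idxd_register_bus_type();
 *	if (rc < 0)
 *		return rc;
 *	rc = idxd_register_driver();
 *	if (rc < 0)
 *		idxd_unregister_bus_type();
 *	return rc;
 */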
/* device interrupt control */
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
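
/*
 * Wiring sketch (illustrative; the real setup is in the probe path):
 * idxd_irq_handler() serves as the hard handler for each MSI-X vector,
 * with idxd_misc_thread() threaded on the error/command vector and
 * idxd_wq_thread() on the per-WQ completion vectors, e.g.:
 *
 *	rc = request_threaded_irq(vector, idxd_irq_handler,
 *				  idxd_wq_thread, 0, "idxd-portal",
 *				  irq_entry);
 */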
/* device control */
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
int idxd_device_reset(struct idxd_device *idxd);
int __idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
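
/*
 * Enable-flow sketch (illustrative): a device that reports
 * IDXD_FLAG_CONFIGURABLE is programmed and then armed; teardown runs in
 * reverse order.
 *
 *	rc = idxd_device_config(idxd);
 *	if (rc < 0)
 *		return rc;
 *	rc = idxd_device_enable(idxd);
 *	...
 *	idxd_device_disable(idxd);
 */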
/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
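
/*
 * WQ bring-up sketch (illustrative ordering implied by the paired APIs
 * above; err_enable is a hypothetical unwind label): allocate descriptor
 * resources, enable the queue, then map its submission portal; unwind in
 * reverse on failure or teardown.
 *
 *	rc = idxd_wq_alloc_resources(wq);
 *	if (rc < 0)
 *		return rc;
 *	rc = idxd_wq_enable(wq);
 *	if (rc < 0)
 *		goto err_enable;
 *	rc = idxd_wq_map_portal(wq);
 */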
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
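
/*
 * Descriptor lifecycle sketch (illustrative; assumes the blocking
 * allocation mode and a memory-move opcode from the DSA uapi header):
 * allocate, fill the hardware descriptor, submit, and free once the
 * completion record has been consumed.
 *
 *	struct idxd_desc *desc;
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	desc->hw->opcode = DSA_OPCODE_MEMMOVE;
 *	rc = idxd_submit_desc(wq, desc);
 *	...
 *	idxd_free_desc(wq, desc);
 */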
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
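
/*
 * Registration sketch (illustrative): the dmaengine device is registered
 * once per idxd device, after which each kernel-typed WQ is exposed as a
 * DMA channel.
 *
 *	rc = idxd_register_dma_device(idxd);
 *	if (rc < 0)
 *		return rc;
 *	rc = idxd_register_dma_channel(wq);
 */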
/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

#endif