// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"
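
/*
 * Mark every workqueue on the device as disabled. Callers must hold
 * idxd->dev_lock (enforced by the lockdep assertion below).
 */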
void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->state = IDXD_WQ_DISABLED;
	}
}
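
/*
 * Attempt to bring the device back after a software reset: reset, reapply
 * the configuration, re-enable the device, then re-enable every workqueue
 * that was previously enabled. On failure the device is left halted.
 */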
static int idxd_restart(struct idxd_device *idxd)
{
	int i, rc;

	lockdep_assert_held(&idxd->dev_lock);

	rc = __idxd_device_reset(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0)
				dev_warn(&idxd->pdev->dev,
					 "Unable to re-enable wq %s\n",
					 dev_name(&wq->conf_dev));
		}
	}

	return 0;

out:
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_HALTED;
	return rc;
}
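
/*
 * Hard IRQ handler: mask the MSI-X vector and defer the real work to the
 * threaded handlers below.
 */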
irqreturn_t idxd_irq_handler(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;

	idxd_mask_msix_vector(idxd, irq_entry->id);
	return IRQ_WAKE_THREAD;
}
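
/*
 * Threaded handler for the miscellaneous (non-completion) interrupt:
 * decode INTCAUSE, log software errors, wake any user-space error queues,
 * acknowledge the cause bits, and handle a device halt by attempting a
 * restart or marking the device halted.
 */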
irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 cause, val = 0;
	int i, rc;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	if (cause & IDXD_INTC_ERR) {
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));
		iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = &idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->idxd_cdev.err_queue);
		} else {
			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = &idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->idxd_cdev.err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
	}

	if (cause & IDXD_INTC_CMD) {
		/* Driver does use command interrupts */
		val |= IDXD_INTC_CMD;
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		/*
		 * Driver does not utilize perfmon counter overflow interrupt
		 */
		val |= IDXD_INTC_PERFMON_OVFL;
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		spin_lock_bh(&idxd->dev_lock);
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			rc = idxd_restart(idxd);
			if (rc < 0)
				dev_err(&idxd->pdev->dev,
					"idxd restart failed, device halt.");
		} else {
			idxd_device_wqs_clear_state(idxd);
			idxd->state = IDXD_DEV_HALTED;
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
		}
		spin_unlock_bh(&idxd->dev_lock);
	}

	idxd_unmask_msix_vector(idxd, irq_entry->id);
	return IRQ_HANDLED;
}
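
/*
 * Drain the lockless pending_llist: complete any descriptor the hardware
 * has finished, move the rest onto work_list for the next pass, and
 * return the number of descriptors still outstanding.
 */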
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     int *processed)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;

	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		return 0;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (desc->completion->status) {
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			list_add_tail(&desc->list, &irq_entry->work_list);
			queued++;
		}
	}

	return queued;
}
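
/*
 * Walk the work_list of descriptors previously seen pending and complete
 * any whose completion record the hardware has now written. Returns the
 * number of descriptors still waiting.
 */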
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 int *processed)
{
	struct list_head *node, *next;
	int queued = 0;

	if (list_empty(&irq_entry->work_list))
		return 0;

	list_for_each_safe(node, next, &irq_entry->work_list) {
		struct idxd_desc *desc =
			container_of(node, struct idxd_desc, list);

		if (desc->completion->status) {
			list_del(&desc->list);
			/* process and callback */
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			queued++;
		}
	}

	return queued;
}
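
/*
 * Threaded handler for a workqueue completion vector: reap completed
 * descriptors from both lists, then unmask the MSI-X vector.
 */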
irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int rc, processed = 0, retry = 0;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds all the submitted descriptors after sending them
	 * to the workqueue. It's a lockless singly linked list. The work_list
	 * is the common Linux doubly linked list. We are in a scenario of
	 * multiple producers and a single consumer. The producers are all
	 * the kernel submitters of descriptors, and the consumer is the
	 * kernel irq handler thread for the msix vector when using threaded
	 * irq. To work with the restrictions of llist to remain lockless,
	 * we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptor. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending list
	 *    and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
	 */
	do {
		rc = irq_process_work_list(irq_entry, &processed);
		if (rc != 0) {
			retry++;
			continue;
		}

		rc = irq_process_pending_llist(irq_entry, &processed);
	} while (rc != 0 && retry != 10);

	idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);

	if (processed == 0)
		return IRQ_NONE;

	return IRQ_HANDLED;
}