drivers/dma/idxd/device.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/io-64-nonatomic-lo-hi.h>
8 #include <linux/dmaengine.h>
9 #include <linux/irq.h>
10 #include <uapi/linux/idxd.h>
11 #include "../dmaengine.h"
12 #include "idxd.h"
13 #include "registers.h"
15 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
16 u32 *status);
17 static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
18 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
20 /* Interrupt control bits */
21 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
23 union genctrl_reg genctrl;
25 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
26 genctrl.softerr_int_en = 1;
27 genctrl.halt_int_en = 1;
28 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
31 void idxd_mask_error_interrupts(struct idxd_device *idxd)
33 union genctrl_reg genctrl;
35 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
36 genctrl.softerr_int_en = 0;
37 genctrl.halt_int_en = 0;
38 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
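/*
 * Descriptor bookkeeping helpers: the hardware descriptor and software
 * descriptor arrays below are allocated as arrays of individually
 * kzalloc'ed entries on the device's NUMA node (dev_to_node()) and are
 * freed entry by entry.
 */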
41 static void free_hw_descs(struct idxd_wq *wq)
43 int i;
45 for (i = 0; i < wq->num_descs; i++)
46 kfree(wq->hw_descs[i]);
48 kfree(wq->hw_descs);
51 static int alloc_hw_descs(struct idxd_wq *wq, int num)
53 struct device *dev = &wq->idxd->pdev->dev;
54 int i;
55 int node = dev_to_node(dev);
57 wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
58 GFP_KERNEL, node);
59 if (!wq->hw_descs)
60 return -ENOMEM;
62 for (i = 0; i < num; i++) {
63 wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
64 GFP_KERNEL, node);
65 if (!wq->hw_descs[i]) {
66 free_hw_descs(wq);
67 return -ENOMEM;
71 return 0;
74 static void free_descs(struct idxd_wq *wq)
76 int i;
78 for (i = 0; i < wq->num_descs; i++)
79 kfree(wq->descs[i]);
81 kfree(wq->descs);
84 static int alloc_descs(struct idxd_wq *wq, int num)
86 struct device *dev = &wq->idxd->pdev->dev;
87 int i;
88 int node = dev_to_node(dev);
90 wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
91 GFP_KERNEL, node);
92 if (!wq->descs)
93 return -ENOMEM;
95 for (i = 0; i < num; i++) {
96 wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
97 GFP_KERNEL, node);
98 if (!wq->descs[i]) {
99 free_descs(wq);
100 return -ENOMEM;
104 return 0;
107 /* WQ control bits */
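/**
 * idxd_wq_alloc_resources - allocate descriptor state for a kernel WQ
 * @wq: work queue being set up
 *
 * For IDXD_WQT_KERNEL queues this allocates one hardware descriptor and one
 * software descriptor per entry (the WQ size for dedicated queues, the
 * threshold for shared queues), a single coherent DMA buffer holding the
 * completion records, and an sbitmap_queue used to hand out descriptor ids.
 *
 * Return: 0 on success or a negative errno on allocation failure.
 */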
108 int idxd_wq_alloc_resources(struct idxd_wq *wq)
110 struct idxd_device *idxd = wq->idxd;
111 struct device *dev = &idxd->pdev->dev;
112 int rc, num_descs, i;
114 if (wq->type != IDXD_WQT_KERNEL)
115 return 0;
117 num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
118 wq->num_descs = num_descs;
120 rc = alloc_hw_descs(wq, num_descs);
121 if (rc < 0)
122 return rc;
124 wq->compls_size = num_descs * idxd->data->compl_size;
125 wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
126 if (!wq->compls) {
127 rc = -ENOMEM;
128 goto fail_alloc_compls;
131 rc = alloc_descs(wq, num_descs);
132 if (rc < 0)
133 goto fail_alloc_descs;
135 rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
136 dev_to_node(dev));
137 if (rc < 0)
138 goto fail_sbitmap_init;
140 for (i = 0; i < num_descs; i++) {
141 struct idxd_desc *desc = wq->descs[i];
143 desc->hw = wq->hw_descs[i];
144 if (idxd->data->type == IDXD_TYPE_DSA)
145 desc->completion = &wq->compls[i];
146 else if (idxd->data->type == IDXD_TYPE_IAX)
147 desc->iax_completion = &wq->iax_compls[i];
148 desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
149 desc->id = i;
150 desc->wq = wq;
151 desc->cpu = -1;
154 return 0;
156 fail_sbitmap_init:
157 free_descs(wq);
158 fail_alloc_descs:
159 dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
160 fail_alloc_compls:
161 free_hw_descs(wq);
162 return rc;
164 EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, "IDXD");
166 void idxd_wq_free_resources(struct idxd_wq *wq)
168 struct device *dev = &wq->idxd->pdev->dev;
170 if (wq->type != IDXD_WQT_KERNEL)
171 return;
173 free_hw_descs(wq);
174 free_descs(wq);
175 dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
176 sbitmap_queue_free(&wq->sbq);
178 EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, "IDXD");
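/*
 * WQ enable/disable. Both helpers issue their command through idxd_cmd_exec()
 * and mirror the result in wq->state and the idxd->wq_enable_map bitmap.
 * Enabling a WQ that the hardware already reports as enabled is treated as
 * success.
 */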
180 int idxd_wq_enable(struct idxd_wq *wq)
182 struct idxd_device *idxd = wq->idxd;
183 struct device *dev = &idxd->pdev->dev;
184 u32 status;
186 if (wq->state == IDXD_WQ_ENABLED) {
187 dev_dbg(dev, "WQ %d already enabled\n", wq->id);
188 return 0;
191 idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
193 if (status != IDXD_CMDSTS_SUCCESS &&
194 status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
195 dev_dbg(dev, "WQ enable failed: %#x\n", status);
196 return -ENXIO;
199 wq->state = IDXD_WQ_ENABLED;
200 set_bit(wq->id, idxd->wq_enable_map);
201 dev_dbg(dev, "WQ %d enabled\n", wq->id);
202 return 0;
205 int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
207 struct idxd_device *idxd = wq->idxd;
208 struct device *dev = &idxd->pdev->dev;
209 u32 status, operand;
211 dev_dbg(dev, "Disabling WQ %d\n", wq->id);
213 if (wq->state != IDXD_WQ_ENABLED) {
214 dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
215 return 0;
218 operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
219 idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
221 if (status != IDXD_CMDSTS_SUCCESS) {
222 dev_dbg(dev, "WQ disable failed: %#x\n", status);
223 return -ENXIO;
226 if (reset_config)
227 idxd_wq_disable_cleanup(wq);
228 clear_bit(wq->id, idxd->wq_enable_map);
229 wq->state = IDXD_WQ_DISABLED;
230 dev_dbg(dev, "WQ %d disabled\n", wq->id);
231 return 0;
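/*
 * Like idxd_wq_disable() above, the drain and reset helpers below encode the
 * target WQ in the command operand rather than passing the raw id:
 *
 *	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
 *
 * i.e. one bit out of a 16-bit mask in the low word plus a bank index in the
 * high word.
 */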
234 void idxd_wq_drain(struct idxd_wq *wq)
236 struct idxd_device *idxd = wq->idxd;
237 struct device *dev = &idxd->pdev->dev;
238 u32 operand;
240 if (wq->state != IDXD_WQ_ENABLED) {
241 dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
242 return;
245 dev_dbg(dev, "Draining WQ %d\n", wq->id);
246 operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
247 idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
250 void idxd_wq_reset(struct idxd_wq *wq)
252 struct idxd_device *idxd = wq->idxd;
253 struct device *dev = &idxd->pdev->dev;
254 u32 operand;
256 if (wq->state != IDXD_WQ_ENABLED) {
257 dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
258 return;
261 operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
262 idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
263 idxd_wq_disable_cleanup(wq);
266 int idxd_wq_map_portal(struct idxd_wq *wq)
268 struct idxd_device *idxd = wq->idxd;
269 struct pci_dev *pdev = idxd->pdev;
270 struct device *dev = &pdev->dev;
271 resource_size_t start;
273 start = pci_resource_start(pdev, IDXD_WQ_BAR);
274 start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
276 wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
277 if (!wq->portal)
278 return -ENOMEM;
280 return 0;
283 void idxd_wq_unmap_portal(struct idxd_wq *wq)
285 struct device *dev = &wq->idxd->pdev->dev;
287 devm_iounmap(dev, wq->portal);
288 wq->portal = NULL;
289 wq->portal_offset = 0;
292 void idxd_wqs_unmap_portal(struct idxd_device *idxd)
294 int i;
296 for (i = 0; i < idxd->max_wqs; i++) {
297 struct idxd_wq *wq = idxd->wqs[i];
299 if (wq->portal)
300 idxd_wq_unmap_portal(wq);
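/*
 * PASID programming: __idxd_wq_set_pasid_locked() does a read-modify-write
 * of the WQCFG dword that carries the pasid/pasid_en fields under
 * idxd->dev_lock and keeps the driver's wqcfg shadow copy in sync with what
 * is written to the hardware.
 */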
304 static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
306 struct idxd_device *idxd = wq->idxd;
307 union wqcfg wqcfg;
308 unsigned int offset;
310 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
311 spin_lock(&idxd->dev_lock);
312 wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
313 wqcfg.pasid_en = 1;
314 wqcfg.pasid = pasid;
315 wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
316 iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
317 spin_unlock(&idxd->dev_lock);
320 int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
322 int rc;
324 rc = idxd_wq_disable(wq, false);
325 if (rc < 0)
326 return rc;
328 __idxd_wq_set_pasid_locked(wq, pasid);
330 rc = idxd_wq_enable(wq);
331 if (rc < 0)
332 return rc;
334 return 0;
337 int idxd_wq_disable_pasid(struct idxd_wq *wq)
339 struct idxd_device *idxd = wq->idxd;
340 int rc;
341 union wqcfg wqcfg;
342 unsigned int offset;
344 rc = idxd_wq_disable(wq, false);
345 if (rc < 0)
346 return rc;
348 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
349 spin_lock(&idxd->dev_lock);
350 wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
351 wqcfg.pasid_en = 0;
352 wqcfg.pasid = 0;
353 iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
354 spin_unlock(&idxd->dev_lock);
356 rc = idxd_wq_enable(wq);
357 if (rc < 0)
358 return rc;
360 return 0;
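/*
 * idxd_wq_disable_cleanup() returns a WQ's software state to driver
 * defaults: the WQCFG shadow is zeroed and the type, threshold, priority,
 * flags, name and transfer/batch limits are reset. Callers must hold
 * wq->wq_lock.
 */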
363 static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
365 struct idxd_device *idxd = wq->idxd;
367 lockdep_assert_held(&wq->wq_lock);
368 wq->state = IDXD_WQ_DISABLED;
369 memset(wq->wqcfg, 0, idxd->wqcfg_size);
370 wq->type = IDXD_WQT_NONE;
371 wq->threshold = 0;
372 wq->priority = 0;
373 wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
374 wq->flags = 0;
375 memset(wq->name, 0, WQ_NAME_SIZE);
376 wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
377 idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
378 if (wq->opcap_bmap)
379 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
382 static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
384 lockdep_assert_held(&wq->wq_lock);
386 wq->size = 0;
387 wq->group = NULL;
390 static void idxd_wq_ref_release(struct percpu_ref *ref)
392 struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);
394 complete(&wq->wq_dead);
397 int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
399 int rc;
401 memset(&wq->wq_active, 0, sizeof(wq->wq_active));
402 rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
403 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
404 if (rc < 0)
405 return rc;
406 reinit_completion(&wq->wq_dead);
407 reinit_completion(&wq->wq_resurrect);
408 return 0;
410 EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, "IDXD");
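/*
 * Quiescing a WQ: __idxd_wq_quiesce() re-arms wq_resurrect, kills the
 * wq_active percpu reference so new submissions can no longer take a
 * reference, completes wq_resurrect to release anything waiting on it, and
 * then blocks until idxd_wq_ref_release() signals wq_dead once the last
 * outstanding reference is dropped. idxd_wq_quiesce() is the same operation
 * taken under wq->wq_lock.
 */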
412 void __idxd_wq_quiesce(struct idxd_wq *wq)
414 lockdep_assert_held(&wq->wq_lock);
415 reinit_completion(&wq->wq_resurrect);
416 percpu_ref_kill(&wq->wq_active);
417 complete_all(&wq->wq_resurrect);
418 wait_for_completion(&wq->wq_dead);
420 EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, "IDXD");
422 void idxd_wq_quiesce(struct idxd_wq *wq)
424 mutex_lock(&wq->wq_lock);
425 __idxd_wq_quiesce(wq);
426 mutex_unlock(&wq->wq_lock);
428 EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, "IDXD");
430 /* Device control bits */
431 static inline bool idxd_is_enabled(struct idxd_device *idxd)
433 union gensts_reg gensts;
435 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
437 if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
438 return true;
439 return false;
442 static inline bool idxd_device_is_halted(struct idxd_device *idxd)
444 union gensts_reg gensts;
446 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
448 return (gensts.state == IDXD_DEVICE_STATE_HALT);
452 * This function is only used for reset during probe and will
453 * poll for completion. Once the device is set up with interrupts,
454 * all commands will be done via interrupt completion.
456 int idxd_device_init_reset(struct idxd_device *idxd)
458 struct device *dev = &idxd->pdev->dev;
459 union idxd_command_reg cmd;
461 if (idxd_device_is_halted(idxd)) {
462 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
463 return -ENXIO;
466 memset(&cmd, 0, sizeof(cmd));
467 cmd.cmd = IDXD_CMD_RESET_DEVICE;
468 dev_dbg(dev, "%s: sending reset for init.\n", __func__);
469 spin_lock(&idxd->cmd_lock);
470 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
472 while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
473 IDXD_CMDSTS_ACTIVE)
474 cpu_relax();
475 spin_unlock(&idxd->cmd_lock);
476 return 0;
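/*
 * idxd_cmd_exec() serializes device commands: it waits until no other
 * command is marked IDXD_FLAG_CMD_RUNNING, writes the command register with
 * int_req set, sleeps on a completion that the command interrupt handler
 * signals through idxd->cmd_done, then reports the raw status through
 * *status while caching the low status byte in idxd->cmd_status.
 *
 * Typical caller pattern (illustrative sketch only):
 *
 *	u32 status;
 *
 *	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
 *	if (status != IDXD_CMDSTS_SUCCESS)
 *		return -ENXIO;
 */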
479 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
480 u32 *status)
482 union idxd_command_reg cmd;
483 DECLARE_COMPLETION_ONSTACK(done);
484 u32 stat;
485 unsigned long flags;
487 if (idxd_device_is_halted(idxd)) {
488 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
489 if (status)
490 *status = IDXD_CMDSTS_HW_ERR;
491 return;
494 memset(&cmd, 0, sizeof(cmd));
495 cmd.cmd = cmd_code;
496 cmd.operand = operand;
497 cmd.int_req = 1;
499 spin_lock_irqsave(&idxd->cmd_lock, flags);
500 wait_event_lock_irq(idxd->cmd_waitq,
501 !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
502 idxd->cmd_lock);
504 dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
505 __func__, cmd_code, operand);
507 idxd->cmd_status = 0;
508 __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
509 idxd->cmd_done = &done;
510 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
513 * After command submitted, release lock and go to sleep until
514 * the command completes via interrupt.
516 spin_unlock_irqrestore(&idxd->cmd_lock, flags);
517 wait_for_completion(&done);
518 stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
519 spin_lock(&idxd->cmd_lock);
520 if (status)
521 *status = stat;
522 idxd->cmd_status = stat & GENMASK(7, 0);
524 __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
525 /* Wake up other pending commands */
526 wake_up(&idxd->cmd_waitq);
527 spin_unlock(&idxd->cmd_lock);
530 int idxd_device_enable(struct idxd_device *idxd)
532 struct device *dev = &idxd->pdev->dev;
533 u32 status;
535 if (idxd_is_enabled(idxd)) {
536 dev_dbg(dev, "Device already enabled\n");
537 return -ENXIO;
540 idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);
542 /* If the command is successful or if the device was enabled */
543 if (status != IDXD_CMDSTS_SUCCESS &&
544 status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
545 dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
546 return -ENXIO;
549 idxd->state = IDXD_DEV_ENABLED;
550 return 0;
553 int idxd_device_disable(struct idxd_device *idxd)
555 struct device *dev = &idxd->pdev->dev;
556 u32 status;
558 if (!idxd_is_enabled(idxd)) {
559 dev_dbg(dev, "Device is not enabled\n");
560 return 0;
563 idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);
565 /* If the command is successful or if the device was disabled */
566 if (status != IDXD_CMDSTS_SUCCESS &&
567 !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
568 dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
569 return -ENXIO;
572 idxd_device_clear_state(idxd);
573 return 0;
576 void idxd_device_reset(struct idxd_device *idxd)
578 idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
579 idxd_device_clear_state(idxd);
580 spin_lock(&idxd->dev_lock);
581 idxd_unmask_error_interrupts(idxd);
582 spin_unlock(&idxd->dev_lock);
585 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
587 struct device *dev = &idxd->pdev->dev;
588 u32 operand;
590 operand = pasid;
591 dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
592 idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
593 dev_dbg(dev, "pasid %d drained\n", pasid);
596 int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
597 enum idxd_interrupt_type irq_type)
599 struct device *dev = &idxd->pdev->dev;
600 u32 operand, status;
602 if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
603 return -EOPNOTSUPP;
605 dev_dbg(dev, "get int handle, idx %d\n", idx);
607 operand = idx & GENMASK(15, 0);
608 if (irq_type == IDXD_IRQ_IMS)
609 operand |= CMD_INT_HANDLE_IMS;
611 dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);
613 idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);
615 if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
616 dev_dbg(dev, "request int handle failed: %#x\n", status);
617 return -ENXIO;
620 *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);
622 dev_dbg(dev, "int handle acquired: %u\n", *handle);
623 return 0;
626 int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
627 enum idxd_interrupt_type irq_type)
629 struct device *dev = &idxd->pdev->dev;
630 u32 operand, status;
631 union idxd_command_reg cmd;
633 if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
634 return -EOPNOTSUPP;
636 dev_dbg(dev, "release int handle, handle %d\n", handle);
638 memset(&cmd, 0, sizeof(cmd));
639 operand = handle & GENMASK(15, 0);
641 if (irq_type == IDXD_IRQ_IMS)
642 operand |= CMD_INT_HANDLE_IMS;
644 cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
645 cmd.operand = operand;
647 dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);
649 spin_lock(&idxd->cmd_lock);
650 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
652 while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
653 cpu_relax();
654 status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
655 spin_unlock(&idxd->cmd_lock);
657 if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
658 dev_dbg(dev, "release int handle failed: %#x\n", status);
659 return -ENXIO;
662 dev_dbg(dev, "int handle released.\n");
663 return 0;
666 /* Device configuration bits */
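/*
 * The clear_state helpers below return engines, groups and WQs to their
 * software defaults. Engine and group state is cleared under idxd->dev_lock;
 * per-WQ state is cleared under each wq->wq_lock.
 */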
667 static void idxd_engines_clear_state(struct idxd_device *idxd)
669 struct idxd_engine *engine;
670 int i;
672 lockdep_assert_held(&idxd->dev_lock);
673 for (i = 0; i < idxd->max_engines; i++) {
674 engine = idxd->engines[i];
675 engine->group = NULL;
679 static void idxd_groups_clear_state(struct idxd_device *idxd)
681 struct idxd_group *group;
682 int i;
684 lockdep_assert_held(&idxd->dev_lock);
685 for (i = 0; i < idxd->max_groups; i++) {
686 group = idxd->groups[i];
687 memset(&group->grpcfg, 0, sizeof(group->grpcfg));
688 group->num_engines = 0;
689 group->num_wqs = 0;
690 group->use_rdbuf_limit = false;
692 * The default value is the same as the value of
693 * total read buffers in GRPCAP.
695 group->rdbufs_allowed = idxd->max_rdbufs;
696 group->rdbufs_reserved = 0;
697 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
698 group->tc_a = 1;
699 group->tc_b = 1;
700 } else {
701 group->tc_a = -1;
702 group->tc_b = -1;
704 group->desc_progress_limit = 0;
705 group->batch_progress_limit = 0;
709 static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
711 int i;
713 for (i = 0; i < idxd->max_wqs; i++) {
714 struct idxd_wq *wq = idxd->wqs[i];
716 mutex_lock(&wq->wq_lock);
717 idxd_wq_disable_cleanup(wq);
718 idxd_wq_device_reset_cleanup(wq);
719 mutex_unlock(&wq->wq_lock);
723 void idxd_device_clear_state(struct idxd_device *idxd)
725 /* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
726 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
728 * Clearing wq state is protected by wq lock.
729 * So no need to be protected by device lock.
731 idxd_device_wqs_clear_state(idxd);
733 spin_lock(&idxd->dev_lock);
734 idxd_groups_clear_state(idxd);
735 idxd_engines_clear_state(idxd);
736 } else {
737 spin_lock(&idxd->dev_lock);
740 idxd->state = IDXD_DEV_DISABLED;
741 spin_unlock(&idxd->dev_lock);
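/*
 * Event log setup: allocate a tracking bitmap and a coherent buffer, program
 * the buffer address and size into EVLCFG, then enable the event log
 * interrupt in GENCTRL and the event log itself in GENCFG, all under
 * evl->lock. idxd_device_evl_free() tears this down again in roughly the
 * reverse order.
 */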
744 static int idxd_device_evl_setup(struct idxd_device *idxd)
746 union gencfg_reg gencfg;
747 union evlcfg_reg evlcfg;
748 union genctrl_reg genctrl;
749 struct device *dev = &idxd->pdev->dev;
750 void *addr;
751 dma_addr_t dma_addr;
752 int size;
753 struct idxd_evl *evl = idxd->evl;
754 unsigned long *bmap;
755 int rc;
757 if (!evl)
758 return 0;
760 size = evl_size(idxd);
762 bmap = bitmap_zalloc(size, GFP_KERNEL);
763 if (!bmap) {
764 rc = -ENOMEM;
765 goto err_bmap;
769 * The address needs to be page aligned. dma_alloc_coherent() already returns
770 * an address aligned to at least the page size, so no manual alignment is required.
772 addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
773 if (!addr) {
774 rc = -ENOMEM;
775 goto err_alloc;
778 mutex_lock(&evl->lock);
779 evl->log = addr;
780 evl->dma = dma_addr;
781 evl->log_size = size;
782 evl->bmap = bmap;
784 memset(&evlcfg, 0, sizeof(evlcfg));
785 evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
786 evlcfg.size = evl->size;
788 iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
789 iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
791 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
792 genctrl.evl_int_en = 1;
793 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
795 gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
796 gencfg.evl_en = 1;
797 iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
799 mutex_unlock(&evl->lock);
800 return 0;
802 err_alloc:
803 bitmap_free(bmap);
804 err_bmap:
805 return rc;
808 static void idxd_device_evl_free(struct idxd_device *idxd)
810 void *evl_log;
811 unsigned int evl_log_size;
812 dma_addr_t evl_dma;
813 union gencfg_reg gencfg;
814 union genctrl_reg genctrl;
815 struct device *dev = &idxd->pdev->dev;
816 struct idxd_evl *evl = idxd->evl;
818 gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
819 if (!gencfg.evl_en)
820 return;
822 mutex_lock(&evl->lock);
823 gencfg.evl_en = 0;
824 iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
826 genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
827 genctrl.evl_int_en = 0;
828 iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
830 iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
831 iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
833 bitmap_free(evl->bmap);
834 evl_log = evl->log;
835 evl_log_size = evl->log_size;
836 evl_dma = evl->dma;
837 evl->log = NULL;
838 evl->size = IDXD_EVL_SIZE_MIN;
839 mutex_unlock(&evl->lock);
841 dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
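/*
 * Group configuration writes: each group's GRPCFG is programmed as the WQ
 * membership words (GRPWQCFG, one 64-bit stride at a time), the engine
 * membership word (GRPENGCFG) and the flags word (GRPFLGCFG), with every
 * value read back for the debug log.
 */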
844 static void idxd_group_config_write(struct idxd_group *group)
846 struct idxd_device *idxd = group->idxd;
847 struct device *dev = &idxd->pdev->dev;
848 int i;
849 u32 grpcfg_offset;
851 dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
853 /* setup GRPWQCFG */
854 for (i = 0; i < GRPWQCFG_STRIDES; i++) {
855 grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
856 iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
857 dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
858 group->id, i, grpcfg_offset,
859 ioread64(idxd->reg_base + grpcfg_offset));
862 /* setup GRPENGCFG */
863 grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
864 iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
865 dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
866 grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
868 /* setup GRPFLAGS */
869 grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
870 iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
871 dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
872 group->id, grpcfg_offset,
873 ioread64(idxd->reg_base + grpcfg_offset));
876 static int idxd_groups_config_write(struct idxd_device *idxd)
879 union gencfg_reg reg;
880 int i;
881 struct device *dev = &idxd->pdev->dev;
883 /* Setup bandwidth rdbuf limit */
884 if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
885 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
886 reg.rdbuf_limit = idxd->rdbuf_limit;
887 iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
890 dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
891 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
893 for (i = 0; i < idxd->max_groups; i++) {
894 struct idxd_group *group = idxd->groups[i];
896 idxd_group_config_write(group);
899 return 0;
902 static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
904 struct pci_dev *pdev = idxd->pdev;
906 if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
907 return true;
908 return false;
911 static int idxd_wq_config_write(struct idxd_wq *wq)
913 struct idxd_device *idxd = wq->idxd;
914 struct device *dev = &idxd->pdev->dev;
915 u32 wq_offset;
916 int i, n;
918 if (!wq->group)
919 return 0;
922 * Instead of memsetting the entire shadow copy of WQCFG, copy from the hardware after
923 * WQ reset. This copies back the sticky values that are present on some devices.
925 for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
926 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
927 wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
930 if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
931 wq->size = WQ_DEFAULT_QUEUE_DEPTH;
933 /* byte 0-3 */
934 wq->wqcfg->wq_size = wq->size;
936 /* bytes 4-7 */
937 wq->wqcfg->wq_thresh = wq->threshold;
939 /* byte 8-11 */
940 if (wq_dedicated(wq))
941 wq->wqcfg->mode = 1;
944 * The WQ priv bit is set depending on the WQ type. priv = 1 if the
945 * WQ type is kernel to indicate privileged access. This setting only
946 * matters for dedicated WQ. According to the DSA spec:
947 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
948 * Privileged Mode Enable field of the PCI Express PASID capability
949 * is 0, this field must be 0.
951 * In the case of a dedicated kernel WQ that cannot support the PASID
952 * capability, the configuration is rejected.
954 if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
955 !idxd_device_pasid_priv_enabled(idxd) &&
956 wq->type == IDXD_WQT_KERNEL) {
957 idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
958 return -EOPNOTSUPP;
961 wq->wqcfg->priority = wq->priority;
963 if (idxd->hw.gen_cap.block_on_fault &&
964 test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) &&
965 !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
966 wq->wqcfg->bof = 1;
968 if (idxd->hw.wq_cap.wq_ats_support)
969 wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
971 if (idxd->hw.wq_cap.wq_prs_support)
972 wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
974 /* bytes 12-15 */
975 wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
976 idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
978 /* bytes 32-63 */
979 if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
980 memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
981 for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
982 int pos = n % BITS_PER_LONG_LONG;
983 int idx = n / BITS_PER_LONG_LONG;
985 wq->wqcfg->op_config[idx] |= BIT(pos);
989 dev_dbg(dev, "WQ %d CFGs\n", wq->id);
990 for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
991 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
992 iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
993 dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
994 wq->id, i, wq_offset,
995 ioread32(idxd->reg_base + wq_offset));
998 return 0;
1001 static int idxd_wqs_config_write(struct idxd_device *idxd)
1003 int i, rc;
1005 for (i = 0; i < idxd->max_wqs; i++) {
1006 struct idxd_wq *wq = idxd->wqs[i];
1008 rc = idxd_wq_config_write(wq);
1009 if (rc < 0)
1010 return rc;
1013 return 0;
1016 static void idxd_group_flags_setup(struct idxd_device *idxd)
1018 int i;
1020 /* TC-A 0 and TC-B 1 should be defaults */
1021 for (i = 0; i < idxd->max_groups; i++) {
1022 struct idxd_group *group = idxd->groups[i];
1024 if (group->tc_a == -1)
1025 group->tc_a = group->grpcfg.flags.tc_a = 0;
1026 else
1027 group->grpcfg.flags.tc_a = group->tc_a;
1028 if (group->tc_b == -1)
1029 group->tc_b = group->grpcfg.flags.tc_b = 1;
1030 else
1031 group->grpcfg.flags.tc_b = group->tc_b;
1032 group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
1033 group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
1034 group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
1035 group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
1036 group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
1040 static int idxd_engines_setup(struct idxd_device *idxd)
1042 int i, engines = 0;
1043 struct idxd_engine *eng;
1044 struct idxd_group *group;
1046 for (i = 0; i < idxd->max_groups; i++) {
1047 group = idxd->groups[i];
1048 group->grpcfg.engines = 0;
1051 for (i = 0; i < idxd->max_engines; i++) {
1052 eng = idxd->engines[i];
1053 group = eng->group;
1055 if (!group)
1056 continue;
1058 group->grpcfg.engines |= BIT(eng->id);
1059 engines++;
1062 if (!engines)
1063 return -EINVAL;
1065 return 0;
1068 static int idxd_wqs_setup(struct idxd_device *idxd)
1070 struct idxd_wq *wq;
1071 struct idxd_group *group;
1072 int i, j, configured = 0;
1073 struct device *dev = &idxd->pdev->dev;
1075 for (i = 0; i < idxd->max_groups; i++) {
1076 group = idxd->groups[i];
1077 for (j = 0; j < 4; j++)
1078 group->grpcfg.wqs[j] = 0;
1081 for (i = 0; i < idxd->max_wqs; i++) {
1082 wq = idxd->wqs[i];
1083 group = wq->group;
1085 if (!wq->group)
1086 continue;
1088 if (wq_shared(wq) && !wq_shared_supported(wq)) {
1089 idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
1090 dev_warn(dev, "No shared wq support but configured.\n");
1091 return -EINVAL;
1094 group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
1095 configured++;
1098 if (configured == 0) {
1099 idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
1100 return -EINVAL;
1103 return 0;
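/**
 * idxd_device_config - program the device from the driver's shadow state
 * @idxd: device to configure
 *
 * Validates the WQ, engine and group setup and writes it to the hardware.
 * The caller must hold idxd->dev_lock. Fails if no engine or no WQ is
 * attached to a group, or if an individual WQ configuration is invalid.
 *
 * Return: 0 on success or a negative errno.
 */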
1106 int idxd_device_config(struct idxd_device *idxd)
1108 int rc;
1110 lockdep_assert_held(&idxd->dev_lock);
1111 rc = idxd_wqs_setup(idxd);
1112 if (rc < 0)
1113 return rc;
1115 rc = idxd_engines_setup(idxd);
1116 if (rc < 0)
1117 return rc;
1119 idxd_group_flags_setup(idxd);
1121 rc = idxd_wqs_config_write(idxd);
1122 if (rc < 0)
1123 return rc;
1125 rc = idxd_groups_config_write(idxd);
1126 if (rc < 0)
1127 return rc;
1129 return 0;
1132 static int idxd_wq_load_config(struct idxd_wq *wq)
1134 struct idxd_device *idxd = wq->idxd;
1135 struct device *dev = &idxd->pdev->dev;
1136 int wqcfg_offset;
1137 int i;
1139 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
1140 memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);
1142 wq->size = wq->wqcfg->wq_size;
1143 wq->threshold = wq->wqcfg->wq_thresh;
1145 /* The driver does not support shared WQ mode in read-only config yet */
1146 if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
1147 return -EOPNOTSUPP;
1149 set_bit(WQ_FLAG_DEDICATED, &wq->flags);
1151 wq->priority = wq->wqcfg->priority;
1153 wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
1154 idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
1156 for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
1157 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
1158 dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
1161 return 0;
1164 static void idxd_group_load_config(struct idxd_group *group)
1166 struct idxd_device *idxd = group->idxd;
1167 struct device *dev = &idxd->pdev->dev;
1168 int i, j, grpcfg_offset;
1171 * Load WQS bit fields
1172 * Iterate through all 256 bits, 64 bits at a time
1174 for (i = 0; i < GRPWQCFG_STRIDES; i++) {
1175 struct idxd_wq *wq;
1177 grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
1178 group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
1179 dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
1180 group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);
1182 if (i * 64 >= idxd->max_wqs)
1183 break;
1185 /* Iterate through all 64 bits and check for wq set */
1186 for (j = 0; j < 64; j++) {
1187 int id = i * 64 + j;
1189 /* No need to check beyond max wqs */
1190 if (id >= idxd->max_wqs)
1191 break;
1193 /* Set group assignment for wq if wq bit is set */
1194 if (group->grpcfg.wqs[i] & BIT(j)) {
1195 wq = idxd->wqs[id];
1196 wq->group = group;
1201 grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
1202 group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
1203 dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
1204 grpcfg_offset, group->grpcfg.engines);
1206 /* Iterate through all 64 bits to check engines set */
1207 for (i = 0; i < 64; i++) {
1208 if (i >= idxd->max_engines)
1209 break;
1211 if (group->grpcfg.engines & BIT(i)) {
1212 struct idxd_engine *engine = idxd->engines[i];
1214 engine->group = group;
1218 grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
1219 group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
1220 dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
1221 group->id, grpcfg_offset, group->grpcfg.flags.bits);
1224 int idxd_device_load_config(struct idxd_device *idxd)
1226 union gencfg_reg reg;
1227 int i, rc;
1229 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
1230 idxd->rdbuf_limit = reg.rdbuf_limit;
1232 for (i = 0; i < idxd->max_groups; i++) {
1233 struct idxd_group *group = idxd->groups[i];
1235 idxd_group_load_config(group);
1238 for (i = 0; i < idxd->max_wqs; i++) {
1239 struct idxd_wq *wq = idxd->wqs[i];
1241 rc = idxd_wq_load_config(wq);
1242 if (rc < 0)
1243 return rc;
1246 return 0;
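/*
 * idxd_flush_pending_descs() moves everything still sitting on the interrupt
 * entry's pending llist and work list onto a private list and completes each
 * descriptor with callbacks suppressed, as either a normal or an aborted
 * completion depending on whether its completion record was written.
 */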
1249 static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
1251 struct idxd_desc *desc, *itr;
1252 struct llist_node *head;
1253 LIST_HEAD(flist);
1254 enum idxd_complete_type ctype;
1256 spin_lock(&ie->list_lock);
1257 head = llist_del_all(&ie->pending_llist);
1258 if (head) {
1259 llist_for_each_entry_safe(desc, itr, head, llnode)
1260 list_add_tail(&desc->list, &ie->work_list);
1263 list_for_each_entry_safe(desc, itr, &ie->work_list, list)
1264 list_move_tail(&desc->list, &flist);
1265 spin_unlock(&ie->list_lock);
1267 list_for_each_entry_safe(desc, itr, &flist, list) {
1268 struct dma_async_tx_descriptor *tx;
1270 list_del(&desc->list);
1271 ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
1273 * The wq is being disabled. Any remaining descriptors are
1274 * likely to be stuck and can be dropped. The callback could
1275 * point to code that is no longer accessible, for example
1276 * if the dmatest module has been unloaded.
1278 tx = &desc->txd;
1279 tx->callback = NULL;
1280 tx->callback_result = NULL;
1281 idxd_dma_complete_txd(desc, ctype, true, NULL, NULL);
1285 static void idxd_device_set_perm_entry(struct idxd_device *idxd,
1286 struct idxd_irq_entry *ie)
1288 union msix_perm mperm;
1290 if (ie->pasid == IOMMU_PASID_INVALID)
1291 return;
1293 mperm.bits = 0;
1294 mperm.pasid = ie->pasid;
1295 mperm.pasid_en = 1;
1296 iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
1299 static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
1300 struct idxd_irq_entry *ie)
1302 iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
1305 void idxd_wq_free_irq(struct idxd_wq *wq)
1307 struct idxd_device *idxd = wq->idxd;
1308 struct idxd_irq_entry *ie = &wq->ie;
1310 if (wq->type != IDXD_WQT_KERNEL)
1311 return;
1313 free_irq(ie->vector, ie);
1314 idxd_flush_pending_descs(ie);
1315 if (idxd->request_int_handles)
1316 idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
1317 idxd_device_clear_perm_entry(idxd, ie);
1318 ie->vector = -1;
1319 ie->int_handle = INVALID_INT_HANDLE;
1320 ie->pasid = IOMMU_PASID_INVALID;
1323 int idxd_wq_request_irq(struct idxd_wq *wq)
1325 struct idxd_device *idxd = wq->idxd;
1326 struct pci_dev *pdev = idxd->pdev;
1327 struct device *dev = &pdev->dev;
1328 struct idxd_irq_entry *ie;
1329 int rc;
1331 if (wq->type != IDXD_WQT_KERNEL)
1332 return 0;
1334 ie = &wq->ie;
1335 ie->vector = pci_irq_vector(pdev, ie->id);
1336 ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
1337 idxd_device_set_perm_entry(idxd, ie);
1339 rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
1340 if (rc < 0) {
1341 dev_err(dev, "Failed to request irq %d.\n", ie->vector);
1342 goto err_irq;
1345 if (idxd->request_int_handles) {
1346 rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
1347 IDXD_IRQ_MSIX);
1348 if (rc < 0)
1349 goto err_int_handle;
1350 } else {
1351 ie->int_handle = ie->id;
1354 return 0;
1356 err_int_handle:
1357 ie->int_handle = INVALID_INT_HANDLE;
1358 free_irq(ie->vector, ie);
1359 err_irq:
1360 idxd_device_clear_perm_entry(idxd, ie);
1361 ie->pasid = IOMMU_PASID_INVALID;
1362 return rc;
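/*
 * idxd_drv_enable_wq() brings a WQ into service in a fixed order: validate
 * the sysfs-provided configuration, write the device configuration (when the
 * device is configurable), enable the WQ, map its portal, request its IRQ,
 * allocate descriptor resources and finally initialize the wq_active percpu
 * reference. Each failure path unwinds the steps already taken in reverse
 * order.
 */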
1365 int idxd_drv_enable_wq(struct idxd_wq *wq)
1367 struct idxd_device *idxd = wq->idxd;
1368 struct device *dev = &idxd->pdev->dev;
1369 int rc = -ENXIO;
1371 lockdep_assert_held(&wq->wq_lock);
1373 if (idxd->state != IDXD_DEV_ENABLED) {
1374 idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
1375 goto err;
1378 if (wq->state != IDXD_WQ_DISABLED) {
1379 dev_dbg(dev, "wq %d already enabled.\n", wq->id);
1380 idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
1381 rc = -EBUSY;
1382 goto err;
1385 if (!wq->group) {
1386 dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
1387 idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
1388 goto err;
1391 if (strlen(wq->name) == 0) {
1392 idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
1393 dev_dbg(dev, "wq %d name not set.\n", wq->id);
1394 goto err;
1397 /* Shared WQ checks */
1398 if (wq_shared(wq)) {
1399 if (!wq_shared_supported(wq)) {
1400 idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
1401 dev_dbg(dev, "PASID not enabled and shared wq.\n");
1402 goto err;
1405 * Shared wq with the threshold set to 0 means the user
1406 * did not set the threshold or transitioned from a
1407 * dedicated wq but did not set threshold. A value
1408 * of 0 would effectively disable the shared wq. The
1409 * driver does not allow a value of 0 to be set for
1410 * threshold via sysfs.
1412 if (wq->threshold == 0) {
1413 idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
1414 dev_dbg(dev, "Shared wq and threshold 0.\n");
1415 goto err;
1420 * In the event that the WQ is configurable for PASID, the driver
1421 * should set up the pasid and pasid_en bits. This is true for both kernel
1422 * and user shared workqueues. There is no need to set up the priv bit
1423 * since in-kernel DMA will also perform user privileged requests.
1424 * A dedicated wq that is not of 'kernel' type will configure pasid and
1425 * pasid_en later on, so there is no need to set them up here.
1427 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
1428 if (wq_pasid_enabled(wq)) {
1429 if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
1430 u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
1432 __idxd_wq_set_pasid_locked(wq, pasid);
1437 rc = 0;
1438 spin_lock(&idxd->dev_lock);
1439 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1440 rc = idxd_device_config(idxd);
1441 spin_unlock(&idxd->dev_lock);
1442 if (rc < 0) {
1443 dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
1444 goto err;
1447 rc = idxd_wq_enable(wq);
1448 if (rc < 0) {
1449 dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
1450 goto err;
1453 rc = idxd_wq_map_portal(wq);
1454 if (rc < 0) {
1455 idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
1456 dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
1457 goto err_map_portal;
1460 wq->client_count = 0;
1462 rc = idxd_wq_request_irq(wq);
1463 if (rc < 0) {
1464 idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
1465 dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
1466 goto err_irq;
1469 rc = idxd_wq_alloc_resources(wq);
1470 if (rc < 0) {
1471 idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
1472 dev_dbg(dev, "WQ resource alloc failed\n");
1473 goto err_res_alloc;
1476 rc = idxd_wq_init_percpu_ref(wq);
1477 if (rc < 0) {
1478 idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
1479 dev_dbg(dev, "percpu_ref setup failed\n");
1480 goto err_ref;
1483 return 0;
1485 err_ref:
1486 idxd_wq_free_resources(wq);
1487 err_res_alloc:
1488 idxd_wq_free_irq(wq);
1489 err_irq:
1490 idxd_wq_unmap_portal(wq);
1491 err_map_portal:
1492 if (idxd_wq_disable(wq, false))
1493 dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
1494 err:
1495 return rc;
1497 EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, "IDXD");
1499 void idxd_drv_disable_wq(struct idxd_wq *wq)
1501 struct idxd_device *idxd = wq->idxd;
1502 struct device *dev = &idxd->pdev->dev;
1504 lockdep_assert_held(&wq->wq_lock);
1506 if (idxd_wq_refcount(wq))
1507 dev_warn(dev, "Clients have a claim on wq %d: %d\n",
1508 wq->id, idxd_wq_refcount(wq));
1510 idxd_wq_unmap_portal(wq);
1511 idxd_wq_drain(wq);
1512 idxd_wq_free_irq(wq);
1513 idxd_wq_reset(wq);
1514 idxd_wq_free_resources(wq);
1515 percpu_ref_exit(&wq->wq_active);
1516 wq->type = IDXD_WQT_NONE;
1517 wq->client_count = 0;
1519 EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, "IDXD");
1521 int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
1523 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
1524 int rc = 0;
1527 * Device should be in disabled state for the idxd_drv to load. If it's in
1528 * enabled state, then the device was altered outside of driver's control.
1529 * If the device is in the halted state, we don't want to proceed.
1531 if (idxd->state != IDXD_DEV_DISABLED) {
1532 idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
1533 return -ENXIO;
1536 /* Device configuration */
1537 spin_lock(&idxd->dev_lock);
1538 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1539 rc = idxd_device_config(idxd);
1540 spin_unlock(&idxd->dev_lock);
1541 if (rc < 0)
1542 return -ENXIO;
1545 * System PASID is preserved across a device disable/enable cycle, but
1546 * the GENCFG register content gets cleared during device reset. We
1547 * need to re-enable user interrupts for the kernel work queue completion
1548 * IRQ to function.
1550 if (idxd->pasid != IOMMU_PASID_INVALID)
1551 idxd_set_user_intr(idxd, 1);
1553 rc = idxd_device_evl_setup(idxd);
1554 if (rc < 0) {
1555 idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
1556 return rc;
1559 /* Start device */
1560 rc = idxd_device_enable(idxd);
1561 if (rc < 0) {
1562 idxd_device_evl_free(idxd);
1563 return rc;
1566 /* Setup DMA device without channels */
1567 rc = idxd_register_dma_device(idxd);
1568 if (rc < 0) {
1569 idxd_device_disable(idxd);
1570 idxd_device_evl_free(idxd);
1571 idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
1572 return rc;
1575 idxd->cmd_status = 0;
1576 return 0;
1579 void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
1581 struct device *dev = &idxd_dev->conf_dev;
1582 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
1583 int i;
1585 for (i = 0; i < idxd->max_wqs; i++) {
1586 struct idxd_wq *wq = idxd->wqs[i];
1587 struct device *wq_dev = wq_confdev(wq);
1589 if (wq->state == IDXD_WQ_DISABLED)
1590 continue;
1591 dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
1592 device_release_driver(wq_dev);
1595 idxd_unregister_dma_device(idxd);
1596 idxd_device_disable(idxd);
1597 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1598 idxd_device_reset(idxd);
1599 idxd_device_evl_free(idxd);
1602 static enum idxd_dev_type dev_types[] = {
1603 IDXD_DEV_DSA,
1604 IDXD_DEV_IAX,
1605 IDXD_DEV_NONE,
1608 struct idxd_device_driver idxd_drv = {
1609 .type = dev_types,
1610 .probe = idxd_device_drv_probe,
1611 .remove = idxd_device_drv_remove,
1612 .name = "idxd",
1614 EXPORT_SYMBOL_GPL(idxd_drv);