// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_mask_irq(data);
}
void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	for (i = 0; i < msixcnt; i++)
		idxd_mask_msix_vector(idxd, i);
}
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_unmask_irq(data);
}
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
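/*
 * The error interrupt helpers above only toggle softerr_int_en: GENCTRL is
 * read back first so the remaining control bits are preserved across the
 * read-modify-write.
 */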
static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);
	kfree(wq->hw_descs);
}
static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int node = dev_to_node(dev);
	int i;

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}
static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);
	kfree(wq->descs);
}
static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int node = dev_to_node(dev);
	int i;

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}
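/*
 * hw_descs[] holds the hardware descriptors handed to the device and
 * descs[] the driver-side idxd_desc wrappers; both arrays are allocated on
 * the WQ's NUMA node and freed symmetrically, element by element plus the
 * pointer array itself.
 */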
/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;
	int align;
	u64 tmp;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	wq->num_descs = wq->size;
	num_descs = wq->size;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	if (idxd->type == IDXD_TYPE_DSA)
		align = 32;
	else if (idxd->type == IDXD_TYPE_IAX)
		align = 64;
	else
		return -ENODEV;

	wq->compls_size = num_descs * idxd->compl_size + align;
	wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
					    &wq->compls_addr_raw, GFP_KERNEL);
	if (!wq->compls_raw) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	/* Adjust alignment */
	wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
	tmp = (u64)wq->compls_raw;
	tmp = (tmp + (align - 1)) & ~(align - 1);
	wq->compls = (struct dsa_completion_record *)tmp;

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->compl_size * i;
		dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
			  wq->compls_addr_raw);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}
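/*
 * Worked example for the "Adjust alignment" step above (illustrative only):
 * with align = 64, a raw completion address of 0x1234 rounds up to
 * (0x1234 + 0x3f) & ~0x3f = 0x1240, so wq->compls and wq->compls_addr both
 * start on an align-byte boundary within the over-allocated buffer.
 */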
void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
			  wq->compls_addr_raw);
	sbitmap_queue_free(&wq->sbq);
}
int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}
int idxd_wq_disable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}
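/*
 * The operand built above packs a one-hot WQ bit in bits 15:0
 * (BIT(wq->id % 16)) and the index of that 16-bit word in bits 31:16
 * ((wq->id / 16) << 16); idxd_wq_drain() below uses the same encoding.
 */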
void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

	return 0;
}
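/*
 * The mapping above covers the WQ's limited portal within the WQ BAR;
 * descriptor submission to this WQ goes through writes to that portal.
 */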
void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
}
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
	struct idxd_device *idxd = wq->idxd;
	union wqcfg wqcfg;
	unsigned long flags;
	u32 offset;
	int rc;

	rc = idxd_wq_disable(wq);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}
int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	union wqcfg wqcfg;
	unsigned long flags;
	u32 offset;
	int rc;

	rc = idxd_wq_disable(wq);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}
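/*
 * Both PASID helpers bracket the WQCFG update with idxd_wq_disable() and
 * idxd_wq_enable(), so the PASID fields are only reprogrammed while the WQ
 * is disabled.
 */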
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(0, idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}
}
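/*
 * idxd_wq_disable_cleanup() resets both the driver's software view of the
 * WQ and its WQCFG register space; callers must hold idxd->dev_lock, as
 * enforced by the lockdep assertion above.
 */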
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}
/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is setup with interrupts,
 * all commands will be done via interrupt completion.
 */
void idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;
	unsigned long flags;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->dev_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After command submitted, release lock and go to sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	wait_for_completion(&done);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (status) {
		*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
		idxd->cmd_status = *status & GENMASK(7, 0);
	}

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
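/*
 * Command submission is serialized through IDXD_FLAG_CMD_RUNNING and
 * idxd->cmd_waitq: a caller sleeps until no other command is in flight,
 * writes the command register, and then waits for the interrupt handler to
 * complete idxd->cmd_done before reading CMDSTS back.
 */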
int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}
void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}
int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	u32 status;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}
void idxd_device_reset(struct idxd_device *idxd)
{
	unsigned long flags;

	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	operand = pasid;
	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
	dev_dbg(dev, "pasid %d drained\n", pasid);
}
/* Device configuration bits */
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 grpcfg_offset;
	int i;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}
static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}
static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	memset(wq->wqcfg, 0, idxd->wqcfg_size);

	wq->wqcfg->wq_size = wq->size;

	if (wq->size == 0) {
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	wq->wqcfg->wq_thresh = wq->threshold;

	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;

	if (device_pasid_enabled(idxd)) {
		wq->wqcfg->pasid_en = 1;
		if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
			wq->wqcfg->pasid = idxd->pasid;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = wq->ats_dis;

	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}
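/*
 * WQCFG is programmed 32 bits at a time: WQCFG_STRIDES(idxd) dwords
 * starting at WQCFG_OFFSET(idxd, wq->id, 0), mirroring the register clear
 * done in idxd_wq_disable_cleanup().
 */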
static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}
static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}
static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = &idxd->engines[i];
		group = eng->group;
		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}
static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = &idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (wq_shared(wq) && !device_swq_supported(idxd)) {
			dev_warn(dev, "No shared wq support but configured.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0)
		return -EINVAL;

	return 0;
}
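/*
 * grpcfg.wqs[] is a WQ bitmap spread over 64-bit words: wq->id / 64 selects
 * the word and BIT(wq->id % 64) the bit, matching the GRPWQCFG registers
 * written back in idxd_group_config_write().
 */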
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}