// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};
static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
};
static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_iax_dev(struct device *dev)
{
	return dev ? dev->type == &iax_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev) || is_iax_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}
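
/*
 * Configuration bus callbacks: the driver model invokes match() when a
 * conf_dev is registered on the dsa/iax bus, probe() when user space
 * binds it to the bus driver, and remove() when it is unbound.
 */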
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		/* Shared WQ checks */
		if (wq_shared(wq)) {
			if (!device_swq_supported(idxd)) {
				dev_warn(dev,
					 "PASID not enabled and shared WQ.\n");
				mutex_unlock(&wq->wq_lock);
				return -ENXIO;
			}
			/*
			 * Shared wq with the threshold set to 0 means the user
			 * did not set the threshold or transitioned from a
			 * dedicated wq but did not set threshold. A value
			 * of 0 would effectively disable the shared wq. The
			 * driver does not allow a value of 0 to be set for
			 * threshold via sysfs.
			 */
			if (wq->threshold == 0) {
				dev_warn(dev,
					 "Shared WQ and threshold 0.\n");
				mutex_unlock(&wq->wq_lock);
				return -EINVAL;
			}
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}
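
/*
 * Illustrative binding flow (assumed canonical sysfs layout, not taken
 * verbatim from this file): user space configures through the attributes
 * below, then binds to trigger idxd_config_bus_probe(), e.g.
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 */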
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	rc = idxd_wq_disable(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
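
/*
 * disable_wq() is the teardown mirror of the wq branch in
 * idxd_config_bus_probe(); idxd_config_bus_remove() below uses it for wq
 * conf_devs and walks all wqs before disabling a whole device.
 */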
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}
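
/* Shutdown is currently a debug-only stub; there is nothing to quiesce. */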
static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

struct bus_type iax_bus_type = {
	.name = "iax",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type,
	&iax_bus_type
};
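
/* idxd_bus_types[] is indexed by enum idxd_type; see idxd_get_bus_type(). */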
static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver iax_drv = {
	.drv = {
		.name = "iax",
		.bus = &iax_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv,
	&iax_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}
static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else if (idxd->type == IDXD_TYPE_IAX)
		return &iax_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}
/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}

		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};
/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
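
/*
 * Illustrative arithmetic (made-up numbers): with max_tokens = 96 and
 * two groups reserving 24 tokens each, idxd_set_free_tokens() leaves
 * nr_tokens = 96 - 48 = 48 for the groups that reserve nothing.
 */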
static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);

	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;

	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;

	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
					idxd->id, engine->id);
	}

	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
					idxd->id, wq->id);
	}

	rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);
static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);
static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};
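
/*
 * Example group tuning via these attributes (illustrative paths, assuming
 * the canonical /sys/bus/dsa/devices layout):
 *
 *	echo 8 > group0.0/tokens_reserved
 *	echo 1 > group0.0/use_token_limit
 *	echo 2 > group0.0/traffic_class_a
 */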
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);
static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;

	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);
static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);
static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);
static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}
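
/*
 * Note: __get_sysfs_u64() rejects 0 and rounds the parsed value up to a
 * power of two, so writing e.g. 1000 to max_transfer_size stores 1024.
 */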
static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};
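
/*
 * Typical wq setup order through these attributes before binding
 * (illustrative, assuming device dsa0 in the canonical sysfs layout):
 *
 *	echo 0 > wq0.0/group_id
 *	echo 16 > wq0.0/size
 *	echo dedicated > wq0.0/mode
 *	echo user > wq0.0/type
 *	echo myapp > wq0.0/name
 */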
/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);
static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);
static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);
static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);
static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
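
/*
 * All device-level attributes above are read-only capability/state
 * reports except token_limit, which is writable only while the device
 * is disabled and configurable.
 */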
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}
static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}
int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}
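
/*
 * idxd_cleanup_sysfs() below is the mirror of the setup above: child
 * wq/engine/group conf_devs are unregistered before the parent device.
 */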
void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}
int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}