// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};
static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}
static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};
static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}
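
/*
 * The dsa bus binds the configuration devices (device and wq conf_devs)
 * to the generic idxd driver below.  match() only accepts devices that
 * are in a configurable state; probe() performs the actual configuration
 * and enabling, and remove() tears it back down.
 */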
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			/* dev_lock is no longer held here, so only the
			 * module reference needs to be dropped. */
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}
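
/*
 * Illustrative user-space sequence driving the probe path above (paths
 * follow the dsa bus naming used in this file; shown only as an example,
 * not as the authoritative ABI):
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo "kernel" > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo "dmaengine" > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 */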
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}
static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}
struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};
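
/*
 * Only the DSA device type exists at this point, so both arrays are
 * indexed by the device type (idxd->type) and hold a single entry each;
 * additional device types would add entries here and in
 * idxd_get_device_type() below.
 */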
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}
/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* Unwind only the drivers that registered successfully */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}
/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);
static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};
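
/*
 * Each conf_dev registered in the idxd_setup_*_sysfs() helpers below
 * points its .groups member at one of these NULL-terminated
 * attribute_group arrays, which is what makes the attributes appear
 * under the device's sysfs directory at device_register() time.
 */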
/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
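
/*
 * Worked example (hypothetical numbers): with max_tokens = 96 and three
 * groups reserving 32, 16 and 0 tokens, nr_tokens becomes
 * 96 - (32 + 16 + 0) = 48 tokens left in the shared free pool.
 */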
static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);
static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);
static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	if (rc)
		rc--;	/* overwrite the trailing space with the newline */
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);
static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	if (rc)
		rc--;	/* overwrite the trailing space with the newline */
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);
static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);
static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);
static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};
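
/*
 * All writable group attributes above follow the same pattern: reject
 * the write unless the device is configurable (IDXD_FLAG_CONFIGURABLE)
 * and not currently enabled, validate the range, then update the shadow
 * configuration that idxd_device_config() later programs into hardware.
 */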
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);
static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);
static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		/* shared mode is not configurable through this attribute */
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	/* The new size plus the sizes claimed by all other wqs must still
	 * fit within the device's total wq size. */
	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);
static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);
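
/*
 * The name is not just a label: together with the wq type it selects the
 * wq personality at enable time (see is_idxd_wq_dmaengine() above, which
 * keys on a kernel wq literally named "dmaengine").
 */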
static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};
/* IDXD device attribs */
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);
static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);
static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);
static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;	/* overwrite the trailing space with the newline */
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);
static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);
static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);
static struct attribute *idxd_device_attributes[] = {
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
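
/*
 * Everything below builds the sysfs hierarchy itself: one conf_dev per
 * idxd device on the dsa bus registered above, with child group<N>.<M>,
 * engine<N>.<M> and wq<N>.<M> devices matching the names generated by
 * dev_set_name() in these setup helpers.
 */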
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}
static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}
static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}
static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}
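
/*
 * Note: after a failed device_register() the embedded kobject is already
 * initialized, so the reference must be dropped with put_device() rather
 * than by freeing the structure directly, per the driver core's rules.
 * The same pattern is used in the per-wq/engine/group helpers above.
 */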
int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}
void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}
int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* Unwind only the bus types that registered successfully */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}