// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}
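
/*
 * The helpers above classify a configuration device by its device_type,
 * so the bus callbacks below can tell an idxd device node from a wq node,
 * and a kernel (dmaengine) wq from a user (cdev) wq by its type and name.
 */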

static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}
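
/*
 * probe() doubles as the "enable" operation: binding a device or wq to
 * the driver (e.g. through the driver-model bind attribute) lands here,
 * which writes the accumulated sysfs configuration to hardware and then
 * enables the device or wq.
 */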

static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}
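
/*
 * disable_wq() is the inverse of the wq half of probe: it tears down the
 * dmaengine channel or cdev, unmaps the portal, disables the wq in
 * hardware, and releases its resources.
 */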

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients has claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};
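
/*
 * One bus type and one driver exist per idxd device type; both arrays
 * are indexed by enum idxd_type, so idxd->type selects the matching
 * entry in idxd_get_bus_type() below.
 */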

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);
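
/*
 * Illustrative usage, with sysfs names assumed from the dev_set_name()
 * calls in the setup routines below:
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 * assigns engine 0 of device dsa0 to group 0; echoing -1 detaches it.
 */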

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
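
/*
 * Tokens are the device's shared descriptor credits: each group's
 * reservation is deducted from max_tokens, and nr_tokens tracks what
 * remains unreserved for the validity checks in the stores below.
 */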

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/* Replace the trailing space, if any, with the newline */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* Replace the trailing space, if any, with the newline */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		wq->type = IDXD_WQT_NONE;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};
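
/*
 * Illustrative wq bring-up through the attributes above (device and wq
 * names assumed from the dev_set_name() calls in the setup routines
 * below):
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo user > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo app0 > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 * The final bind triggers idxd_config_bus_probe() above.
 */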

/* IDXD device attribs */
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
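
/*
 * The attribute group arrays above are attached via conf_dev.groups in
 * the setup routines below, so the sysfs files are created atomically
 * with device_register() rather than added one by one afterwards.
 */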

static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}
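
/*
 * Note: after a failed device_register() the embedded kobject already
 * holds a reference, so the error paths above drop it with put_device()
 * instead of freeing the containing structure directly.
 */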

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}
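
/*
 * Bus registration below must happen at module init, before any idxd
 * device registers its conf_dev hierarchy on the bus returned by
 * idxd_get_bus_type().
 */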

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}