// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

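/*
 * The configuration interface is exposed through sysfs on the dsa bus. On a
 * typical system the devices registered below show up as, for example,
 * /sys/bus/dsa/devices/dsa0 (or iax1), with child devices such as wq0.0,
 * engine0.0 and group0.0 carrying the attributes defined in this file.
 */
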
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

const struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

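/*
 * Read buffers reserved by each group are subtracted from the device total;
 * whatever remains in idxd->nr_rdbufs is shared among all groups.
 */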
static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}

static ssize_t group_read_buffers_reserved_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_show(dev, attr, buf);
}

static ssize_t group_read_buffers_reserved_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_rdbufs)
		return -EINVAL;

	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;

	group->rdbufs_reserved = val;
	idxd_set_free_rdbufs(idxd);
	return count;
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static struct device_attribute dev_attr_group_read_buffers_reserved =
		__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
		       group_read_buffers_reserved_store);

static ssize_t group_read_buffers_allowed_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
}

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_show(dev, attr, buf);
}

static ssize_t group_read_buffers_allowed_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
		return -EINVAL;

	group->rdbufs_allowed = val;
	return count;
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static struct device_attribute dev_attr_group_read_buffers_allowed =
		__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
		       group_read_buffers_allowed_store);

static ssize_t group_use_read_buffer_limit_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
}

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_show(dev, attr, buf);
}

static ssize_t group_use_read_buffer_limit_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->rdbuf_limit == 0)
		return -EPERM;

	group->use_rdbuf_limit = !!val;
	return count;
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static struct device_attribute dev_attr_group_use_read_buffer_limit =
		__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
		       group_use_read_buffer_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

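/*
 * Traffic class values can only be changed while the device is configurable
 * and disabled. On hardware at or below DEVICE_VERSION_2 the write is
 * additionally gated by the tc_override module parameter, see the checks in
 * the store functions below.
 */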
static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static ssize_t group_desc_progress_limit_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->desc_progress_limit);
}

static ssize_t group_desc_progress_limit_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->desc_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_desc_progress_limit =
		__ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show,
		       group_desc_progress_limit_store);

static ssize_t group_batch_progress_limit_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->batch_progress_limit);
}

static ssize_t group_batch_progress_limit_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->batch_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_batch_progress_limit =
		__ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show,
		       group_batch_progress_limit_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_use_read_buffer_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_read_buffers_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_read_buffers_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	&dev_attr_group_desc_progress_limit.attr,
	&dev_attr_group_batch_progress_limit.attr,
	NULL,
};

static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
						     struct idxd_device *idxd)
{
	return (attr == &dev_attr_group_desc_progress_limit.attr ||
		attr == &dev_attr_group_batch_progress_limit.attr) &&
		!idxd->hw.group_cap.progress_limit;
}

static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr,
						   struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_group_use_token_limit.attr ||
		attr == &dev_attr_group_use_read_buffer_limit.attr ||
		attr == &dev_attr_group_tokens_allowed.attr ||
		attr == &dev_attr_group_read_buffers_allowed.attr ||
		attr == &dev_attr_group_tokens_reserved.attr ||
		attr == &dev_attr_group_read_buffers_reserved.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

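/*
 * is_visible() callback for the group attribute group: returning 0 hides an
 * attribute the hardware cannot support, otherwise the attribute keeps its
 * default mode.
 */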
static umode_t idxd_group_attr_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;

	if (idxd_group_attr_progress_limit_invisible(attr, idxd))
		return 0;

	if (idxd_group_attr_read_buffers_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
	.is_visible = idxd_group_attr_visible,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

const struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared")) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof) {
		if (test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
			return -EOPNOTSUPP;

		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	} else {
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	}

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	sprintf(wq->name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

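/*
 * Common parser for the size attributes below: reject zero, and round the
 * value up to the next power of two before handing it back to the caller.
 */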
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags));
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	if (ats_dis)
		set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
	else
		clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_prs_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags));
}

static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool prs_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	rc = kstrtobool(buf, &prs_dis);
	if (rc < 0)
		return rc;

	if (prs_dis) {
		set_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
		/* when PRS is disabled, BOF needs to be off as well */
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	} else {
		clear_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
	}
	return count;
}

static struct device_attribute dev_attr_wq_prs_disable =
		__ATTR(prs_disable, 0644, wq_prs_disable_show, wq_prs_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static ssize_t wq_enqcmds_retries_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
}

static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int rc;
	unsigned int retries;

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	rc = kstrtouint(buf, 10, &retries);
	if (rc < 0)
		return rc;

	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
		retries = IDXD_ENQCMDS_MAX_RETRIES;

	wq->enqcmds_retries = retries;
	return count;
}

static struct device_attribute dev_attr_wq_enqcmds_retries =
		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);

static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
{
	ssize_t pos;
	int i;

	pos = 0;
	for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
		unsigned long val = opcap_bmap[i];

		/* On systems where direct user submissions are not safe, we need to clear out
		 * the BATCH capability from the capability mask in sysfs since we cannot support
		 * that command on such systems.
		 */
		if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
			clear_bit(DSA_OPCODE_BATCH % 64, &val);

		pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
		pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
	}

	return pos;
}

static ssize_t wq_op_config_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return op_cap_show_common(dev, buf, wq->opcap_bmap);
}

static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
{
	int bit;

	/*
	 * The OPCAP is defined as 256 bits that represents each operation the device
	 * supports per bit. Iterate through all the bits and check if the input mask
	 * is set for bits that are not set in the OPCAP for the device. If no OPCAP
	 * bit is set and input mask has the bit set, then return error.
	 */
	for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
		if (!test_bit(bit, idxd->opcap_bmap))
			return -EINVAL;
	}

	return 0;
}

static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned long *opmask;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
	if (!opmask)
		return -ENOMEM;

	rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS);
	if (rc < 0)
		goto err;

	rc = idxd_verify_supported_opcap(idxd, opmask);
	if (rc < 0)
		goto err;

	bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS);

	bitmap_free(opmask);
	return count;

err:
	bitmap_free(opmask);
	return rc;
}

static struct device_attribute dev_attr_wq_op_config =
		__ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);

static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->driver_name);
}

static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
	sprintf(wq->driver_name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_driver_name =
		__ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_prs_disable.attr,
	&dev_attr_wq_occupancy.attr,
	&dev_attr_wq_enqcmds_retries.attr,
	&dev_attr_wq_op_config.attr,
	&dev_attr_wq_driver_name.attr,
	NULL,
};

/* A WQ attr is invisible if the feature is not supported in WQCAP. */
#define idxd_wq_attr_invisible(name, cap_field, a, idxd) \
	((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)

static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
						  struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_wq_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_wq_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
		return 0;

	if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
		return 0;

	if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
	.is_visible = idxd_wq_attr_visible,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	bitmap_free(wq->opcap_bmap);
	kfree(wq->wqcfg);
	xa_destroy(&wq->upasid_xa);
	kfree(wq);
}

const struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return op_cap_show_common(dev, buf, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	DECLARE_BITMAP(swerr_bmap, 256);

	bitmap_zero(swerr_bmap, 256);
	spin_lock(&idxd->dev_lock);
	multi_u64_to_bmap(swerr_bmap, &idxd->sw_err.bits[0], 4);
	spin_unlock(&idxd->dev_lock);
	return sysfs_emit(buf, "%*pb\n", 256, swerr_bmap);
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_read_buffers_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
}

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
	return max_read_buffers_show(dev, attr, buf);
}

static DEVICE_ATTR_RO(max_tokens);	/* deprecated */
static DEVICE_ATTR_RO(max_read_buffers);

static ssize_t read_buffer_limit_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_show(dev, attr, buf);
}

static ssize_t read_buffer_limit_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.rdbuf_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_rdbufs)
		return -EINVAL;

	idxd->rdbuf_limit = val;
	return count;
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit\n");
	return read_buffer_limit_store(dev, attr, buf, count);
}

static DEVICE_ATTR_RW(token_limit);	/* deprecated */
static DEVICE_ATTR_RW(read_buffer_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}

static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	idxd->cmd_status = 0;
	return count;
}
static DEVICE_ATTR_RW(cmd_status);

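/*
 * iaa_cap is only meaningful on IAA hardware at DEVICE_VERSION_2 or later;
 * idxd_device_attr_iaa_cap_invisible() below hides it everywhere else, and
 * the show function also rejects older hardware.
 */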
static ssize_t iaa_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (idxd->hw.version < DEVICE_VERSION_2)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%#llx\n", idxd->hw.iaa_cap.bits);
}
static DEVICE_ATTR_RO(iaa_cap);

static ssize_t event_log_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (!idxd->evl)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", idxd->evl->size);
}

static ssize_t event_log_size_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	if (!idxd->evl)
		return -EOPNOTSUPP;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (val < IDXD_EVL_SIZE_MIN || val > IDXD_EVL_SIZE_MAX ||
	    (val * evl_ent_size(idxd) > ULONG_MAX - idxd->evl->dma))
		return -EINVAL;

	idxd->evl->size = val;
	return count;
}
static DEVICE_ATTR_RW(event_log_size);

static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
						    struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_max_tokens.attr ||
		attr == &dev_attr_max_read_buffers.attr ||
		attr == &dev_attr_token_limit.attr ||
		attr == &dev_attr_read_buffer_limit.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_device_attr_iaa_cap_invisible(struct attribute *attr,
					       struct idxd_device *idxd)
{
	return attr == &dev_attr_iaa_cap.attr &&
	       (idxd->data->type != IDXD_TYPE_IAX ||
	       idxd->hw.version < DEVICE_VERSION_2);
}

static bool idxd_device_attr_event_log_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{
	return (attr == &dev_attr_event_log_size.attr &&
		!idxd->hw.gen_cap.evl_support);
}

static umode_t idxd_device_attr_visible(struct kobject *kobj,
					struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (idxd_device_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_read_buffers_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_iaa_cap_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_event_log_size_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_max_read_buffers.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_read_buffer_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	&dev_attr_iaa_cap.attr,
	&dev_attr_event_log_size.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
	.is_visible = idxd_device_attr_visible,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	bitmap_free(idxd->wq_enable_map);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->evl);
	kmem_cache_destroy(idxd->evl_cache);
	ida_free(&idxd_ida, idxd->id);
	bitmap_free(idxd->opcap_bmap);
	kfree(idxd);
}

const struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

const struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

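/*
 * The registration helpers below follow the same pattern: device_add() each
 * child in turn and, on failure, drop the reference of the children that
 * were never added (put_device()) while fully unregistering the ones that
 * were (device_unregister()).
 */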
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}