/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/pci_regs.h>

#include "cxl.h"
/* The AFU master chardev stores its cxl_afu pointer in the device drvdata. */
#define to_afu_chardev_m(d) dev_get_drvdata(d)
/*********  Adapter attributes  **********************************************/
21 static ssize_t
caia_version_show(struct device
*device
,
22 struct device_attribute
*attr
,
25 struct cxl
*adapter
= to_cxl_adapter(device
);
27 return scnprintf(buf
, PAGE_SIZE
, "%i.%i\n", adapter
->caia_major
,
31 static ssize_t
psl_revision_show(struct device
*device
,
32 struct device_attribute
*attr
,
35 struct cxl
*adapter
= to_cxl_adapter(device
);
37 return scnprintf(buf
, PAGE_SIZE
, "%i\n", adapter
->psl_rev
);
40 static ssize_t
base_image_show(struct device
*device
,
41 struct device_attribute
*attr
,
44 struct cxl
*adapter
= to_cxl_adapter(device
);
46 return scnprintf(buf
, PAGE_SIZE
, "%i\n", adapter
->base_image
);
49 static ssize_t
image_loaded_show(struct device
*device
,
50 struct device_attribute
*attr
,
53 struct cxl
*adapter
= to_cxl_adapter(device
);
55 if (adapter
->user_image_loaded
)
56 return scnprintf(buf
, PAGE_SIZE
, "user\n");
57 return scnprintf(buf
, PAGE_SIZE
, "factory\n");
60 static ssize_t
psl_timebase_synced_show(struct device
*device
,
61 struct device_attribute
*attr
,
64 struct cxl
*adapter
= to_cxl_adapter(device
);
66 return scnprintf(buf
, PAGE_SIZE
, "%i\n", adapter
->psl_timebase_synced
);
69 static ssize_t
reset_adapter_store(struct device
*device
,
70 struct device_attribute
*attr
,
71 const char *buf
, size_t count
)
73 struct cxl
*adapter
= to_cxl_adapter(device
);
77 rc
= sscanf(buf
, "%i", &val
);
78 if ((rc
!= 1) || (val
!= 1 && val
!= -1))
82 * See if we can lock the context mapping that's only allowed
83 * when there are no contexts attached to the adapter. Once
84 * taken this will also prevent any context from getting activated.
87 rc
= cxl_adapter_context_lock(adapter
);
91 rc
= cxl_ops
->adapter_reset(adapter
);
92 /* In case reset failed release context lock */
94 cxl_adapter_context_unlock(adapter
);
96 } else if (val
== -1) {
97 /* Perform a forced adapter reset */
98 rc
= cxl_ops
->adapter_reset(adapter
);
102 return rc
? rc
: count
;
105 static ssize_t
load_image_on_perst_show(struct device
*device
,
106 struct device_attribute
*attr
,
109 struct cxl
*adapter
= to_cxl_adapter(device
);
111 if (!adapter
->perst_loads_image
)
112 return scnprintf(buf
, PAGE_SIZE
, "none\n");
114 if (adapter
->perst_select_user
)
115 return scnprintf(buf
, PAGE_SIZE
, "user\n");
116 return scnprintf(buf
, PAGE_SIZE
, "factory\n");
119 static ssize_t
load_image_on_perst_store(struct device
*device
,
120 struct device_attribute
*attr
,
121 const char *buf
, size_t count
)
123 struct cxl
*adapter
= to_cxl_adapter(device
);
126 if (!strncmp(buf
, "none", 4))
127 adapter
->perst_loads_image
= false;
128 else if (!strncmp(buf
, "user", 4)) {
129 adapter
->perst_select_user
= true;
130 adapter
->perst_loads_image
= true;
131 } else if (!strncmp(buf
, "factory", 7)) {
132 adapter
->perst_select_user
= false;
133 adapter
->perst_loads_image
= true;
137 if ((rc
= cxl_update_image_control(adapter
)))
143 static ssize_t
perst_reloads_same_image_show(struct device
*device
,
144 struct device_attribute
*attr
,
147 struct cxl
*adapter
= to_cxl_adapter(device
);
149 return scnprintf(buf
, PAGE_SIZE
, "%i\n", adapter
->perst_same_image
);
152 static ssize_t
perst_reloads_same_image_store(struct device
*device
,
153 struct device_attribute
*attr
,
154 const char *buf
, size_t count
)
156 struct cxl
*adapter
= to_cxl_adapter(device
);
160 rc
= sscanf(buf
, "%i", &val
);
161 if ((rc
!= 1) || !(val
== 1 || val
== 0))
164 adapter
->perst_same_image
= (val
== 1 ? true : false);
168 static struct device_attribute adapter_attrs
[] = {
169 __ATTR_RO(caia_version
),
170 __ATTR_RO(psl_revision
),
171 __ATTR_RO(base_image
),
172 __ATTR_RO(image_loaded
),
173 __ATTR_RO(psl_timebase_synced
),
174 __ATTR_RW(load_image_on_perst
),
175 __ATTR_RW(perst_reloads_same_image
),
176 __ATTR(reset
, S_IWUSR
, NULL
, reset_adapter_store
),
/*********  AFU master specific attributes  **********************************/
182 static ssize_t
mmio_size_show_master(struct device
*device
,
183 struct device_attribute
*attr
,
186 struct cxl_afu
*afu
= to_afu_chardev_m(device
);
188 return scnprintf(buf
, PAGE_SIZE
, "%llu\n", afu
->adapter
->ps_size
);
191 static ssize_t
pp_mmio_off_show(struct device
*device
,
192 struct device_attribute
*attr
,
195 struct cxl_afu
*afu
= to_afu_chardev_m(device
);
197 return scnprintf(buf
, PAGE_SIZE
, "%llu\n", afu
->native
->pp_offset
);
200 static ssize_t
pp_mmio_len_show(struct device
*device
,
201 struct device_attribute
*attr
,
204 struct cxl_afu
*afu
= to_afu_chardev_m(device
);
206 return scnprintf(buf
, PAGE_SIZE
, "%llu\n", afu
->pp_size
);
209 static struct device_attribute afu_master_attrs
[] = {
210 __ATTR(mmio_size
, S_IRUGO
, mmio_size_show_master
, NULL
),
211 __ATTR_RO(pp_mmio_off
),
212 __ATTR_RO(pp_mmio_len
),
/*********  AFU attributes  **************************************************/
218 static ssize_t
mmio_size_show(struct device
*device
,
219 struct device_attribute
*attr
,
222 struct cxl_afu
*afu
= to_cxl_afu(device
);
225 return scnprintf(buf
, PAGE_SIZE
, "%llu\n", afu
->pp_size
);
226 return scnprintf(buf
, PAGE_SIZE
, "%llu\n", afu
->adapter
->ps_size
);
229 static ssize_t
reset_store_afu(struct device
*device
,
230 struct device_attribute
*attr
,
231 const char *buf
, size_t count
)
233 struct cxl_afu
*afu
= to_cxl_afu(device
);
236 /* Not safe to reset if it is currently in use */
237 mutex_lock(&afu
->contexts_lock
);
238 if (!idr_is_empty(&afu
->contexts_idr
)) {
243 if ((rc
= cxl_ops
->afu_reset(afu
)))
248 mutex_unlock(&afu
->contexts_lock
);
252 static ssize_t
irqs_min_show(struct device
*device
,
253 struct device_attribute
*attr
,
256 struct cxl_afu
*afu
= to_cxl_afu(device
);
258 return scnprintf(buf
, PAGE_SIZE
, "%i\n", afu
->pp_irqs
);
261 static ssize_t
irqs_max_show(struct device
*device
,
262 struct device_attribute
*attr
,
265 struct cxl_afu
*afu
= to_cxl_afu(device
);
267 return scnprintf(buf
, PAGE_SIZE
, "%i\n", afu
->irqs_max
);
270 static ssize_t
irqs_max_store(struct device
*device
,
271 struct device_attribute
*attr
,
272 const char *buf
, size_t count
)
274 struct cxl_afu
*afu
= to_cxl_afu(device
);
278 ret
= sscanf(buf
, "%i", &irqs_max
);
282 if (irqs_max
< afu
->pp_irqs
)
285 if (cpu_has_feature(CPU_FTR_HVMODE
)) {
286 if (irqs_max
> afu
->adapter
->user_irqs
)
289 /* pHyp sets a per-AFU limit */
290 if (irqs_max
> afu
->guest
->max_ints
)
294 afu
->irqs_max
= irqs_max
;
298 static ssize_t
modes_supported_show(struct device
*device
,
299 struct device_attribute
*attr
, char *buf
)
301 struct cxl_afu
*afu
= to_cxl_afu(device
);
302 char *p
= buf
, *end
= buf
+ PAGE_SIZE
;
304 if (afu
->modes_supported
& CXL_MODE_DEDICATED
)
305 p
+= scnprintf(p
, end
- p
, "dedicated_process\n");
306 if (afu
->modes_supported
& CXL_MODE_DIRECTED
)
307 p
+= scnprintf(p
, end
- p
, "afu_directed\n");
311 static ssize_t
prefault_mode_show(struct device
*device
,
312 struct device_attribute
*attr
,
315 struct cxl_afu
*afu
= to_cxl_afu(device
);
317 switch (afu
->prefault_mode
) {
318 case CXL_PREFAULT_WED
:
319 return scnprintf(buf
, PAGE_SIZE
, "work_element_descriptor\n");
320 case CXL_PREFAULT_ALL
:
321 return scnprintf(buf
, PAGE_SIZE
, "all\n");
323 return scnprintf(buf
, PAGE_SIZE
, "none\n");
327 static ssize_t
prefault_mode_store(struct device
*device
,
328 struct device_attribute
*attr
,
329 const char *buf
, size_t count
)
331 struct cxl_afu
*afu
= to_cxl_afu(device
);
332 enum prefault_modes mode
= -1;
334 if (!strncmp(buf
, "work_element_descriptor", 23))
335 mode
= CXL_PREFAULT_WED
;
336 if (!strncmp(buf
, "all", 3))
337 mode
= CXL_PREFAULT_ALL
;
338 if (!strncmp(buf
, "none", 4))
339 mode
= CXL_PREFAULT_NONE
;
344 afu
->prefault_mode
= mode
;
348 static ssize_t
mode_show(struct device
*device
,
349 struct device_attribute
*attr
,
352 struct cxl_afu
*afu
= to_cxl_afu(device
);
354 if (afu
->current_mode
== CXL_MODE_DEDICATED
)
355 return scnprintf(buf
, PAGE_SIZE
, "dedicated_process\n");
356 if (afu
->current_mode
== CXL_MODE_DIRECTED
)
357 return scnprintf(buf
, PAGE_SIZE
, "afu_directed\n");
358 return scnprintf(buf
, PAGE_SIZE
, "none\n");
361 static ssize_t
mode_store(struct device
*device
, struct device_attribute
*attr
,
362 const char *buf
, size_t count
)
364 struct cxl_afu
*afu
= to_cxl_afu(device
);
365 int old_mode
, mode
= -1;
368 /* can't change this if we have a user */
369 mutex_lock(&afu
->contexts_lock
);
370 if (!idr_is_empty(&afu
->contexts_idr
))
373 if (!strncmp(buf
, "dedicated_process", 17))
374 mode
= CXL_MODE_DEDICATED
;
375 if (!strncmp(buf
, "afu_directed", 12))
376 mode
= CXL_MODE_DIRECTED
;
377 if (!strncmp(buf
, "none", 4))
386 * afu_deactivate_mode needs to be done outside the lock, prevent
387 * other contexts coming in before we are ready:
389 old_mode
= afu
->current_mode
;
390 afu
->current_mode
= 0;
393 mutex_unlock(&afu
->contexts_lock
);
395 if ((rc
= cxl_ops
->afu_deactivate_mode(afu
, old_mode
)))
397 if ((rc
= cxl_ops
->afu_activate_mode(afu
, mode
)))
402 mutex_unlock(&afu
->contexts_lock
);
406 static ssize_t
api_version_show(struct device
*device
,
407 struct device_attribute
*attr
,
410 return scnprintf(buf
, PAGE_SIZE
, "%i\n", CXL_API_VERSION
);
413 static ssize_t
api_version_compatible_show(struct device
*device
,
414 struct device_attribute
*attr
,
417 return scnprintf(buf
, PAGE_SIZE
, "%i\n", CXL_API_VERSION_COMPATIBLE
);
420 static ssize_t
afu_eb_read(struct file
*filp
, struct kobject
*kobj
,
421 struct bin_attribute
*bin_attr
, char *buf
,
422 loff_t off
, size_t count
)
424 struct cxl_afu
*afu
= to_cxl_afu(kobj_to_dev(kobj
));
426 return cxl_ops
->afu_read_err_buffer(afu
, buf
, off
, count
);
429 static struct device_attribute afu_attrs
[] = {
430 __ATTR_RO(mmio_size
),
433 __ATTR_RO(modes_supported
),
435 __ATTR_RW(prefault_mode
),
436 __ATTR_RO(api_version
),
437 __ATTR_RO(api_version_compatible
),
438 __ATTR(reset
, S_IWUSR
, NULL
, reset_store_afu
),
441 int cxl_sysfs_adapter_add(struct cxl
*adapter
)
443 struct device_attribute
*dev_attr
;
446 for (i
= 0; i
< ARRAY_SIZE(adapter_attrs
); i
++) {
447 dev_attr
= &adapter_attrs
[i
];
448 if (cxl_ops
->support_attributes(dev_attr
->attr
.name
,
449 CXL_ADAPTER_ATTRS
)) {
450 if ((rc
= device_create_file(&adapter
->dev
, dev_attr
)))
456 for (i
--; i
>= 0; i
--) {
457 dev_attr
= &adapter_attrs
[i
];
458 if (cxl_ops
->support_attributes(dev_attr
->attr
.name
,
460 device_remove_file(&adapter
->dev
, dev_attr
);
465 void cxl_sysfs_adapter_remove(struct cxl
*adapter
)
467 struct device_attribute
*dev_attr
;
470 for (i
= 0; i
< ARRAY_SIZE(adapter_attrs
); i
++) {
471 dev_attr
= &adapter_attrs
[i
];
472 if (cxl_ops
->support_attributes(dev_attr
->attr
.name
,
474 device_remove_file(&adapter
->dev
, dev_attr
);
478 struct afu_config_record
{
480 struct bin_attribute config_attr
;
481 struct list_head list
;
/* Map an embedded kobject back to its afu_config_record. */
#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)
490 static ssize_t
vendor_show(struct kobject
*kobj
,
491 struct kobj_attribute
*attr
, char *buf
)
493 struct afu_config_record
*cr
= to_cr(kobj
);
495 return scnprintf(buf
, PAGE_SIZE
, "0x%.4x\n", cr
->vendor
);
498 static ssize_t
device_show(struct kobject
*kobj
,
499 struct kobj_attribute
*attr
, char *buf
)
501 struct afu_config_record
*cr
= to_cr(kobj
);
503 return scnprintf(buf
, PAGE_SIZE
, "0x%.4x\n", cr
->device
);
506 static ssize_t
class_show(struct kobject
*kobj
,
507 struct kobj_attribute
*attr
, char *buf
)
509 struct afu_config_record
*cr
= to_cr(kobj
);
511 return scnprintf(buf
, PAGE_SIZE
, "0x%.6x\n", cr
->class);
514 static ssize_t
afu_read_config(struct file
*filp
, struct kobject
*kobj
,
515 struct bin_attribute
*bin_attr
, char *buf
,
516 loff_t off
, size_t count
)
518 struct afu_config_record
*cr
= to_cr(kobj
);
519 struct cxl_afu
*afu
= to_cxl_afu(kobj_to_dev(kobj
->parent
));
523 for (i
= 0; i
< count
;) {
524 rc
= cxl_ops
->afu_cr_read64(afu
, cr
->cr
, off
& ~0x7, &val
);
527 for (j
= off
& 0x7; j
< 8 && i
< count
; i
++, j
++, off
++)
528 buf
[i
] = (val
>> (j
* 8)) & 0xff;
534 static struct kobj_attribute vendor_attribute
=
536 static struct kobj_attribute device_attribute
=
538 static struct kobj_attribute class_attribute
=
541 static struct attribute
*afu_cr_attrs
[] = {
542 &vendor_attribute
.attr
,
543 &device_attribute
.attr
,
544 &class_attribute
.attr
,
/* kobject release: free the afu_config_record once its refcount drops. */
static void release_afu_config_record(struct kobject *kobj)
{
	struct afu_config_record *cr = to_cr(kobj);

	kfree(cr);
}
555 static struct kobj_type afu_config_record_type
= {
556 .sysfs_ops
= &kobj_sysfs_ops
,
557 .release
= release_afu_config_record
,
558 .default_attrs
= afu_cr_attrs
,
561 static struct afu_config_record
*cxl_sysfs_afu_new_cr(struct cxl_afu
*afu
, int cr_idx
)
563 struct afu_config_record
*cr
;
566 cr
= kzalloc(sizeof(struct afu_config_record
), GFP_KERNEL
);
568 return ERR_PTR(-ENOMEM
);
572 rc
= cxl_ops
->afu_cr_read16(afu
, cr_idx
, PCI_DEVICE_ID
, &cr
->device
);
575 rc
= cxl_ops
->afu_cr_read16(afu
, cr_idx
, PCI_VENDOR_ID
, &cr
->vendor
);
578 rc
= cxl_ops
->afu_cr_read32(afu
, cr_idx
, PCI_CLASS_REVISION
, &cr
->class);
584 * Export raw AFU PCIe like config record. For now this is read only by
585 * root - we can expand that later to be readable by non-root and maybe
586 * even writable provided we have a good use-case. Once we support
587 * exposing AFUs through a virtual PHB they will get that for free from
588 * Linux' PCI infrastructure, but until then it's not clear that we
589 * need it for anything since the main use case is just identifying
590 * AFUs, which can be done via the vendor, device and class attributes.
592 sysfs_bin_attr_init(&cr
->config_attr
);
593 cr
->config_attr
.attr
.name
= "config";
594 cr
->config_attr
.attr
.mode
= S_IRUSR
;
595 cr
->config_attr
.size
= afu
->crs_len
;
596 cr
->config_attr
.read
= afu_read_config
;
598 rc
= kobject_init_and_add(&cr
->kobj
, &afu_config_record_type
,
599 &afu
->dev
.kobj
, "cr%i", cr
->cr
);
603 rc
= sysfs_create_bin_file(&cr
->kobj
, &cr
->config_attr
);
607 rc
= kobject_uevent(&cr
->kobj
, KOBJ_ADD
);
613 sysfs_remove_bin_file(&cr
->kobj
, &cr
->config_attr
);
615 kobject_put(&cr
->kobj
);
622 void cxl_sysfs_afu_remove(struct cxl_afu
*afu
)
624 struct device_attribute
*dev_attr
;
625 struct afu_config_record
*cr
, *tmp
;
628 /* remove the err buffer bin attribute */
630 device_remove_bin_file(&afu
->dev
, &afu
->attr_eb
);
632 for (i
= 0; i
< ARRAY_SIZE(afu_attrs
); i
++) {
633 dev_attr
= &afu_attrs
[i
];
634 if (cxl_ops
->support_attributes(dev_attr
->attr
.name
,
636 device_remove_file(&afu
->dev
, &afu_attrs
[i
]);
639 list_for_each_entry_safe(cr
, tmp
, &afu
->crs
, list
) {
640 sysfs_remove_bin_file(&cr
->kobj
, &cr
->config_attr
);
641 kobject_put(&cr
->kobj
);
645 int cxl_sysfs_afu_add(struct cxl_afu
*afu
)
647 struct device_attribute
*dev_attr
;
648 struct afu_config_record
*cr
;
651 INIT_LIST_HEAD(&afu
->crs
);
653 for (i
= 0; i
< ARRAY_SIZE(afu_attrs
); i
++) {
654 dev_attr
= &afu_attrs
[i
];
655 if (cxl_ops
->support_attributes(dev_attr
->attr
.name
,
657 if ((rc
= device_create_file(&afu
->dev
, &afu_attrs
[i
])))
662 /* conditionally create the add the binary file for error info buffer */
664 sysfs_attr_init(&afu
->attr_eb
.attr
);
666 afu
->attr_eb
.attr
.name
= "afu_err_buff";
667 afu
->attr_eb
.attr
.mode
= S_IRUGO
;
668 afu
->attr_eb
.size
= afu
->eb_len
;
669 afu
->attr_eb
.read
= afu_eb_read
;
671 rc
= device_create_bin_file(&afu
->dev
, &afu
->attr_eb
);
674 "Unable to create eb attr for the afu. Err(%d)\n",
680 for (i
= 0; i
< afu
->crs_num
; i
++) {
681 cr
= cxl_sysfs_afu_new_cr(afu
, i
);
686 list_add(&cr
->list
, &afu
->crs
);
692 cxl_sysfs_afu_remove(afu
);
695 /* reset the eb_len as we havent created the bin attr */
698 for (i
--; i
>= 0; i
--) {
699 dev_attr
= &afu_attrs
[i
];
700 if (cxl_ops
->support_attributes(dev_attr
->attr
.name
,
702 device_remove_file(&afu
->dev
, &afu_attrs
[i
]);
707 int cxl_sysfs_afu_m_add(struct cxl_afu
*afu
)
709 struct device_attribute
*dev_attr
;
712 for (i
= 0; i
< ARRAY_SIZE(afu_master_attrs
); i
++) {
713 dev_attr
= &afu_master_attrs
[i
];
714 if (cxl_ops
->support_attributes(dev_attr
->attr
.name
,
715 CXL_AFU_MASTER_ATTRS
)) {
716 if ((rc
= device_create_file(afu
->chardev_m
, &afu_master_attrs
[i
])))
724 for (i
--; i
>= 0; i
--) {
725 dev_attr
= &afu_master_attrs
[i
];
726 if (cxl_ops
->support_attributes(dev_attr
->attr
.name
,
727 CXL_AFU_MASTER_ATTRS
))
728 device_remove_file(afu
->chardev_m
, &afu_master_attrs
[i
]);
733 void cxl_sysfs_afu_m_remove(struct cxl_afu
*afu
)
735 struct device_attribute
*dev_attr
;
738 for (i
= 0; i
< ARRAY_SIZE(afu_master_attrs
); i
++) {
739 dev_attr
= &afu_master_attrs
[i
];
740 if (cxl_ops
->support_attributes(dev_attr
->attr
.name
,
741 CXL_AFU_MASTER_ATTRS
))
742 device_remove_file(afu
->chardev_m
, &afu_master_attrs
[i
]);