1 // SPDX-License-Identifier: GPL-2.0
 * Intel Platform Monitoring Technology Telemetry driver
5 * Copyright (c) 2020, Intel Corporation.
8 * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
#include <linux/intel_vsec.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
20 #define PMT_XA_START 1
21 #define PMT_XA_MAX INT_MAX
22 #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
23 #define GUID_SPR_PUNIT 0x9956f43f
25 bool intel_pmt_is_early_client_hw(struct device
*dev
)
27 struct intel_vsec_device
*ivdev
= dev_to_ivdev(dev
);
30 * Early implementations of PMT on client platforms have some
31 * differences from the server platforms (which use the Out Of Band
32 * Management Services Module OOBMSM).
34 return !!(ivdev
->quirks
& VSEC_QUIRK_EARLY_HW
);
36 EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw
, "INTEL_PMT");
39 pmt_memcpy64_fromio(void *to
, const u64 __iomem
*from
, size_t count
)
44 if (!IS_ALIGNED((unsigned long)from
, 8))
47 for (i
= 0; i
< count
/8; i
++)
48 buf
[i
] = readq(&from
[i
]);
50 /* Copy any remaining bytes */
53 u64 tmp
= readq(&from
[i
]);
55 memcpy(&buf
[i
], &tmp
, remain
);
61 int pmt_telem_read_mmio(struct pci_dev
*pdev
, struct pmt_callbacks
*cb
, u32 guid
, void *buf
,
62 void __iomem
*addr
, loff_t off
, u32 count
)
64 if (cb
&& cb
->read_telem
)
65 return cb
->read_telem(pdev
, guid
, buf
, off
, count
);
69 if (guid
== GUID_SPR_PUNIT
)
70 /* PUNIT on SPR only supports aligned 64-bit read */
71 return pmt_memcpy64_fromio(buf
, addr
, count
);
73 memcpy_fromio(buf
, addr
, count
);
77 EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio
, "INTEL_PMT");
83 intel_pmt_read(struct file
*filp
, struct kobject
*kobj
,
84 struct bin_attribute
*attr
, char *buf
, loff_t off
,
87 struct intel_pmt_entry
*entry
= container_of(attr
,
88 struct intel_pmt_entry
,
94 if (off
>= entry
->size
)
97 if (count
> entry
->size
- off
)
98 count
= entry
->size
- off
;
100 count
= pmt_telem_read_mmio(entry
->ep
->pcidev
, entry
->cb
, entry
->header
.guid
, buf
,
101 entry
->base
, off
, count
);
107 intel_pmt_mmap(struct file
*filp
, struct kobject
*kobj
,
108 const struct bin_attribute
*attr
, struct vm_area_struct
*vma
)
110 struct intel_pmt_entry
*entry
= container_of(attr
,
111 struct intel_pmt_entry
,
113 unsigned long vsize
= vma
->vm_end
- vma
->vm_start
;
114 struct device
*dev
= kobj_to_dev(kobj
);
115 unsigned long phys
= entry
->base_addr
;
116 unsigned long pfn
= PFN_DOWN(phys
);
119 if (vma
->vm_flags
& (VM_WRITE
| VM_MAYWRITE
))
122 psize
= (PFN_UP(entry
->base_addr
+ entry
->size
) - pfn
) * PAGE_SIZE
;
124 dev_err(dev
, "Requested mmap size is too large\n");
128 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
129 if (io_remap_pfn_range(vma
, vma
->vm_start
, pfn
,
130 vsize
, vma
->vm_page_prot
))
137 guid_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
139 struct intel_pmt_entry
*entry
= dev_get_drvdata(dev
);
141 return sprintf(buf
, "0x%x\n", entry
->guid
);
143 static DEVICE_ATTR_RO(guid
);
145 static ssize_t
size_show(struct device
*dev
, struct device_attribute
*attr
,
148 struct intel_pmt_entry
*entry
= dev_get_drvdata(dev
);
150 return sprintf(buf
, "%zu\n", entry
->size
);
152 static DEVICE_ATTR_RO(size
);
155 offset_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
157 struct intel_pmt_entry
*entry
= dev_get_drvdata(dev
);
159 return sprintf(buf
, "%lu\n", offset_in_page(entry
->base_addr
));
161 static DEVICE_ATTR_RO(offset
);
163 static struct attribute
*intel_pmt_attrs
[] = {
166 &dev_attr_offset
.attr
,
169 ATTRIBUTE_GROUPS(intel_pmt
);
171 static struct class intel_pmt_class
= {
173 .dev_groups
= intel_pmt_groups
,
176 static int intel_pmt_populate_entry(struct intel_pmt_entry
*entry
,
177 struct intel_vsec_device
*ivdev
,
178 struct resource
*disc_res
)
180 struct pci_dev
*pci_dev
= ivdev
->pcidev
;
181 struct device
*dev
= &ivdev
->auxdev
.dev
;
182 struct intel_pmt_header
*header
= &entry
->header
;
186 * The base offset should always be 8 byte aligned.
188 * For non-local access types the lower 3 bits of base offset
189 * contains the index of the base address register where the
190 * telemetry can be found.
192 bir
= GET_BIR(header
->base_offset
);
194 /* Local access and BARID only for now */
195 switch (header
->access_type
) {
199 "Unsupported BAR index %d for access type %d\n",
200 bir
, header
->access_type
);
204 * For access_type LOCAL, the base address is as follows:
205 * base address = end of discovery region + base offset
207 entry
->base_addr
= disc_res
->end
+ 1 + header
->base_offset
;
210 * Some hardware use a different calculation for the base address
211 * when access_type == ACCESS_LOCAL. On the these systems
212 * ACCESS_LOCAL refers to an address in the same BAR as the
213 * header but at a fixed offset. But as the header address was
214 * supplied to the driver, we don't know which BAR it was in.
215 * So search for the bar whose range includes the header address.
217 if (intel_pmt_is_early_client_hw(dev
)) {
220 entry
->base_addr
= 0;
221 for (i
= 0; i
< 6; i
++)
222 if (disc_res
->start
>= pci_resource_start(pci_dev
, i
) &&
223 (disc_res
->start
<= pci_resource_end(pci_dev
, i
))) {
224 entry
->base_addr
= pci_resource_start(pci_dev
, i
) +
228 if (!entry
->base_addr
)
234 /* Use the provided base address if it exists */
235 if (ivdev
->base_addr
) {
236 entry
->base_addr
= ivdev
->base_addr
+
237 GET_ADDRESS(header
->base_offset
);
242 * If another BAR was specified then the base offset
243 * represents the offset within that BAR. SO retrieve the
244 * address from the parent PCI device and add offset.
246 entry
->base_addr
= pci_resource_start(pci_dev
, bir
) +
247 GET_ADDRESS(header
->base_offset
);
250 dev_err(dev
, "Unsupported access type %d\n",
251 header
->access_type
);
255 entry
->guid
= header
->guid
;
256 entry
->size
= header
->size
;
257 entry
->cb
= ivdev
->priv_data
;
262 static int intel_pmt_dev_register(struct intel_pmt_entry
*entry
,
263 struct intel_pmt_namespace
*ns
,
264 struct device
*parent
)
266 struct intel_vsec_device
*ivdev
= dev_to_ivdev(parent
);
267 struct resource res
= {0};
271 ret
= xa_alloc(ns
->xa
, &entry
->devid
, entry
, PMT_XA_LIMIT
, GFP_KERNEL
);
275 dev
= device_create(&intel_pmt_class
, parent
, MKDEV(0, 0), entry
,
276 "%s%d", ns
->name
, entry
->devid
);
279 dev_err(parent
, "Could not create %s%d device node\n",
280 ns
->name
, entry
->devid
);
282 goto fail_dev_create
;
285 entry
->kobj
= &dev
->kobj
;
288 ret
= sysfs_create_group(entry
->kobj
, ns
->attr_grp
);
290 goto fail_sysfs_create_group
;
293 /* if size is 0 assume no data buffer, so no file needed */
297 res
.start
= entry
->base_addr
;
298 res
.end
= res
.start
+ entry
->size
- 1;
299 res
.flags
= IORESOURCE_MEM
;
301 entry
->base
= devm_ioremap_resource(dev
, &res
);
302 if (IS_ERR(entry
->base
)) {
303 ret
= PTR_ERR(entry
->base
);
307 sysfs_bin_attr_init(&entry
->pmt_bin_attr
);
308 entry
->pmt_bin_attr
.attr
.name
= ns
->name
;
309 entry
->pmt_bin_attr
.attr
.mode
= 0440;
310 entry
->pmt_bin_attr
.mmap
= intel_pmt_mmap
;
311 entry
->pmt_bin_attr
.read
= intel_pmt_read
;
312 entry
->pmt_bin_attr
.size
= entry
->size
;
314 ret
= sysfs_create_bin_file(&dev
->kobj
, &entry
->pmt_bin_attr
);
318 if (ns
->pmt_add_endpoint
) {
319 ret
= ns
->pmt_add_endpoint(ivdev
, entry
);
321 goto fail_add_endpoint
;
327 sysfs_remove_bin_file(entry
->kobj
, &entry
->pmt_bin_attr
);
330 sysfs_remove_group(entry
->kobj
, ns
->attr_grp
);
331 fail_sysfs_create_group
:
332 device_unregister(dev
);
334 xa_erase(ns
->xa
, entry
->devid
);
339 int intel_pmt_dev_create(struct intel_pmt_entry
*entry
, struct intel_pmt_namespace
*ns
,
340 struct intel_vsec_device
*intel_vsec_dev
, int idx
)
342 struct device
*dev
= &intel_vsec_dev
->auxdev
.dev
;
343 struct resource
*disc_res
;
346 disc_res
= &intel_vsec_dev
->resource
[idx
];
348 entry
->disc_table
= devm_ioremap_resource(dev
, disc_res
);
349 if (IS_ERR(entry
->disc_table
))
350 return PTR_ERR(entry
->disc_table
);
352 ret
= ns
->pmt_header_decode(entry
, dev
);
356 ret
= intel_pmt_populate_entry(entry
, intel_vsec_dev
, disc_res
);
360 return intel_pmt_dev_register(entry
, ns
, dev
);
362 EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create
, "INTEL_PMT");
364 void intel_pmt_dev_destroy(struct intel_pmt_entry
*entry
,
365 struct intel_pmt_namespace
*ns
)
367 struct device
*dev
= kobj_to_dev(entry
->kobj
);
370 sysfs_remove_bin_file(entry
->kobj
, &entry
->pmt_bin_attr
);
373 sysfs_remove_group(entry
->kobj
, ns
->attr_grp
);
375 device_unregister(dev
);
376 xa_erase(ns
->xa
, entry
->devid
);
378 EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_destroy
, "INTEL_PMT");
380 static int __init
pmt_class_init(void)
382 return class_register(&intel_pmt_class
);
385 static void __exit
pmt_class_exit(void)
387 class_unregister(&intel_pmt_class
);
390 module_init(pmt_class_init
);
391 module_exit(pmt_class_exit
);
393 MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
394 MODULE_DESCRIPTION("Intel PMT Class driver");
395 MODULE_LICENSE("GPL v2");