1 // SPDX-License-Identifier: GPL-2.0
3 * Intel Vendor Specific Extended Capabilities auxiliary bus driver
5 * Copyright (c) 2021, Intel Corporation.
8 * Author: David E. Box <david.e.box@linux.intel.com>
10 * This driver discovers and creates auxiliary devices for Intel defined PCIe
11 * "Vendor Specific" and "Designated Vendor Specific" Extended Capabilities,
12 * VSEC and DVSEC respectively. The driver supports features on specific PCIe
13 * endpoints that exist primarily to expose them.
16 #include <linux/auxiliary_bus.h>
17 #include <linux/bits.h>
18 #include <linux/cleanup.h>
19 #include <linux/delay.h>
20 #include <linux/idr.h>
21 #include <linux/intel_vsec.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/pci.h>
25 #include <linux/types.h>
/* Allocation range for IDs handed out by the auxdev_array xarray */
#define PMT_XA_START 0
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)

/* Auxiliary device instance IDs for all features except SDSi */
static DEFINE_IDA(intel_vsec_ida);
/* SDSi devices get a separate instance ID space */
static DEFINE_IDA(intel_vsec_sdsi_ida);
/* Every created intel_vsec_device, indexed by its ->id; used by slot_reset */
static DEFINE_XARRAY_ALLOC(auxdev_array);
/*
 * Map a VSEC/DVSEC capability ID to the name used for the auxiliary
 * device. Returns NULL for IDs this driver does not create devices for.
 */
static const char *intel_vsec_name(enum intel_vsec_id id)
{
	switch (id) {
	case VSEC_ID_TELEMETRY:
		return "telemetry";

	case VSEC_ID_WATCHER:
		return "watcher";

	case VSEC_ID_CRASHLOG:
		return "crashlog";

	case VSEC_ID_SDSI:
		return "sdsi";

	case VSEC_ID_TPMI:
		return "tpmi";

	default:
		return NULL;
	}
}
/*
 * Check whether a discovered capability ID is enabled in the platform's
 * capability mask (info->caps), i.e. whether a device should be created
 * for it on this platform.
 */
static bool intel_vsec_supported(u16 id, unsigned long caps)
{
	switch (id) {
	case VSEC_ID_TELEMETRY:
		return !!(caps & VSEC_CAP_TELEMETRY);
	case VSEC_ID_WATCHER:
		return !!(caps & VSEC_CAP_WATCHER);
	case VSEC_ID_CRASHLOG:
		return !!(caps & VSEC_CAP_CRASHLOG);
	case VSEC_ID_SDSI:
		return !!(caps & VSEC_CAP_SDSI);
	case VSEC_ID_TPMI:
		return !!(caps & VSEC_CAP_TPMI);
	default:
		return false;
	}
}
/*
 * Devres action callback: tear down an auxiliary device created by
 * intel_vsec_add_aux(). @data is the struct auxiliary_device.
 */
static void intel_vsec_remove_aux(void *data)
{
	auxiliary_device_delete(data);
	auxiliary_device_uninit(data);
}
/*
 * Release callback for the auxiliary device. Drops the device from the
 * global xarray, returns its instance ID to the owning IDA, and frees
 * the resource array and the device structure itself.
 */
static void intel_vsec_dev_release(struct device *dev)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);

	xa_erase(&auxdev_array, intel_vsec_dev->id);

	ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);

	kfree(intel_vsec_dev->resource);
	kfree(intel_vsec_dev);
}
/*
 * intel_vsec_add_aux - create and register an auxiliary device
 * @pdev: PCI device the capability was discovered on
 * @parent: device that owns the devres removal action
 * @intel_vsec_dev: device data; ownership transfers to this call
 * @name: auxiliary device name (from intel_vsec_name())
 *
 * On any failure @intel_vsec_dev and its resource array are freed, either
 * directly or through intel_vsec_dev_release(). Returns 0 on success or a
 * negative error code.
 */
int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
		       struct intel_vsec_device *intel_vsec_dev,
		       const char *name)
{
	struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
	int ret, id;

	if (!parent)
		return -EINVAL;

	/* Track the device globally so slot_reset can find it later */
	ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev,
		       PMT_XA_LIMIT, GFP_KERNEL);
	if (ret < 0) {
		kfree(intel_vsec_dev->resource);
		kfree(intel_vsec_dev);
		return ret;
	}

	id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
	if (id < 0) {
		xa_erase(&auxdev_array, intel_vsec_dev->id);
		kfree(intel_vsec_dev->resource);
		kfree(intel_vsec_dev);
		return id;
	}

	auxdev->id = id;
	auxdev->name = name;
	auxdev->dev.parent = parent;
	auxdev->dev.release = intel_vsec_dev_release;

	ret = auxiliary_device_init(auxdev);
	if (ret < 0) {
		/* Release callback frees the ID, resources, and structure */
		intel_vsec_dev_release(&auxdev->dev);
		return ret;
	}

	ret = auxiliary_device_add(auxdev);
	if (ret < 0) {
		auxiliary_device_uninit(auxdev);
		return ret;
	}

	return devm_add_action_or_reset(parent, intel_vsec_remove_aux,
					auxdev);
}
EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, "INTEL_VSEC");
/*
 * Validate one discovered VSEC/DVSEC header, build the resource array
 * describing its discovery tables, and hand the populated device off to
 * intel_vsec_add_aux(). Returns 0 on success or a negative error code.
 */
static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
			      struct intel_vsec_platform_info *info)
{
	struct intel_vsec_device __free(kfree) *intel_vsec_dev = NULL;
	struct resource __free(kfree) *res = NULL;
	struct resource *tmp;
	struct device *parent;
	unsigned long quirks = info->quirks;
	u64 base_addr;
	int i;

	/* Allow the platform to override the parent; default to the PCI dev */
	if (info->parent)
		parent = info->parent;
	else
		parent = &pdev->dev;

	if (!intel_vsec_supported(header->id, info->caps))
		return -EINVAL;

	if (!header->num_entries) {
		dev_dbg(&pdev->dev, "Invalid 0 entry count for header id %d\n", header->id);
		return -EINVAL;
	}

	if (!header->entry_size) {
		dev_dbg(&pdev->dev, "Invalid 0 entry size for header id %d\n", header->id);
		return -EINVAL;
	}

	intel_vsec_dev = kzalloc(sizeof(*intel_vsec_dev), GFP_KERNEL);
	if (!intel_vsec_dev)
		return -ENOMEM;

	res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	/* Early HW encodes the table offset pre-shifted */
	if (quirks & VSEC_QUIRK_TABLE_SHIFT)
		header->offset >>= TABLE_OFFSET_SHIFT;

	/* Platform-provided base address takes precedence over the BAR */
	if (info->base_addr)
		base_addr = info->base_addr;
	else
		base_addr = pdev->resource[header->tbir].start;

	/*
	 * The DVSEC/VSEC contains the starting offset and count for a block of
	 * discovery tables. Create a resource array of these tables to the
	 * auxiliary device driver.
	 */
	for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
		/* entry_size is in dwords */
		tmp->start = base_addr + header->offset + i * (header->entry_size * sizeof(u32));
		tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
		tmp->flags = IORESOURCE_MEM;

		/* Check resource is not in use */
		if (!request_mem_region(tmp->start, resource_size(tmp), ""))
			return -EBUSY;

		release_mem_region(tmp->start, resource_size(tmp));
	}

	intel_vsec_dev->pcidev = pdev;
	intel_vsec_dev->resource = no_free_ptr(res);
	intel_vsec_dev->num_resources = header->num_entries;
	intel_vsec_dev->quirks = info->quirks;
	intel_vsec_dev->base_addr = info->base_addr;
	intel_vsec_dev->priv_data = info->priv_data;

	/* SDSi devices draw their instance IDs from a dedicated IDA */
	if (header->id == VSEC_ID_SDSI)
		intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
	else
		intel_vsec_dev->ida = &intel_vsec_ida;

	/*
	 * Pass the ownership of intel_vsec_dev and resource within it to
	 * intel_vsec_add_aux()
	 */
	return intel_vsec_add_aux(pdev, parent, no_free_ptr(intel_vsec_dev),
				  intel_vsec_name(header->id));
}
/*
 * Create devices from a platform-provided, NULL-terminated array of
 * pre-built headers (used when the device lacks real VSEC/DVSEC
 * capabilities, see VSEC_QUIRK_NO_DVSEC). Returns true if at least one
 * device was created.
 */
static bool intel_vsec_walk_header(struct pci_dev *pdev,
				   struct intel_vsec_platform_info *info)
{
	struct intel_vsec_header **header = info->headers;
	bool have_devices = false;
	int ret;

	for ( ; *header; header++) {
		ret = intel_vsec_add_dev(pdev, *header, info);
		if (ret)
			continue;

		have_devices = true;
	}

	return have_devices;
}
/*
 * Walk all DVSEC extended capabilities in config space, skipping
 * non-Intel vendors and unsupported revisions, and create a device for
 * each supported entry. Returns true if at least one device was created.
 */
static bool intel_vsec_walk_dvsec(struct pci_dev *pdev,
				  struct intel_vsec_platform_info *info)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		u16 vid;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
		vid = PCI_DVSEC_HEADER1_VID(hdr);
		if (vid != PCI_VENDOR_ID_INTEL)
			continue;

		/* Support only revision 1 */
		header.rev = PCI_DVSEC_HEADER1_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported DVSEC revision %d\n", header.rev);
			continue;
		}

		header.length = PCI_DVSEC_HEADER1_LEN(hdr);

		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
		header.id = PCI_DVSEC_HEADER2_ID(hdr);

		ret = intel_vsec_add_dev(pdev, &header, info);
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}
/*
 * Walk all Vendor Specific extended capabilities in config space and
 * create a device for each supported entry. Layout past the VSEC header
 * matches the Intel DVSEC layout. Returns true if at least one device
 * was created.
 */
static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
				 struct intel_vsec_platform_info *info)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);

		/* Support only revision 1 */
		header.rev = PCI_VNDR_HEADER_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported VSEC revision %d\n", header.rev);
			continue;
		}

		header.id = PCI_VNDR_HEADER_ID(hdr);
		header.length = PCI_VNDR_HEADER_LEN(hdr);

		/* entry, size, and table offset are the same as DVSEC */
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);

		ret = intel_vsec_add_dev(pdev, &header, info);
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}
/*
 * intel_vsec_register - entry point for other drivers to register
 * pre-built headers for a PCI device they own. Silently ignores
 * invalid arguments.
 */
void intel_vsec_register(struct pci_dev *pdev,
			 struct intel_vsec_platform_info *info)
{
	if (!pdev || !info || !info->headers)
		return;

	intel_vsec_walk_header(pdev, info);
}
EXPORT_SYMBOL_NS_GPL(intel_vsec_register, "INTEL_VSEC");
/*
 * Probe: enable the device, then try all three discovery mechanisms
 * (DVSEC walk, VSEC walk, quirk-provided static headers). Succeeds if
 * any of them produced at least one auxiliary device.
 */
static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_vsec_platform_info *info;
	bool have_devices = false;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Save state so slot_reset can restore it after recovery */
	pci_save_state(pdev);
	info = (struct intel_vsec_platform_info *)id->driver_data;
	if (!info)
		return -EINVAL;

	if (intel_vsec_walk_dvsec(pdev, info))
		have_devices = true;

	if (intel_vsec_walk_vsec(pdev, info))
		have_devices = true;

	/* NOTE(review): the "info &&" test is redundant after the check above */
	if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
	    intel_vsec_walk_header(pdev, info))
		have_devices = true;

	if (!have_devices)
		return -ENODEV;

	return 0;
}
/*
 * DG1 info: DG1 has no real DVSEC/VSEC capability, so telemetry is
 * described with a static header (VSEC_QUIRK_NO_DVSEC).
 * NOTE(review): field values below were not visible in this view and
 * are assumed from the upstream driver — verify against hardware docs.
 */
static struct intel_vsec_header dg1_header = {
	.length = 0x10,
	.id = 2,
	.num_entries = 1,
	.entry_size = 3,
	.tbir = 0,
	.offset = 0x466000,
};

static struct intel_vsec_header *dg1_headers[] = {
	&dg1_header,
	NULL
};

static const struct intel_vsec_platform_info dg1_info = {
	.caps = VSEC_CAP_TELEMETRY,
	.headers = dg1_headers,
	.quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
};

/* MTL info */
static const struct intel_vsec_platform_info mtl_info = {
	.caps = VSEC_CAP_TELEMETRY,
};

/* OOBMSM info */
static const struct intel_vsec_platform_info oobmsm_info = {
	.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI,
};

/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
	.caps = VSEC_CAP_TELEMETRY,
	.quirks = VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW,
};

/* LNL info */
static const struct intel_vsec_platform_info lnl_info = {
	.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_WATCHER,
};

#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
static const struct pci_device_id intel_vsec_pci_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
	{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
/*
 * AER callback: request a slot reset for recoverable errors; disconnect
 * (and skip disabling an already-dead device) on permanent failure.
 */
static pci_ers_result_t intel_vsec_pci_error_detected(struct pci_dev *pdev,
						      pci_channel_state_t state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "PCI error detected, state %d", state);

	if (state == pci_channel_io_perm_failure)
		status = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	return status;
}
/*
 * AER callback after the slot has been reset: re-enable the device,
 * tear down every auxiliary device previously created for this pdev
 * (found via auxdev_array), restore saved config state, and re-probe.
 */
static pci_ers_result_t intel_vsec_pci_slot_reset(struct pci_dev *pdev)
{
	struct intel_vsec_device *intel_vsec_dev;
	pci_ers_result_t status = PCI_ERS_RESULT_DISCONNECT;
	const struct pci_device_id *pci_dev_id;
	unsigned long index;

	dev_info(&pdev->dev, "Resetting PCI slot\n");

	/* Give the hardware time to settle after the reset */
	msleep(2000);
	if (pci_enable_device(pdev)) {
		dev_info(&pdev->dev,
			 "Failed to re-enable PCI device after reset.\n");
		goto out;
	}

	status = PCI_ERS_RESULT_RECOVERED;

	xa_for_each(&auxdev_array, index, intel_vsec_dev) {
		/* check if pdev doesn't match */
		if (pdev != intel_vsec_dev->pcidev)
			continue;
		devm_release_action(&pdev->dev, intel_vsec_remove_aux,
				    &intel_vsec_dev->auxdev);
	}
	pci_disable_device(pdev);
	pci_restore_state(pdev);
	pci_dev_id = pci_match_id(intel_vsec_pci_ids, pdev);
	intel_vsec_pci_probe(pdev, pci_dev_id);

out:
	return status;
}
/* AER callback: recovery is complete; nothing to do beyond logging. */
static void intel_vsec_pci_resume(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "Done resuming PCI device\n");
}
/* PCI AER error-recovery callbacks */
static const struct pci_error_handlers intel_vsec_pci_err_handlers = {
	.error_detected = intel_vsec_pci_error_detected,
	.slot_reset = intel_vsec_pci_slot_reset,
	.resume = intel_vsec_pci_resume,
};
static struct pci_driver intel_vsec_pci_driver = {
	.name = "intel_vsec",
	.id_table = intel_vsec_pci_ids,
	.probe = intel_vsec_pci_probe,
	.err_handler = &intel_vsec_pci_err_handlers,
};
module_pci_driver(intel_vsec_pci_driver);

MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel Extended Capabilities auxiliary bus driver");
MODULE_LICENSE("GPL v2");