1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 #include <linux/io-64-nonatomic-lo-hi.h>
4 #include <linux/device.h>
5 #include <linux/slab.h>
/*
 * CXL device capabilities are enumerated by PCI DVSEC (Designated
 * Vendor-specific) and / or descriptors provided by platform firmware.
 * They can be defined as a set like the device and component registers
 * mandated by CXL Section 8.1.12.2 Memory Device PCIe Capabilities and
 * Extended Capabilities, or they can be individual capabilities
 * appended to bridged and endpoint devices.
 *
 * Provide common infrastructure for enumerating and mapping these
 * discrete capabilities.
 */
28 * cxl_probe_component_regs() - Detect CXL Component register blocks
29 * @dev: Host device of the @base mapping
30 * @base: Mapping containing the HDM Decoder Capability Header
31 * @map: Map object describing the register block information found
33 * See CXL 2.0 8.2.4 Component Register Layout and Definition
34 * See CXL 2.0 8.2.5.5 CXL Device Register Interface
36 * Probe for component register information and return it in map object.
38 void cxl_probe_component_regs(struct device
*dev
, void __iomem
*base
,
39 struct cxl_component_reg_map
*map
)
44 *map
= (struct cxl_component_reg_map
) { 0 };
47 * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
48 * CXL 2.0 8.2.4 Table 141.
50 base
+= CXL_CM_OFFSET
;
52 cap_array
= readl(base
+ CXL_CM_CAP_HDR_OFFSET
);
54 if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK
, cap_array
) != CM_CAP_HDR_CAP_ID
) {
56 "Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
60 /* It's assumed that future versions will be backward compatible */
61 cap_count
= FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK
, cap_array
);
63 for (cap
= 1; cap
<= cap_count
; cap
++) {
64 void __iomem
*register_block
;
65 struct cxl_reg_map
*rmap
;
69 hdr
= readl(base
+ cap
* 0x4);
71 cap_id
= FIELD_GET(CXL_CM_CAP_HDR_ID_MASK
, hdr
);
72 offset
= FIELD_GET(CXL_CM_CAP_PTR_MASK
, hdr
);
73 register_block
= base
+ offset
;
74 hdr
= readl(register_block
);
78 case CXL_CM_CAP_CAP_ID_HDM
: {
81 dev_dbg(dev
, "found HDM decoder capability (0x%x)\n",
84 decoder_cnt
= cxl_hdm_decoder_count(hdr
);
85 length
= 0x20 * decoder_cnt
+ 0x10;
86 rmap
= &map
->hdm_decoder
;
89 case CXL_CM_CAP_CAP_ID_RAS
:
90 dev_dbg(dev
, "found RAS capability (0x%x)\n",
92 length
= CXL_RAS_CAPABILITY_LENGTH
;
96 dev_dbg(dev
, "Unknown CM cap ID: %d (0x%x)\n", cap_id
,
105 rmap
->offset
= CXL_CM_OFFSET
+ offset
;
109 EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs
, CXL
);
112 * cxl_probe_device_regs() - Detect CXL Device register blocks
113 * @dev: Host device of the @base mapping
114 * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
115 * @map: Map object describing the register block information found
117 * Probe for device register information and return it in map object.
119 void cxl_probe_device_regs(struct device
*dev
, void __iomem
*base
,
120 struct cxl_device_reg_map
*map
)
125 *map
= (struct cxl_device_reg_map
){ 0 };
127 cap_array
= readq(base
+ CXLDEV_CAP_ARRAY_OFFSET
);
128 if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK
, cap_array
) !=
129 CXLDEV_CAP_ARRAY_CAP_ID
)
132 cap_count
= FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK
, cap_array
);
134 for (cap
= 1; cap
<= cap_count
; cap
++) {
135 struct cxl_reg_map
*rmap
;
139 cap_id
= FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK
,
140 readl(base
+ cap
* 0x10));
141 offset
= readl(base
+ cap
* 0x10 + 0x4);
142 length
= readl(base
+ cap
* 0x10 + 0x8);
146 case CXLDEV_CAP_CAP_ID_DEVICE_STATUS
:
147 dev_dbg(dev
, "found Status capability (0x%x)\n", offset
);
150 case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX
:
151 dev_dbg(dev
, "found Mailbox capability (0x%x)\n", offset
);
154 case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX
:
155 dev_dbg(dev
, "found Secondary Mailbox capability (0x%x)\n", offset
);
157 case CXLDEV_CAP_CAP_ID_MEMDEV
:
158 dev_dbg(dev
, "found Memory Device capability (0x%x)\n", offset
);
162 if (cap_id
>= 0x8000)
163 dev_dbg(dev
, "Vendor cap ID: %#x offset: %#x\n", cap_id
, offset
);
165 dev_dbg(dev
, "Unknown cap ID: %#x offset: %#x\n", cap_id
, offset
);
173 rmap
->offset
= offset
;
177 EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs
, CXL
);
179 void __iomem
*devm_cxl_iomap_block(struct device
*dev
, resource_size_t addr
,
180 resource_size_t length
)
182 void __iomem
*ret_val
;
183 struct resource
*res
;
185 if (WARN_ON_ONCE(addr
== CXL_RESOURCE_NONE
))
188 res
= devm_request_mem_region(dev
, addr
, length
, dev_name(dev
));
190 resource_size_t end
= addr
+ length
- 1;
192 dev_err(dev
, "Failed to request region %pa-%pa\n", &addr
, &end
);
196 ret_val
= devm_ioremap(dev
, addr
, length
);
198 dev_err(dev
, "Failed to map region %pr\n", res
);
203 int cxl_map_component_regs(const struct cxl_register_map
*map
,
204 struct cxl_component_regs
*regs
,
205 unsigned long map_mask
)
207 struct device
*host
= map
->host
;
209 const struct cxl_reg_map
*rmap
;
212 { &map
->component_map
.hdm_decoder
, ®s
->hdm_decoder
},
213 { &map
->component_map
.ras
, ®s
->ras
},
217 for (i
= 0; i
< ARRAY_SIZE(mapinfo
); i
++) {
218 struct mapinfo
*mi
= &mapinfo
[i
];
219 resource_size_t addr
;
220 resource_size_t length
;
222 if (!mi
->rmap
->valid
)
224 if (!test_bit(mi
->rmap
->id
, &map_mask
))
226 addr
= map
->resource
+ mi
->rmap
->offset
;
227 length
= mi
->rmap
->size
;
228 *(mi
->addr
) = devm_cxl_iomap_block(host
, addr
, length
);
235 EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs
, CXL
);
237 int cxl_map_device_regs(const struct cxl_register_map
*map
,
238 struct cxl_device_regs
*regs
)
240 struct device
*host
= map
->host
;
241 resource_size_t phys_addr
= map
->resource
;
243 const struct cxl_reg_map
*rmap
;
246 { &map
->device_map
.status
, ®s
->status
, },
247 { &map
->device_map
.mbox
, ®s
->mbox
, },
248 { &map
->device_map
.memdev
, ®s
->memdev
, },
252 for (i
= 0; i
< ARRAY_SIZE(mapinfo
); i
++) {
253 struct mapinfo
*mi
= &mapinfo
[i
];
254 resource_size_t length
;
255 resource_size_t addr
;
257 if (!mi
->rmap
->valid
)
260 addr
= phys_addr
+ mi
->rmap
->offset
;
261 length
= mi
->rmap
->size
;
262 *(mi
->addr
) = devm_cxl_iomap_block(host
, addr
, length
);
269 EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs
, CXL
);
271 static bool cxl_decode_regblock(struct pci_dev
*pdev
, u32 reg_lo
, u32 reg_hi
,
272 struct cxl_register_map
*map
)
274 u8 reg_type
= FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK
, reg_lo
);
275 int bar
= FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK
, reg_lo
);
276 u64 offset
= ((u64
)reg_hi
<< 32) |
277 (reg_lo
& CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK
);
279 if (offset
> pci_resource_len(pdev
, bar
)) {
281 "BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar
,
282 &pdev
->resource
[bar
], &offset
, reg_type
);
286 map
->reg_type
= reg_type
;
287 map
->resource
= pci_resource_start(pdev
, bar
) + offset
;
288 map
->max_size
= pci_resource_len(pdev
, bar
) - offset
;
293 * cxl_find_regblock_instance() - Locate a register block by type / index
294 * @pdev: The CXL PCI device to enumerate.
295 * @type: Register Block Indicator id
296 * @map: Enumeration output, clobbered on error
297 * @index: Index into which particular instance of a regblock wanted in the
298 * order found in register locator DVSEC.
300 * Return: 0 if register block enumerated, negative error code otherwise
302 * A CXL DVSEC may point to one or more register blocks, search for them
303 * by @type and @index.
305 int cxl_find_regblock_instance(struct pci_dev
*pdev
, enum cxl_regloc_type type
,
306 struct cxl_register_map
*map
, int index
)
308 u32 regloc_size
, regblocks
;
312 *map
= (struct cxl_register_map
) {
314 .resource
= CXL_RESOURCE_NONE
,
317 regloc
= pci_find_dvsec_capability(pdev
, PCI_VENDOR_ID_CXL
,
318 CXL_DVSEC_REG_LOCATOR
);
322 pci_read_config_dword(pdev
, regloc
+ PCI_DVSEC_HEADER1
, ®loc_size
);
323 regloc_size
= FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK
, regloc_size
);
325 regloc
+= CXL_DVSEC_REG_LOCATOR_BLOCK1_OFFSET
;
326 regblocks
= (regloc_size
- CXL_DVSEC_REG_LOCATOR_BLOCK1_OFFSET
) / 8;
328 for (i
= 0; i
< regblocks
; i
++, regloc
+= 8) {
331 pci_read_config_dword(pdev
, regloc
, ®_lo
);
332 pci_read_config_dword(pdev
, regloc
+ 4, ®_hi
);
334 if (!cxl_decode_regblock(pdev
, reg_lo
, reg_hi
, map
))
337 if (map
->reg_type
== type
) {
338 if (index
== instance
)
344 map
->resource
= CXL_RESOURCE_NONE
;
347 EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance
, CXL
);
350 * cxl_find_regblock() - Locate register blocks by type
351 * @pdev: The CXL PCI device to enumerate.
352 * @type: Register Block Indicator id
353 * @map: Enumeration output, clobbered on error
355 * Return: 0 if register block enumerated, negative error code otherwise
357 * A CXL DVSEC may point to one or more register blocks, search for them
360 int cxl_find_regblock(struct pci_dev
*pdev
, enum cxl_regloc_type type
,
361 struct cxl_register_map
*map
)
363 return cxl_find_regblock_instance(pdev
, type
, map
, 0);
365 EXPORT_SYMBOL_NS_GPL(cxl_find_regblock
, CXL
);
368 * cxl_count_regblock() - Count instances of a given regblock type.
369 * @pdev: The CXL PCI device to enumerate.
370 * @type: Register Block Indicator id
372 * Some regblocks may be repeated. Count how many instances.
374 * Return: count of matching regblocks.
376 int cxl_count_regblock(struct pci_dev
*pdev
, enum cxl_regloc_type type
)
378 struct cxl_register_map map
;
382 rc
= cxl_find_regblock_instance(pdev
, type
, &map
, count
);
388 EXPORT_SYMBOL_NS_GPL(cxl_count_regblock
, CXL
);
390 int cxl_map_pmu_regs(struct cxl_register_map
*map
, struct cxl_pmu_regs
*regs
)
392 struct device
*dev
= map
->host
;
393 resource_size_t phys_addr
;
395 phys_addr
= map
->resource
;
396 regs
->pmu
= devm_cxl_iomap_block(dev
, phys_addr
, CXL_PMU_REGMAP_SIZE
);
402 EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs
, CXL
);
404 static int cxl_map_regblock(struct cxl_register_map
*map
)
406 struct device
*host
= map
->host
;
408 map
->base
= ioremap(map
->resource
, map
->max_size
);
410 dev_err(host
, "failed to map registers\n");
414 dev_dbg(host
, "Mapped CXL Memory Device resource %pa\n", &map
->resource
);
418 static void cxl_unmap_regblock(struct cxl_register_map
*map
)
424 static int cxl_probe_regs(struct cxl_register_map
*map
)
426 struct cxl_component_reg_map
*comp_map
;
427 struct cxl_device_reg_map
*dev_map
;
428 struct device
*host
= map
->host
;
429 void __iomem
*base
= map
->base
;
431 switch (map
->reg_type
) {
432 case CXL_REGLOC_RBI_COMPONENT
:
433 comp_map
= &map
->component_map
;
434 cxl_probe_component_regs(host
, base
, comp_map
);
435 dev_dbg(host
, "Set up component registers\n");
437 case CXL_REGLOC_RBI_MEMDEV
:
438 dev_map
= &map
->device_map
;
439 cxl_probe_device_regs(host
, base
, dev_map
);
440 if (!dev_map
->status
.valid
|| !dev_map
->mbox
.valid
||
441 !dev_map
->memdev
.valid
) {
442 dev_err(host
, "registers not found: %s%s%s\n",
443 !dev_map
->status
.valid
? "status " : "",
444 !dev_map
->mbox
.valid
? "mbox " : "",
445 !dev_map
->memdev
.valid
? "memdev " : "");
449 dev_dbg(host
, "Probing device registers...\n");
458 int cxl_setup_regs(struct cxl_register_map
*map
)
462 rc
= cxl_map_regblock(map
);
466 rc
= cxl_probe_regs(map
);
467 cxl_unmap_regblock(map
);
471 EXPORT_SYMBOL_NS_GPL(cxl_setup_regs
, CXL
);
473 u16
cxl_rcrb_to_aer(struct device
*dev
, resource_size_t rcrb
)
479 if (WARN_ON_ONCE(rcrb
== CXL_RESOURCE_NONE
))
482 if (!request_mem_region(rcrb
, SZ_4K
, dev_name(dev
)))
485 addr
= ioremap(rcrb
, SZ_4K
);
489 cap_hdr
= readl(addr
+ offset
);
490 while (PCI_EXT_CAP_ID(cap_hdr
) != PCI_EXT_CAP_ID_ERR
) {
491 offset
= PCI_EXT_CAP_NEXT(cap_hdr
);
493 /* Offset 0 terminates capability list. */
496 cap_hdr
= readl(addr
+ offset
);
500 dev_dbg(dev
, "found AER extended capability (0x%x)\n", offset
);
504 release_mem_region(rcrb
, SZ_4K
);
509 static resource_size_t
cxl_rcrb_to_linkcap(struct device
*dev
, struct cxl_dport
*dport
)
511 resource_size_t rcrb
= dport
->rcrb
.base
;
516 if (!request_mem_region(rcrb
, SZ_4K
, "CXL RCRB"))
517 return CXL_RESOURCE_NONE
;
519 addr
= ioremap(rcrb
, SZ_4K
);
521 dev_err(dev
, "Failed to map region %pr\n", addr
);
522 release_mem_region(rcrb
, SZ_4K
);
523 return CXL_RESOURCE_NONE
;
526 offset
= FIELD_GET(PCI_RCRB_CAP_LIST_ID_MASK
, readw(addr
+ PCI_CAPABILITY_LIST
));
527 cap_hdr
= readl(addr
+ offset
);
528 while ((FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK
, cap_hdr
)) != PCI_CAP_ID_EXP
) {
529 offset
= FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK
, cap_hdr
);
530 if (offset
== 0 || offset
> SZ_4K
) {
534 cap_hdr
= readl(addr
+ offset
);
538 release_mem_region(rcrb
, SZ_4K
);
540 return CXL_RESOURCE_NONE
;
545 int cxl_dport_map_rcd_linkcap(struct pci_dev
*pdev
, struct cxl_dport
*dport
)
547 void __iomem
*dport_pcie_cap
= NULL
;
549 struct cxl_rcrb_info
*ri
;
552 pos
= cxl_rcrb_to_linkcap(&pdev
->dev
, dport
);
553 if (pos
== CXL_RESOURCE_NONE
)
556 dport_pcie_cap
= devm_cxl_iomap_block(&pdev
->dev
,
559 dport
->regs
.rcd_pcie_cap
= dport_pcie_cap
;
563 EXPORT_SYMBOL_NS_GPL(cxl_dport_map_rcd_linkcap
, CXL
);
565 resource_size_t
__rcrb_to_component(struct device
*dev
, struct cxl_rcrb_info
*ri
,
568 resource_size_t component_reg_phys
;
569 resource_size_t rcrb
= ri
->base
;
575 if (which
== CXL_RCRB_UPSTREAM
)
579 * RCRB's BAR[0..1] point to component block containing CXL
580 * subsystem component registers. MEMBAR extraction follows
581 * the PCI Base spec here, esp. 64 bit extraction and memory
582 * ranges alignment (6.0, 7.5.1.2.1).
584 if (!request_mem_region(rcrb
, SZ_4K
, "CXL RCRB"))
585 return CXL_RESOURCE_NONE
;
586 addr
= ioremap(rcrb
, SZ_4K
);
588 dev_err(dev
, "Failed to map region %pr\n", addr
);
589 release_mem_region(rcrb
, SZ_4K
);
590 return CXL_RESOURCE_NONE
;
593 id
= readl(addr
+ PCI_VENDOR_ID
);
594 cmd
= readw(addr
+ PCI_COMMAND
);
595 bar0
= readl(addr
+ PCI_BASE_ADDRESS_0
);
596 bar1
= readl(addr
+ PCI_BASE_ADDRESS_1
);
598 release_mem_region(rcrb
, SZ_4K
);
601 * Sanity check, see CXL 3.0 Figure 9-8 CXL Device that Does Not
602 * Remap Upstream Port and Component Registers
605 if (which
== CXL_RCRB_DOWNSTREAM
)
606 dev_err(dev
, "Failed to access Downstream Port RCRB\n");
607 return CXL_RESOURCE_NONE
;
609 if (!(cmd
& PCI_COMMAND_MEMORY
))
610 return CXL_RESOURCE_NONE
;
611 /* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
612 if (bar0
& (PCI_BASE_ADDRESS_MEM_TYPE_1M
| PCI_BASE_ADDRESS_SPACE_IO
))
613 return CXL_RESOURCE_NONE
;
615 component_reg_phys
= bar0
& PCI_BASE_ADDRESS_MEM_MASK
;
616 if (bar0
& PCI_BASE_ADDRESS_MEM_TYPE_64
)
617 component_reg_phys
|= ((u64
)bar1
) << 32;
619 if (!component_reg_phys
)
620 return CXL_RESOURCE_NONE
;
622 /* MEMBAR is block size (64k) aligned. */
623 if (!IS_ALIGNED(component_reg_phys
, CXL_COMPONENT_REG_BLOCK_SIZE
))
624 return CXL_RESOURCE_NONE
;
626 return component_reg_phys
;
629 resource_size_t
cxl_rcd_component_reg_phys(struct device
*dev
,
630 struct cxl_dport
*dport
)
633 return CXL_RESOURCE_NONE
;
634 return __rcrb_to_component(dev
, &dport
->rcrb
, CXL_RCRB_UPSTREAM
);
636 EXPORT_SYMBOL_NS_GPL(cxl_rcd_component_reg_phys
, CXL
);