// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/pci.h>
#include <misc/cxl.h>
#include "cxl.h"
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	/* The vPHB does not support MSI; fail any request to set it up. */
	return -ENODEV;
}
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback.
	 */
}
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	struct cxl_context *ctx;

	phb = pci_bus_to_host(dev->bus);
	afu = (struct cxl_afu *)phb->private_data;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
		return false;
	}

	dev->dev.archdata.dma_offset = PAGE_OFFSET;

	/*
	 * Allocate a context to do cxl things with. If we eventually do real
	 * DMA ops, we'll need a default context to attach them to.
	 */
	ctx = cxl_dev_context_init(dev);
	if (IS_ERR(ctx))
		return false;
	dev->dev.archdata.cxl_ctx = ctx;

	return (cxl_ops->afu_check_and_enable(afu) == 0);
}
static void cxl_pci_disable_device(struct pci_dev *dev)
{
	struct cxl_context *ctx = cxl_get_context(dev);

	if (ctx) {
		if (ctx->status == STARTED) {
			dev_err(&dev->dev, "Default context started\n");
			return;
		}
		dev->dev.archdata.cxl_ctx = NULL;
		cxl_release_context(ctx);
	}
}
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}
static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
{
	return (bus << 8) + devfn;
}
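/*
 * Worked example of the record encoding above: a device at virtual bus 0x01,
 * devfn 0x02 maps to configuration record (0x01 << 8) + 0x02 = 0x102, and
 * bus 0, devfn 0 maps to record 0.
 */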
static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
{
	struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;

	return phb ? phb->private_data : NULL;
}
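/*
 * The next two helpers implement the "reader lock" taken around the config
 * space accessors below: cxl_afu_configured_get() succeeds only while
 * afu->configured_state is non-negative (the AFU is still configured) and
 * counts the reader in, while cxl_afu_configured_put() counts it back out.
 * The deconfigure path elsewhere in the driver can then wait for the count
 * to drop back to zero before marking the state negative.
 */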
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
	atomic_dec_if_positive(&afu->configured_state);
}
static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
	return atomic_inc_unless_negative(&afu->configured_state);
}
static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
				       struct cxl_afu *afu, int *_record)
{
	int record;

	record = cxl_pcie_cfg_record(bus->number, devfn);
	if (record > afu->crs_num)
		return PCIBIOS_DEVICE_NOT_FOUND;

	*_record = record;
	return 0;
}
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	int rc, record;
	struct cxl_afu *afu;
	u8 val8;
	u16 val16;
	u32 val32;

	afu = pci_bus_to_afu(bus);
	/* Grab a reader lock on afu. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = cxl_pcie_config_info(bus, devfn, afu, &record);
	if (rc)
		goto out;

	switch (len) {
	case 1:
		rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
		*val = val8;
		break;
	case 2:
		rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
		*val = val16;
		break;
	case 4:
		rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
		*val = val32;
		break;
	default:
		WARN_ON(1);
	}

out:
	cxl_afu_configured_put(afu);
	return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	int rc, record;
	struct cxl_afu *afu;

	afu = pci_bus_to_afu(bus);
	/* Grab a reader lock on afu. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = cxl_pcie_config_info(bus, devfn, afu, &record);
	if (rc)
		goto out;

	switch (len) {
	case 1:
		rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
		break;
	case 2:
		rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
		break;
	case 4:
		rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
		break;
	default:
		WARN_ON(1);
	}

out:
	cxl_afu_configured_put(afu);
	return rc ? PCIBIOS_SET_FAILED : 0;
}
static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};
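/*
 * phb->ops is pointed at this table in cxl_pci_vphb_add() below, so config
 * space accesses on the virtual bus (pci_bus_read_config_*() and friends)
 * are routed into the AFU configuration record accessors above rather than
 * real PCIe config cycles.
 */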
static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
};
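/*
 * Create and scan the virtual PHB that exposes this AFU's configuration
 * records as PCI functions: allocate a pci_controller, wire up the config
 * space and controller ops above, scan the bus, claim resources and add
 * the discovered devices to the device model.
 */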
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
	struct pci_controller *phb;
	struct device_node *vphb_dn;
	struct device *parent;

	/*
	 * If there are no AFU configuration records we won't have anything to
	 * expose under the vPHB, so skip creating one, returning success since
	 * this is still a valid case. This will also opt us out of EEH
	 * handling since we won't have anything special to do if there are no
	 * kernel drivers attached to the vPHB, and EEH handling is not yet
	 * supported in the peer model.
	 */
	if (!afu->crs_num)
		return 0;

	/* The parent device is the adapter. Reuse the device node of
	 * the adapter.
	 * We don't seem to care what device node is used for the vPHB,
	 * but tools such as lsvpd walk up the device parents looking
	 * for a valid location code, so we might as well show devices
	 * attached to the adapter as being located on that adapter.
	 */
	parent = afu->adapter->dev.parent;
	vphb_dn = parent->of_node;

	/* Alloc and setup PHB data structure */
	phb = pcibios_alloc_controller(vphb_dn);
	if (!phb)
		return -ENODEV;

	/* Setup parent in sysfs */
	phb->parent = parent;

	/* Setup the PHB using arch provided callback */
	phb->ops = &cxl_pcie_pci_ops;
	phb->cfg_addr = NULL;
	phb->cfg_data = NULL;
	phb->private_data = afu;
	phb->controller_ops = cxl_pci_controller_ops;

	/* Scan the bus */
	pcibios_scan_phb(phb);
	if (phb->bus == NULL)
		return -ENXIO;

	/* Set release hook on root bus */
	pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
				    pcibios_free_controller_deferred,
				    (void *) phb);

	/* Claim resources. This might need some rework as well depending
	 * on whether we are doing probe-only or not, like assigning
	 * unassigned resources etc...
	 */
	pcibios_claim_one_bus(phb->bus);

	/* Add probed PCI devices to the device model */
	pci_bus_add_devices(phb->bus);

	afu->phb = phb;

	return 0;
}
void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
	struct pci_controller *phb;

	/* If there is no configuration record we won't have one of these */
	if (!afu || !afu->phb)
		return;

	phb = afu->phb;
	afu->phb = NULL;

	pci_remove_root_bus(phb->bus);
	/*
	 * We don't free phb here - that's handled by
	 * pcibios_free_controller_deferred()
	 */
}
bool cxl_pci_is_vphb_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	phb = pci_bus_to_host(dev->bus);

	return (phb->ops == &cxl_pcie_pci_ops);
}
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
	struct pci_controller *phb;

	phb = pci_bus_to_host(dev->bus);

	return (struct cxl_afu *)phb->private_data;
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
	return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);
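/*
 * The two exported helpers above are intended for AFU drivers bound to
 * devices on the vPHB. A minimal sketch of how such a driver might use them
 * from its probe routine; the driver and function names below are purely
 * illustrative, not part of this file:
 *
 *	static int my_afu_probe(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		struct cxl_afu *afu = cxl_pci_to_afu(pdev);
 *		unsigned int cr = cxl_pci_to_cfg_record(pdev);
 *
 *		if (!afu)
 *			return -ENODEV;
 *		dev_info(&pdev->dev, "bound to AFU config record %u\n", cr);
 *		return 0;
 *	}
 */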