/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
10 #include <linux/pci.h>
14 static int cxl_dma_set_mask(struct pci_dev
*pdev
, u64 dma_mask
)
16 if (dma_mask
< DMA_BIT_MASK(64)) {
17 pr_info("%s only 64bit DMA supported on CXL", __func__
);
21 *(pdev
->dev
.dma_mask
) = dma_mask
;
25 static int cxl_pci_probe_mode(struct pci_bus
*bus
)
27 return PCI_PROBE_NORMAL
;
30 static int cxl_setup_msi_irqs(struct pci_dev
*pdev
, int nvec
, int type
)
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback for the generic teardown path.
	 */
}
43 static bool cxl_pci_enable_device_hook(struct pci_dev
*dev
)
45 struct pci_controller
*phb
;
47 struct cxl_context
*ctx
;
49 phb
= pci_bus_to_host(dev
->bus
);
50 afu
= (struct cxl_afu
*)phb
->private_data
;
52 if (!cxl_ops
->link_ok(afu
->adapter
, afu
)) {
53 dev_warn(&dev
->dev
, "%s: Device link is down, refusing to enable AFU\n", __func__
);
57 set_dma_ops(&dev
->dev
, &dma_direct_ops
);
58 set_dma_offset(&dev
->dev
, PAGE_OFFSET
);
61 * Allocate a context to do cxl things too. If we eventually do real
62 * DMA ops, we'll need a default context to attach them to
64 ctx
= cxl_dev_context_init(dev
);
67 dev
->dev
.archdata
.cxl_ctx
= ctx
;
69 return (cxl_ops
->afu_check_and_enable(afu
) == 0);
72 static void cxl_pci_disable_device(struct pci_dev
*dev
)
74 struct cxl_context
*ctx
= cxl_get_context(dev
);
77 if (ctx
->status
== STARTED
) {
78 dev_err(&dev
->dev
, "Default context started\n");
81 dev
->dev
.archdata
.cxl_ctx
= NULL
;
82 cxl_release_context(ctx
);
86 static resource_size_t
cxl_pci_window_alignment(struct pci_bus
*bus
,
/* PHB callback: intentionally a no-op on the virtual PHB. */
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}
97 static int cxl_pcie_cfg_record(u8 bus
, u8 devfn
)
99 return (bus
<< 8) + devfn
;
102 static int cxl_pcie_config_info(struct pci_bus
*bus
, unsigned int devfn
,
103 struct cxl_afu
**_afu
, int *_record
)
105 struct pci_controller
*phb
;
109 phb
= pci_bus_to_host(bus
);
111 return PCIBIOS_DEVICE_NOT_FOUND
;
113 afu
= (struct cxl_afu
*)phb
->private_data
;
114 record
= cxl_pcie_cfg_record(bus
->number
, devfn
);
115 if (record
> afu
->crs_num
)
116 return PCIBIOS_DEVICE_NOT_FOUND
;
123 static int cxl_pcie_read_config(struct pci_bus
*bus
, unsigned int devfn
,
124 int offset
, int len
, u32
*val
)
132 rc
= cxl_pcie_config_info(bus
, devfn
, &afu
, &record
);
138 rc
= cxl_ops
->afu_cr_read8(afu
, record
, offset
, &val8
);
142 rc
= cxl_ops
->afu_cr_read16(afu
, record
, offset
, &val16
);
146 rc
= cxl_ops
->afu_cr_read32(afu
, record
, offset
, &val32
);
154 return PCIBIOS_DEVICE_NOT_FOUND
;
156 return PCIBIOS_SUCCESSFUL
;
159 static int cxl_pcie_write_config(struct pci_bus
*bus
, unsigned int devfn
,
160 int offset
, int len
, u32 val
)
165 rc
= cxl_pcie_config_info(bus
, devfn
, &afu
, &record
);
171 rc
= cxl_ops
->afu_cr_write8(afu
, record
, offset
, val
& 0xff);
174 rc
= cxl_ops
->afu_cr_write16(afu
, record
, offset
, val
& 0xffff);
177 rc
= cxl_ops
->afu_cr_write32(afu
, record
, offset
, val
);
184 return PCIBIOS_SET_FAILED
;
186 return PCIBIOS_SUCCESSFUL
;
189 static struct pci_ops cxl_pcie_pci_ops
=
191 .read
= cxl_pcie_read_config
,
192 .write
= cxl_pcie_write_config
,
196 static struct pci_controller_ops cxl_pci_controller_ops
=
198 .probe_mode
= cxl_pci_probe_mode
,
199 .enable_device_hook
= cxl_pci_enable_device_hook
,
200 .disable_device
= cxl_pci_disable_device
,
201 .release_device
= cxl_pci_disable_device
,
202 .window_alignment
= cxl_pci_window_alignment
,
203 .reset_secondary_bus
= cxl_pci_reset_secondary_bus
,
204 .setup_msi_irqs
= cxl_setup_msi_irqs
,
205 .teardown_msi_irqs
= cxl_teardown_msi_irqs
,
206 .dma_set_mask
= cxl_dma_set_mask
,
209 int cxl_pci_vphb_add(struct cxl_afu
*afu
)
211 struct pci_dev
*phys_dev
;
212 struct pci_controller
*phb
, *phys_phb
;
213 struct device_node
*vphb_dn
;
214 struct device
*parent
;
216 if (cpu_has_feature(CPU_FTR_HVMODE
)) {
217 phys_dev
= to_pci_dev(afu
->adapter
->dev
.parent
);
218 phys_phb
= pci_bus_to_host(phys_dev
->bus
);
219 vphb_dn
= phys_phb
->dn
;
220 parent
= &phys_dev
->dev
;
222 vphb_dn
= afu
->adapter
->dev
.parent
->of_node
;
223 parent
= afu
->adapter
->dev
.parent
;
226 /* Alloc and setup PHB data structure */
227 phb
= pcibios_alloc_controller(vphb_dn
);
231 /* Setup parent in sysfs */
232 phb
->parent
= parent
;
234 /* Setup the PHB using arch provided callback */
235 phb
->ops
= &cxl_pcie_pci_ops
;
236 phb
->cfg_addr
= NULL
;
238 phb
->private_data
= afu
;
239 phb
->controller_ops
= cxl_pci_controller_ops
;
242 pcibios_scan_phb(phb
);
243 if (phb
->bus
== NULL
)
246 /* Claim resources. This might need some rework as well depending
247 * whether we are doing probe-only or not, like assigning unassigned
250 pcibios_claim_one_bus(phb
->bus
);
252 /* Add probed PCI devices to the device model */
253 pci_bus_add_devices(phb
->bus
);
260 void cxl_pci_vphb_remove(struct cxl_afu
*afu
)
262 struct pci_controller
*phb
;
264 /* If there is no configuration record we won't have one of these */
265 if (!afu
|| !afu
->phb
)
271 pci_remove_root_bus(phb
->bus
);
272 pcibios_free_controller(phb
);
275 bool cxl_pci_is_vphb_device(struct pci_dev
*dev
)
277 struct pci_controller
*phb
;
279 phb
= pci_bus_to_host(dev
->bus
);
281 return (phb
->ops
== &cxl_pcie_pci_ops
);
284 struct cxl_afu
*cxl_pci_to_afu(struct pci_dev
*dev
)
286 struct pci_controller
*phb
;
288 phb
= pci_bus_to_host(dev
->bus
);
290 return (struct cxl_afu
*)phb
->private_data
;
292 EXPORT_SYMBOL_GPL(cxl_pci_to_afu
);
294 unsigned int cxl_pci_to_cfg_record(struct pci_dev
*dev
)
296 return cxl_pcie_cfg_record(dev
->bus
->number
, dev
->devfn
);
298 EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record
);