md/raid: only permit hot-add of compatible integrity profiles
[linux/fpc-iii.git] / drivers / misc / cxl / vphb.c
blobc241e15cacb1f022e766a1280208f8cb6dfbd176
1 /*
2 * Copyright 2014 IBM Corp.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
10 #include <linux/pci.h>
11 #include <misc/cxl.h>
12 #include "cxl.h"
14 static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
16 if (dma_mask < DMA_BIT_MASK(64)) {
17 pr_info("%s only 64bit DMA supported on CXL", __func__);
18 return -EIO;
21 *(pdev->dev.dma_mask) = dma_mask;
22 return 0;
25 static int cxl_pci_probe_mode(struct pci_bus *bus)
27 return PCI_PROBE_NORMAL;
30 static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
32 return -ENODEV;
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback.
	 */
}
43 static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
45 struct pci_controller *phb;
46 struct cxl_afu *afu;
47 struct cxl_context *ctx;
49 phb = pci_bus_to_host(dev->bus);
50 afu = (struct cxl_afu *)phb->private_data;
52 if (!cxl_adapter_link_ok(afu->adapter)) {
53 dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
54 return false;
57 set_dma_ops(&dev->dev, &dma_direct_ops);
58 set_dma_offset(&dev->dev, PAGE_OFFSET);
61 * Allocate a context to do cxl things too. If we eventually do real
62 * DMA ops, we'll need a default context to attach them to
64 ctx = cxl_dev_context_init(dev);
65 if (!ctx)
66 return false;
67 dev->dev.archdata.cxl_ctx = ctx;
69 return (cxl_afu_check_and_enable(afu) == 0);
72 static void cxl_pci_disable_device(struct pci_dev *dev)
74 struct cxl_context *ctx = cxl_get_context(dev);
76 if (ctx) {
77 if (ctx->status == STARTED) {
78 dev_err(&dev->dev, "Default context started\n");
79 return;
81 dev->dev.archdata.cxl_ctx = NULL;
82 cxl_release_context(ctx);
86 static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
87 unsigned long type)
89 return 1;
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}
97 static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
99 return (bus << 8) + devfn;
102 static unsigned long cxl_pcie_cfg_addr(struct pci_controller* phb,
103 u8 bus, u8 devfn, int offset)
105 int record = cxl_pcie_cfg_record(bus, devfn);
107 return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset;
111 static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
112 int offset, int len,
113 volatile void __iomem **ioaddr,
114 u32 *mask, int *shift)
116 struct pci_controller *phb;
117 struct cxl_afu *afu;
118 unsigned long addr;
120 phb = pci_bus_to_host(bus);
121 if (phb == NULL)
122 return PCIBIOS_DEVICE_NOT_FOUND;
123 afu = (struct cxl_afu *)phb->private_data;
125 if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
126 return PCIBIOS_DEVICE_NOT_FOUND;
127 if (offset >= (unsigned long)phb->cfg_data)
128 return PCIBIOS_BAD_REGISTER_NUMBER;
129 addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset);
131 *ioaddr = (void *)(addr & ~0x3ULL);
132 *shift = ((addr & 0x3) * 8);
133 switch (len) {
134 case 1:
135 *mask = 0xff;
136 break;
137 case 2:
138 *mask = 0xffff;
139 break;
140 default:
141 *mask = 0xffffffff;
142 break;
144 return 0;
148 static inline bool cxl_config_link_ok(struct pci_bus *bus)
150 struct pci_controller *phb;
151 struct cxl_afu *afu;
153 /* Config space IO is based on phb->cfg_addr, which is based on
154 * afu_desc_mmio. This isn't safe to read/write when the link
155 * goes down, as EEH tears down MMIO space.
157 * Check if the link is OK before proceeding.
160 phb = pci_bus_to_host(bus);
161 if (phb == NULL)
162 return false;
163 afu = (struct cxl_afu *)phb->private_data;
164 return cxl_adapter_link_ok(afu->adapter);
167 static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
168 int offset, int len, u32 *val)
170 volatile void __iomem *ioaddr;
171 int shift, rc;
172 u32 mask;
174 rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
175 &mask, &shift);
176 if (rc)
177 return rc;
179 if (!cxl_config_link_ok(bus))
180 return PCIBIOS_DEVICE_NOT_FOUND;
182 /* Can only read 32 bits */
183 *val = (in_le32(ioaddr) >> shift) & mask;
184 return PCIBIOS_SUCCESSFUL;
187 static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
188 int offset, int len, u32 val)
190 volatile void __iomem *ioaddr;
191 u32 v, mask;
192 int shift, rc;
194 rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
195 &mask, &shift);
196 if (rc)
197 return rc;
199 if (!cxl_config_link_ok(bus))
200 return PCIBIOS_DEVICE_NOT_FOUND;
202 /* Can only write 32 bits so do read-modify-write */
203 mask <<= shift;
204 val <<= shift;
206 v = (in_le32(ioaddr) & ~mask) || (val & mask);
208 out_le32(ioaddr, v);
209 return PCIBIOS_SUCCESSFUL;
212 static struct pci_ops cxl_pcie_pci_ops =
214 .read = cxl_pcie_read_config,
215 .write = cxl_pcie_write_config,
219 static struct pci_controller_ops cxl_pci_controller_ops =
221 .probe_mode = cxl_pci_probe_mode,
222 .enable_device_hook = cxl_pci_enable_device_hook,
223 .disable_device = cxl_pci_disable_device,
224 .release_device = cxl_pci_disable_device,
225 .window_alignment = cxl_pci_window_alignment,
226 .reset_secondary_bus = cxl_pci_reset_secondary_bus,
227 .setup_msi_irqs = cxl_setup_msi_irqs,
228 .teardown_msi_irqs = cxl_teardown_msi_irqs,
229 .dma_set_mask = cxl_dma_set_mask,
232 int cxl_pci_vphb_add(struct cxl_afu *afu)
234 struct pci_dev *phys_dev;
235 struct pci_controller *phb, *phys_phb;
237 phys_dev = to_pci_dev(afu->adapter->dev.parent);
238 phys_phb = pci_bus_to_host(phys_dev->bus);
240 /* Alloc and setup PHB data structure */
241 phb = pcibios_alloc_controller(phys_phb->dn);
243 if (!phb)
244 return -ENODEV;
246 /* Setup parent in sysfs */
247 phb->parent = &phys_dev->dev;
249 /* Setup the PHB using arch provided callback */
250 phb->ops = &cxl_pcie_pci_ops;
251 phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
252 phb->cfg_data = (void *)(u64)afu->crs_len;
253 phb->private_data = afu;
254 phb->controller_ops = cxl_pci_controller_ops;
256 /* Scan the bus */
257 pcibios_scan_phb(phb);
258 if (phb->bus == NULL)
259 return -ENXIO;
261 /* Claim resources. This might need some rework as well depending
262 * whether we are doing probe-only or not, like assigning unassigned
263 * resources etc...
265 pcibios_claim_one_bus(phb->bus);
267 /* Add probed PCI devices to the device model */
268 pci_bus_add_devices(phb->bus);
270 afu->phb = phb;
272 return 0;
275 void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
277 /* When we are reconfigured, the AFU's MMIO space is unmapped
278 * and remapped. We need to reflect this in the PHB's view of
279 * the world.
281 afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
284 void cxl_pci_vphb_remove(struct cxl_afu *afu)
286 struct pci_controller *phb;
288 /* If there is no configuration record we won't have one of these */
289 if (!afu || !afu->phb)
290 return;
292 phb = afu->phb;
293 afu->phb = NULL;
295 pci_remove_root_bus(phb->bus);
296 pcibios_free_controller(phb);
299 struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
301 struct pci_controller *phb;
303 phb = pci_bus_to_host(dev->bus);
305 return (struct cxl_afu *)phb->private_data;
307 EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
309 unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
311 return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
313 EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);