/*
 * Copyright 2014-2016 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <asm/pci-bridge.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <misc/cxl.h>

#include "pci.h"

struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);

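/*
 * Note (illustrative, not from the original file): of_node_get() takes a
 * reference on the node, so a caller is expected to balance it with
 * of_node_put() once done with the node, e.g.:
 *
 *	struct device_node *np = pnv_pci_get_phb_node(dev);
 *
 *	... use np ...
 *	of_node_put(np);
 */
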
int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int rc;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	pe_info(pe, "Switching PHB to CXL\n");

	rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
	if (rc == OPAL_UNSUPPORTED)
		dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
	else if (rc)
		dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);

	return rc;
}
EXPORT_SYMBOL(pnv_phb_to_cxl_mode);

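/*
 * Usage sketch (assumed caller, not part of this file): the cxl driver
 * switches a card's PHB into CAPI mode with one of the
 * OPAL_PHB_CAPI_MODE_* constants, roughly as follows:
 *
 *	if (pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI))
 *		dev_err(&dev->dev, "Failed to switch PHB to CAPI mode\n");
 */
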
/* Find the PHB for the cxl dev and allocate MSI hwirqs.
 * Returns the absolute hardware IRQ number.
 */
int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);

	if (hwirq < 0) {
		dev_warn(&dev->dev, "Failed to find a free MSI\n");
		return -ENOSPC;
	}

	return phb->msi_base + hwirq;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);

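/*
 * Illustrative pairing (assumed caller, not from this file): both helpers
 * apply the phb->msi_base offset symmetrically, so the absolute value
 * returned by the allocator is handed straight back to the release
 * function:
 *
 *	int hwirq = pnv_cxl_alloc_hwirqs(dev, 4);
 *
 *	if (hwirq < 0)
 *		return hwirq;
 *	... use hwirq .. hwirq + 3 ...
 *	pnv_cxl_release_hwirqs(dev, hwirq, 4);
 */
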
void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
}
EXPORT_SYMBOL(pnv_cxl_release_hwirqs);

void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
				  struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq;

	for (i = 1; i < CXL_IRQ_RANGES; i++) {
		if (!irqs->range[i])
			continue;
		pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
			 i, irqs->offset[i], irqs->range[i]);
		hwirq = irqs->offset[i] - phb->msi_base;
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
				       irqs->range[i]);
	}
}
EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);

int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
			       struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq, try;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	/* 0 is reserved for the multiplexed PSL DSI interrupt */
	for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
			if (hwirq >= 0)
				break;
			try /= 2;
		}
		if (!try)
			goto fail;

		irqs->offset[i] = phb->msi_base + hwirq;
		irqs->range[i] = try;
		pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
			 i, irqs->offset[i], irqs->range[i]);
		num -= try;
	}
	if (num)
		goto fail;

	return 0;
fail:
	pnv_cxl_release_hwirq_ranges(irqs, dev);
	return -ENOSPC;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);

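/*
 * Worked example of the halving strategy above, with a hypothetical
 * fragmented bitmap and num = 7: range 1 tries 7 and falls back to 3,
 * range 2 tries the remaining 4 and falls back to 2, and range 3 takes
 * the last 2, so irqs->range[] ends up { 0, 3, 2, 2 } (range 0 stays
 * reserved for the multiplexed PSL DSI interrupt). If the ranges run out
 * before num reaches zero, everything is released again and -ENOSPC is
 * returned.
 */
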
int pnv_cxl_get_irq_count(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	return phb->msi_bmp.irq_count;
}
EXPORT_SYMBOL(pnv_cxl_get_irq_count);

int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
			   unsigned int virq)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	unsigned int xive_num = hwirq - phb->msi_base;
	struct pnv_ioda_pe *pe;
	int rc;

	if (!(pe = pnv_ioda_get_pe(dev)))
		return -ENODEV;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
			"hwirq 0x%x XIVE 0x%x PE\n",
			pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
		return -EIO;
	}
	pnv_set_msi_irq_chip(phb, virq);

	return 0;
}
EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);

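/*
 * For reference (example numbers are made up): the XIVE number programmed
 * above is simply the hwirq's offset within this PHB's MSI range, so with
 * an msi_base of 0x1000, hwirq 0x1005 maps to XIVE 0x5 on the PE that
 * owns the device.
 */
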
#if IS_MODULE(CONFIG_CXL)
static inline int get_cxl_module(void)
{
	struct module *cxl_module;

	mutex_lock(&module_mutex);

	cxl_module = find_module("cxl");
	if (cxl_module)
		__module_get(cxl_module);

	mutex_unlock(&module_mutex);

	if (!cxl_module)
		return -ENODEV;

	return 0;
}
#else
static inline int get_cxl_module(void) { return 0; }
#endif

/*
 * Sets flags and switches the controller ops to enable the cxl kernel api.
 * Originally the cxl kernel API operated on a virtual PHB, but certain cards
 * such as the Mellanox CX4 use a peer model instead and for these cards the
 * cxl kernel api will operate on the real PHB.
 */
int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable)
{
	struct pnv_phb *phb = hose->private_data;
	int rc;

	if (!enable) {
		/*
		 * Once cxl mode is enabled on the PHB, there is currently no
		 * known safe method to disable it again, and trying risks a
		 * checkstop. If we can find a way to safely disable cxl mode
		 * in the future we can revisit this, but for now the only sane
		 * thing to do is to refuse to disable cxl mode:
		 */
		return -EPERM;
	}

	/*
	 * Hold a reference to the cxl module since several PHB operations now
	 * depend on it, and it would be insane to allow it to be removed so
	 * long as we are in this mode (and since we can't safely disable this
	 * mode once enabled...).
	 */
	rc = get_cxl_module();
	if (rc)
		return rc;

	phb->flags |= PNV_PHB_FLAG_CXL;
	hose->controller_ops = pnv_cxl_cx4_ioda_controller_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_cxl_enable_phb_kernel_api);

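/*
 * Usage sketch (assumed caller, not from this file): a driver enables the
 * kernel api on the real PHB once and treats it as one-way, since passing
 * enable == false deliberately returns -EPERM:
 *
 *	struct pci_controller *hose = pci_bus_to_host(dev->bus);
 *
 *	rc = pnv_cxl_enable_phb_kernel_api(hose, true);
 *	if (rc)
 *		return rc;
 */
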
bool pnv_pci_on_cxl_phb(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	return !!(phb->flags & PNV_PHB_FLAG_CXL);
}
EXPORT_SYMBOL_GPL(pnv_pci_on_cxl_phb);

struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	return (struct cxl_afu *)phb->cxl_afu;
}
EXPORT_SYMBOL_GPL(pnv_cxl_phb_to_afu);

void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	phb->cxl_afu = afu;
}
EXPORT_SYMBOL_GPL(pnv_cxl_phb_set_peer_afu);

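/*
 * Illustrative lifecycle (assumed caller): the cxl driver publishes its
 * AFU once it is ready and clears it again on teardown, which is what
 * lets the enable hook below detect whether a peer AFU exists:
 *
 *	pnv_cxl_phb_set_peer_afu(dev, afu);	(during AFU init)
 *	...
 *	pnv_cxl_phb_set_peer_afu(dev, NULL);	(during AFU removal)
 */
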
/*
 * In the peer cxl model, the XSL/PSL is physical function 0, and will be used
 * by other functions on the device for memory access and interrupts. When the
 * other functions are enabled we explicitly take a reference on the cxl
 * function since they will use it, and allocate a default context associated
 * with that function just like the vPHB model of the cxl kernel API.
 */
bool pnv_cxl_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct cxl_afu *afu = phb->cxl_afu;

	if (!pnv_pci_enable_device_hook(dev))
		return false;

	/* No special handling for the cxl function, which is always PF 0 */
	if (PCI_FUNC(dev->devfn) == 0)
		return true;

	if (!afu) {
		dev_WARN(&dev->dev, "Attempted to enable function > 0 on CXL PHB without a peer AFU\n");
		return false;
	}

	dev_info(&dev->dev, "Enabling function on CXL enabled PHB with peer AFU\n");

	/* Make sure the peer AFU can't go away while this device is active */
	cxl_afu_get(afu);

	return cxl_pci_associate_default_context(dev, afu);
}

void pnv_cxl_disable_device(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct cxl_afu *afu = phb->cxl_afu;

	/* No special handling for cxl function: */
	if (PCI_FUNC(dev->devfn) == 0)
		return;

	cxl_pci_disable_device(dev);
	cxl_afu_put(afu);
}

/*
 * This is a special version of pnv_setup_msi_irqs for cards in cxl mode. This
 * function handles setting up the IVTE entries for the XSL to use.
 *
 * We are currently not filling out the MSIX table, since the only currently
 * supported adapter (CX4) uses a custom MSIX table format in cxl mode and it
 * is up to their driver to fill that out. In the future we may fill out the
 * MSIX table (and change the IVTE entries to be an index to the MSIX table)
 * for adapters implementing the Full MSI-X mode described in the CAIA.
 */
int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct cxl_context *ctx = NULL;
	unsigned int virq;
	int hwirq;
	int afu_irq = 0;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	rc = cxl_cx4_setup_msi_irqs(pdev, nvec, type);
	if (rc)
		return rc;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}

		hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (WARN_ON(hwirq <= 0))
			return (hwirq ? hwirq : -ENOMEM);

		virq = irq_create_mapping(NULL, hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map cxl mode MSI to linux irq\n",
				pci_name(pdev));
			return -ENOMEM;
		}

		rc = pnv_cxl_ioda_msi_setup(pdev, hwirq, virq);
		if (rc) {
			pr_warn("%s: Failed to setup cxl mode MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			return rc;
		}

		irq_set_msi_desc(virq, entry);
	}

	return 0;
}

void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}

	cxl_cx4_teardown_msi_irqs(pdev);
}