// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "otx_cpt_common.h"
#include "otx_cptpf.h"

#define DRV_NAME    "octeontx-cpt"
#define DRV_VERSION "1.0"

static void otx_cpt_disable_mbox_interrupts(struct otx_cpt_device *cpt)
{
        /* Disable mbox(0) interrupts for all VFs */
        writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1CX(0));
}

static void otx_cpt_enable_mbox_interrupts(struct otx_cpt_device *cpt)
{
        /* Enable mbox(0) interrupts for all VFs */
        writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1SX(0));
}

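/*
 * Mailbox 0 interrupt handler: dispatches VF mailbox traffic to the
 * common otx_cpt_mbox_intr_handler() for mailbox set 0.
 */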
static irqreturn_t otx_cpt_mbx0_intr_handler(int __always_unused irq,
                                             void *cpt)
{
        otx_cpt_mbox_intr_handler(cpt, 0);

        return IRQ_HANDLED;
}

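/* Reset the CPT block by writing 1 to the PF reset register */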
static void otx_cpt_reset(struct otx_cpt_device *cpt)
{
        writeq(1, cpt->reg_base + OTX_CPT_PF_RESET);
}

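/*
 * Read the PF constants register to find the maximum number of SE
 * (symmetric) and AE (asymmetric) engine cores available on this device.
 */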
static void otx_cpt_find_max_enabled_cores(struct otx_cpt_device *cpt)
{
        union otx_cptx_pf_constants pf_cnsts = {0};

        pf_cnsts.u = readq(cpt->reg_base + OTX_CPT_PF_CONSTANTS);
        cpt->eng_grps.avail.max_se_cnt = pf_cnsts.s.se;
        cpt->eng_grps.avail.max_ae_cnt = pf_cnsts.s.ae;
}

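/* Return the RAM built-in self-test (BIST) status of the PF */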
static u32 otx_cpt_check_bist_status(struct otx_cpt_device *cpt)
{
        union otx_cptx_pf_bist_status bist_sts = {0};

        bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_BIST_STATUS);
        return bist_sts.u;
}

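/* Return the engine (execution unit) BIST status of the PF */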
static u64 otx_cpt_check_exe_bist_status(struct otx_cpt_device *cpt)
{
        union otx_cptx_pf_exe_bist_status bist_sts = {0};

        bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_EXE_BIST_STATUS);
        return bist_sts.u;
}

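/*
 * One-time hardware initialization: reset the PF, check the RAM and
 * engine BIST results, work out from the PCI subsystem ID and the
 * available core counts whether this PF drives SE or AE engines, read
 * the number of VFs supported and disable all engine cores.
 */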
static int otx_cpt_device_init(struct otx_cpt_device *cpt)
{
        struct device *dev = &cpt->pdev->dev;
        u16 sdevid;
        u64 bist;

        /* Reset the PF when probed first */
        otx_cpt_reset(cpt);
        mdelay(100);

        pci_read_config_word(cpt->pdev, PCI_SUBSYSTEM_ID, &sdevid);

        /* Check BIST status */
        bist = (u64)otx_cpt_check_bist_status(cpt);
        if (bist) {
                dev_err(dev, "RAM BIST failed with code 0x%llx\n", bist);
                return -ENODEV;
        }

        bist = otx_cpt_check_exe_bist_status(cpt);
        if (bist) {
                dev_err(dev, "Engine BIST failed with code 0x%llx\n", bist);
                return -ENODEV;
        }

        /* Get max enabled cores */
        otx_cpt_find_max_enabled_cores(cpt);

        if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
            (cpt->eng_grps.avail.max_se_cnt == 0)) {
                cpt->pf_type = OTX_CPT_AE;
        } else if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
                   (cpt->eng_grps.avail.max_ae_cnt == 0)) {
                cpt->pf_type = OTX_CPT_SE;
        }

        /* Get max VQs/VFs supported by the device */
        cpt->max_vfs = pci_sriov_get_totalvfs(cpt->pdev);

        /* Disable all cores */
        otx_cpt_disable_all_cores(cpt);

        return 0;
}

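/*
 * Allocate the PF MSI-X vectors, hook up the mailbox 0 interrupt handler
 * and unmask mailbox interrupts for all VFs.
 */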
static int otx_cpt_register_interrupts(struct otx_cpt_device *cpt)
{
        struct device *dev = &cpt->pdev->dev;
        u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;
        u32 num_vec = OTX_CPT_PF_MSIX_VECTORS;
        int ret;

        /* Enable MSI-X */
        ret = pci_alloc_irq_vectors(cpt->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(&cpt->pdev->dev,
                        "Request for #%d msix vectors failed\n",
                        num_vec);
                return ret;
        }

        /* Register mailbox interrupt handlers */
        ret = request_irq(pci_irq_vector(cpt->pdev,
                          OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
                          otx_cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
        if (ret) {
                dev_err(dev, "Request irq failed\n");
                pci_free_irq_vectors(cpt->pdev);
                return ret;
        }

        /* Enable mailbox interrupt */
        otx_cpt_enable_mbox_interrupts(cpt);
        return 0;
}

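/* Mask mailbox interrupts, free the mailbox 0 IRQ and the MSI-X vectors */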
static void otx_cpt_unregister_interrupts(struct otx_cpt_device *cpt)
{
        u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;

        otx_cpt_disable_mbox_interrupts(cpt);
        free_irq(pci_irq_vector(cpt->pdev,
                 OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
                 cpt);
        pci_free_irq_vectors(cpt->pdev);
}

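/*
 * sriov_configure callback (sysfs "sriov_numvfs"): a non-zero numvfs creates
 * the default engine groups, enables SR-IOV and makes the engine group
 * configuration read-only; zero disables the VFs and allows the engine
 * groups to be changed again.
 */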
static int otx_cpt_sriov_configure(struct pci_dev *pdev, int numvfs)
{
        struct otx_cpt_device *cpt = pci_get_drvdata(pdev);
        int ret = 0;

        if (numvfs > cpt->max_vfs)
                numvfs = cpt->max_vfs;

        if (numvfs > 0) {
                ret = otx_cpt_try_create_default_eng_grps(cpt->pdev,
                                                          &cpt->eng_grps,
                                                          cpt->pf_type);
                if (ret)
                        return ret;

                cpt->vfs_enabled = numvfs;
                ret = pci_enable_sriov(pdev, numvfs);
                if (ret) {
                        cpt->vfs_enabled = 0;
                        return ret;
                }

                otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, true);
                try_module_get(THIS_MODULE);
                ret = numvfs;
        } else {
                pci_disable_sriov(pdev);
                otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, false);
                module_put(THIS_MODULE);
                cpt->vfs_enabled = 0;
        }
        dev_notice(&cpt->pdev->dev, "VFs enabled: %d\n", ret);

        return ret;
}

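/*
 * PCI probe: enable the device, map the PF configuration BAR, initialize
 * the hardware, register the mailbox interrupt and set up the engine
 * groups. Errors unwind in reverse order through the goto labels.
 */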
static int otx_cpt_probe(struct pci_dev *pdev,
                         const struct pci_device_id __always_unused *ent)
{
        struct device *dev = &pdev->dev;
        struct otx_cpt_device *cpt;
        int err;

        cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
        if (!cpt)
                return -ENOMEM;

        pci_set_drvdata(pdev, cpt);
        cpt->pdev = pdev;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                goto err_clear_drvdata;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
                goto err_release_regions;
        }

        /* MAP PF's configuration registers */
        cpt->reg_base = pci_iomap(pdev, OTX_CPT_PF_PCI_CFG_BAR, 0);
        if (!cpt->reg_base) {
                dev_err(dev, "Cannot map config register space, aborting\n");
                err = -ENOMEM;
                goto err_release_regions;
        }

        /* CPT device HW initialization */
        err = otx_cpt_device_init(cpt);
        if (err)
                goto err_unmap_region;

        /* Register interrupts */
        err = otx_cpt_register_interrupts(cpt);
        if (err)
                goto err_unmap_region;

        /* Initialize engine groups */
        err = otx_cpt_init_eng_grps(pdev, &cpt->eng_grps, cpt->pf_type);
        if (err)
                goto err_unregister_interrupts;

        return 0;

err_unregister_interrupts:
        otx_cpt_unregister_interrupts(cpt);
err_unmap_region:
        pci_iounmap(pdev, cpt->reg_base);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
err_clear_drvdata:
        pci_set_drvdata(pdev, NULL);

        return err;
}

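/*
 * PCI remove: disable any enabled VFs, clean up the engine groups and
 * interrupts, disable the engine cores and release all PCI resources.
 */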
static void otx_cpt_remove(struct pci_dev *pdev)
{
        struct otx_cpt_device *cpt = pci_get_drvdata(pdev);

        if (!cpt)
                return;

        /* Disable VFs */
        pci_disable_sriov(pdev);
        /* Cleanup engine groups */
        otx_cpt_cleanup_eng_grps(pdev, &cpt->eng_grps);
        /* Disable CPT PF interrupts */
        otx_cpt_unregister_interrupts(cpt);
        /* Disengage SE and AE cores from all groups */
        otx_cpt_disable_all_cores(cpt);
        pci_iounmap(pdev, cpt->reg_base);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx_cpt_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX_CPT_PCI_PF_DEVICE_ID) },
        { 0, }  /* end of table */
};

static struct pci_driver otx_cpt_pci_driver = {
        .name = DRV_NAME,
        .id_table = otx_cpt_id_table,
        .probe = otx_cpt_probe,
        .remove = otx_cpt_remove,
        .sriov_configure = otx_cpt_sriov_configure
};

module_pci_driver(otx_cpt_pci_driver);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cpt_id_table);