Linux 4.18.10
linux/fpc-iii.git — drivers/misc/mic/scif/scif_main.c
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include <linux/module.h>
#include <linux/idr.h>

#include <linux/mic_common.h>
#include "../common/mic_dev.h"
#include "../bus/scif_bus.h"
#include "scif_peer_bus.h"
#include "scif_main.h"
#include "scif_map.h"

struct scif_info scif_info = {
	.mdev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name = "scif",
		.fops = &scif_fops,
	},
};

struct scif_dev *scif_dev;
struct kmem_cache *unaligned_cache;
static atomic_t g_loopb_cnt;

/* Runs in the context of intr_wq */
static void scif_intr_bh_handler(struct work_struct *work)
{
	struct scif_dev *scifdev =
			container_of(work, struct scif_dev, intr_bh);

	if (scifdev_self(scifdev))
		scif_loopb_msg_handler(scifdev, scifdev->qpairs);
	else
		scif_nodeqp_intrhandler(scifdev, scifdev->qpairs);
}

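/*
 * Create the per-node ordered workqueue that runs scif_intr_bh_handler().
 * Idempotent: if the workqueue already exists it is left untouched.
 */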
int scif_setup_intr_wq(struct scif_dev *scifdev)
{
	if (!scifdev->intr_wq) {
		snprintf(scifdev->intr_wqname, sizeof(scifdev->intr_wqname),
			 "SCIF INTR %d", scifdev->node);
		scifdev->intr_wq =
			alloc_ordered_workqueue(scifdev->intr_wqname, 0);
		if (!scifdev->intr_wq)
			return -ENOMEM;
		INIT_WORK(&scifdev->intr_bh, scif_intr_bh_handler);
	}
	return 0;
}

void scif_destroy_intr_wq(struct scif_dev *scifdev)
{
	if (scifdev->intr_wq) {
		destroy_workqueue(scifdev->intr_wq);
		scifdev->intr_wq = NULL;
	}
}

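/*
 * Hard interrupt handler for the SCIF doorbell: acknowledge the doorbell
 * and defer the real work to intr_wq via scif_intr_bh_handler().
 */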
irqreturn_t scif_intr_handler(int irq, void *data)
{
	struct scif_dev *scifdev = data;
	struct scif_hw_dev *sdev = scifdev->sdev;

	sdev->hw_ops->ack_interrupt(sdev, scifdev->db);
	queue_work(scifdev->intr_wq, &scifdev->intr_bh);
	return IRQ_HANDLED;
}

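/*
 * Delayed work that completes queue pair setup with the peer. It reads the
 * peer's queue pair DMA address and doorbell from the boot parameter area
 * (directly on the mgmt node, via MMIO reads on the card) and, if the peer
 * has not published an address yet, reschedules itself after one second.
 */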
static void scif_qp_setup_handler(struct work_struct *work)
{
	struct scif_dev *scifdev = container_of(work, struct scif_dev,
						qp_dwork.work);
	struct scif_hw_dev *sdev = scifdev->sdev;
	dma_addr_t da = 0;
	int err;

	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		da = bp->scif_card_dma_addr;
		scifdev->rdb = bp->h2c_scif_db;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		da = readq(&bp->scif_host_dma_addr);
		scifdev->rdb = ioread8(&bp->c2h_scif_db);
	}
	if (da) {
		err = scif_qp_response(da, scifdev);
		if (err)
			dev_err(&scifdev->sdev->dev,
				"scif_qp_response err %d\n", err);
	} else {
		schedule_delayed_work(&scifdev->qp_dwork,
				      msecs_to_jiffies(1000));
	}
}

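/*
 * Allocate the global scif_dev array and initialize per-node state:
 * node id, wait queue, lock, work items and the peer-to-peer list.
 */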
static int scif_setup_scifdev(void)
{
	/* We support a maximum of 129 SCIF nodes including the mgmt node */
#define MAX_SCIF_NODES 129
	int i;
	u8 num_nodes = MAX_SCIF_NODES;

	scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
	if (!scif_dev)
		return -ENOMEM;
	for (i = 0; i < num_nodes; i++) {
		struct scif_dev *scifdev = &scif_dev[i];

		scifdev->node = i;
		scifdev->exit = OP_IDLE;
		init_waitqueue_head(&scifdev->disconn_wq);
		mutex_init(&scifdev->lock);
		INIT_WORK(&scifdev->peer_add_work, scif_add_peer_device);
		INIT_DELAYED_WORK(&scifdev->p2p_dwork,
				  scif_poll_qp_state);
		INIT_DELAYED_WORK(&scifdev->qp_dwork,
				  scif_qp_setup_handler);
		INIT_LIST_HEAD(&scifdev->p2p);
		RCU_INIT_POINTER(scifdev->spdev, NULL);
	}
	return 0;
}

static void scif_destroy_scifdev(void)
{
	kfree(scif_dev);
}

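/*
 * Probe callback for a SCIF hardware device. The first probed device also
 * brings up the loopback queue pair for the local node. The probe then
 * creates the interrupt workqueue and queue pair, requests a doorbell
 * interrupt, publishes the local queue pair DMA address and doorbell via
 * the boot parameter area, and schedules qp_dwork to complete setup with
 * the peer.
 */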
static int scif_probe(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];
	int rc;

	dev_set_drvdata(&sdev->dev, sdev);
	scifdev->sdev = sdev;

	if (1 == atomic_add_return(1, &g_loopb_cnt)) {
		struct scif_dev *loopb_dev = &scif_dev[sdev->snode];

		loopb_dev->sdev = sdev;
		rc = scif_setup_loopback_qp(loopb_dev);
		if (rc)
			goto exit;
	}

	rc = scif_setup_intr_wq(scifdev);
	if (rc)
		goto destroy_loopb;
	rc = scif_setup_qp(scifdev);
	if (rc)
		goto destroy_intr;
	scifdev->db = sdev->hw_ops->next_db(sdev);
	scifdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
						    "SCIF_INTR", scifdev,
						    scifdev->db);
	if (IS_ERR(scifdev->cookie)) {
		rc = PTR_ERR(scifdev->cookie);
		goto free_qp;
	}
	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = scifdev->db;
		bp->scif_host_dma_addr = scifdev->qp_dma_addr;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(scifdev->db, &bp->h2c_scif_db);
		writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr);
	}
	schedule_delayed_work(&scifdev->qp_dwork,
			      msecs_to_jiffies(1000));
	return rc;
free_qp:
	scif_free_qp(scifdev);
destroy_intr:
	scif_destroy_intr_wq(scifdev);
destroy_loopb:
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
exit:
	return rc;
}

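/*
 * Tear down connections to all remote SCIF nodes; the loopback (self) node
 * is skipped.
 */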
void scif_stop(struct scif_dev *scifdev)
{
	struct scif_dev *dev;
	int i;

	for (i = scif_info.maxid; i >= 0; i--) {
		dev = &scif_dev[i];
		if (scifdev_self(dev))
			continue;
		scif_handle_remove_node(i);
	}
}

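/*
 * Remove callback: the inverse of scif_probe(). Clears the doorbell and
 * queue pair address published in the boot parameter area, disconnects the
 * node, drops the loopback queue pair when the last device goes away, then
 * frees the interrupt, workqueue and queue pair and resets per-node state.
 */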
static void scif_remove(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];

	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = -1;
		bp->scif_host_dma_addr = 0x0;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(-1, &bp->h2c_scif_db);
		writeq(0x0, &bp->scif_card_dma_addr);
	}
	if (scif_is_mgmt_node()) {
		scif_disconnect_node(scifdev->node, true);
	} else {
		scif_info.card_initiated_exit = true;
		scif_stop(scifdev);
	}
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
	if (scifdev->cookie) {
		sdev->hw_ops->free_irq(sdev, scifdev->cookie, scifdev);
		scifdev->cookie = NULL;
	}
	scif_destroy_intr_wq(scifdev);
	cancel_delayed_work(&scifdev->qp_dwork);
	scif_free_qp(scifdev);
	scifdev->rdb = -1;
	scifdev->sdev = NULL;
}

static struct scif_hw_dev_id id_table[] = {
	{ MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
	{ 0 },
};

static struct scif_driver scif_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = scif_probe,
	.remove = scif_remove,
};

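/*
 * Initialize the driver-wide scif_info state (locks, lists, wait queue,
 * tunables), allocate the scif_dev array and create the kmem cache used
 * for unaligned DMA buffers.
 */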
static int _scif_init(void)
{
	int rc;

	mutex_init(&scif_info.eplock);
	spin_lock_init(&scif_info.rmalock);
	spin_lock_init(&scif_info.nb_connect_lock);
	spin_lock_init(&scif_info.port_lock);
	mutex_init(&scif_info.conflock);
	mutex_init(&scif_info.connlock);
	mutex_init(&scif_info.fencelock);
	INIT_LIST_HEAD(&scif_info.uaccept);
	INIT_LIST_HEAD(&scif_info.listen);
	INIT_LIST_HEAD(&scif_info.zombie);
	INIT_LIST_HEAD(&scif_info.connected);
	INIT_LIST_HEAD(&scif_info.disconnected);
	INIT_LIST_HEAD(&scif_info.rma);
	INIT_LIST_HEAD(&scif_info.rma_tc);
	INIT_LIST_HEAD(&scif_info.mmu_notif_cleanup);
	INIT_LIST_HEAD(&scif_info.fence);
	INIT_LIST_HEAD(&scif_info.nb_connect_list);
	init_waitqueue_head(&scif_info.exitwq);
	scif_info.rma_tc_limit = SCIF_RMA_TEMP_CACHE_LIMIT;
	scif_info.en_msg_log = 0;
	scif_info.p2p_enable = 1;
	rc = scif_setup_scifdev();
	if (rc)
		goto error;
	unaligned_cache = kmem_cache_create("Unaligned_DMA",
					    SCIF_KMEM_UNALIGNED_BUF_SIZE,
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!unaligned_cache) {
		rc = -ENOMEM;
		goto free_sdev;
	}
	INIT_WORK(&scif_info.misc_work, scif_misc_handler);
	INIT_WORK(&scif_info.mmu_notif_work, scif_mmu_notif_handler);
	INIT_WORK(&scif_info.conn_work, scif_conn_handler);
	idr_init(&scif_ports);
	return 0;
free_sdev:
	scif_destroy_scifdev();
error:
	return rc;
}

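/* Undo _scif_init(): destroy the port IDR, the DMA cache and the scif_dev array. */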
static void _scif_exit(void)
{
	idr_destroy(&scif_ports);
	kmem_cache_destroy(unaligned_cache);
	scif_destroy_scifdev();
}

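/*
 * Module entry point: set up the driver-wide state, register the SCIF peer
 * bus and the SCIF driver, and expose the "scif" misc character device
 * together with its debugfs entries.
 */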
static int __init scif_init(void)
{
	struct miscdevice *mdev = &scif_info.mdev;
	int rc;

	_scif_init();
	iova_cache_get();
	rc = scif_peer_bus_init();
	if (rc)
		goto exit;
	rc = scif_register_driver(&scif_driver);
	if (rc)
		goto peer_bus_exit;
	rc = misc_register(mdev);
	if (rc)
		goto unreg_scif;
	scif_init_debugfs();
	return 0;
unreg_scif:
	scif_unregister_driver(&scif_driver);
peer_bus_exit:
	scif_peer_bus_exit();
exit:
	_scif_exit();
	return rc;
}

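/* Module exit: unwind scif_init() in reverse order. */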
static void __exit scif_exit(void)
{
	scif_exit_debugfs();
	misc_deregister(&scif_info.mdev);
	scif_unregister_driver(&scif_driver);
	scif_peer_bus_exit();
	iova_cache_put();
	_scif_exit();
}

module_init(scif_init);
module_exit(scif_exit);

MODULE_DEVICE_TABLE(scif, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) SCIF driver");
MODULE_LICENSE("GPL v2");