1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Intel SCIF driver.
 */
9 #include <linux/module.h>
10 #include <linux/idr.h>
12 #include <linux/mic_common.h>
13 #include "../common/mic_dev.h"
14 #include "../bus/scif_bus.h"
15 #include "scif_peer_bus.h"
16 #include "scif_main.h"
19 struct scif_info scif_info
= {
21 .minor
= MISC_DYNAMIC_MINOR
,
27 struct scif_dev
*scif_dev
;
28 struct kmem_cache
*unaligned_cache
;
29 static atomic_t g_loopb_cnt
;
31 /* Runs in the context of intr_wq */
32 static void scif_intr_bh_handler(struct work_struct
*work
)
34 struct scif_dev
*scifdev
=
35 container_of(work
, struct scif_dev
, intr_bh
);
37 if (scifdev_self(scifdev
))
38 scif_loopb_msg_handler(scifdev
, scifdev
->qpairs
);
40 scif_nodeqp_intrhandler(scifdev
, scifdev
->qpairs
);
43 int scif_setup_intr_wq(struct scif_dev
*scifdev
)
45 if (!scifdev
->intr_wq
) {
46 snprintf(scifdev
->intr_wqname
, sizeof(scifdev
->intr_wqname
),
47 "SCIF INTR %d", scifdev
->node
);
49 alloc_ordered_workqueue(scifdev
->intr_wqname
, 0);
50 if (!scifdev
->intr_wq
)
52 INIT_WORK(&scifdev
->intr_bh
, scif_intr_bh_handler
);
57 void scif_destroy_intr_wq(struct scif_dev
*scifdev
)
59 if (scifdev
->intr_wq
) {
60 destroy_workqueue(scifdev
->intr_wq
);
61 scifdev
->intr_wq
= NULL
;
65 irqreturn_t
scif_intr_handler(int irq
, void *data
)
67 struct scif_dev
*scifdev
= data
;
68 struct scif_hw_dev
*sdev
= scifdev
->sdev
;
70 sdev
->hw_ops
->ack_interrupt(sdev
, scifdev
->db
);
71 queue_work(scifdev
->intr_wq
, &scifdev
->intr_bh
);
75 static void scif_qp_setup_handler(struct work_struct
*work
)
77 struct scif_dev
*scifdev
= container_of(work
, struct scif_dev
,
79 struct scif_hw_dev
*sdev
= scifdev
->sdev
;
83 if (scif_is_mgmt_node()) {
84 struct mic_bootparam
*bp
= sdev
->dp
;
86 da
= bp
->scif_card_dma_addr
;
87 scifdev
->rdb
= bp
->h2c_scif_db
;
89 struct mic_bootparam __iomem
*bp
= sdev
->rdp
;
91 da
= readq(&bp
->scif_host_dma_addr
);
92 scifdev
->rdb
= ioread8(&bp
->c2h_scif_db
);
95 err
= scif_qp_response(da
, scifdev
);
97 dev_err(&scifdev
->sdev
->dev
,
98 "scif_qp_response err %d\n", err
);
100 schedule_delayed_work(&scifdev
->qp_dwork
,
101 msecs_to_jiffies(1000));
105 static int scif_setup_scifdev(void)
107 /* We support a maximum of 129 SCIF nodes including the mgmt node */
108 #define MAX_SCIF_NODES 129
110 u8 num_nodes
= MAX_SCIF_NODES
;
112 scif_dev
= kcalloc(num_nodes
, sizeof(*scif_dev
), GFP_KERNEL
);
115 for (i
= 0; i
< num_nodes
; i
++) {
116 struct scif_dev
*scifdev
= &scif_dev
[i
];
119 scifdev
->exit
= OP_IDLE
;
120 init_waitqueue_head(&scifdev
->disconn_wq
);
121 mutex_init(&scifdev
->lock
);
122 INIT_WORK(&scifdev
->peer_add_work
, scif_add_peer_device
);
123 INIT_DELAYED_WORK(&scifdev
->p2p_dwork
,
125 INIT_DELAYED_WORK(&scifdev
->qp_dwork
,
126 scif_qp_setup_handler
);
127 INIT_LIST_HEAD(&scifdev
->p2p
);
128 RCU_INIT_POINTER(scifdev
->spdev
, NULL
);
133 static void scif_destroy_scifdev(void)
139 static int scif_probe(struct scif_hw_dev
*sdev
)
141 struct scif_dev
*scifdev
= &scif_dev
[sdev
->dnode
];
144 dev_set_drvdata(&sdev
->dev
, sdev
);
145 scifdev
->sdev
= sdev
;
147 if (1 == atomic_add_return(1, &g_loopb_cnt
)) {
148 struct scif_dev
*loopb_dev
= &scif_dev
[sdev
->snode
];
150 loopb_dev
->sdev
= sdev
;
151 rc
= scif_setup_loopback_qp(loopb_dev
);
156 rc
= scif_setup_intr_wq(scifdev
);
159 rc
= scif_setup_qp(scifdev
);
162 scifdev
->db
= sdev
->hw_ops
->next_db(sdev
);
163 scifdev
->cookie
= sdev
->hw_ops
->request_irq(sdev
, scif_intr_handler
,
164 "SCIF_INTR", scifdev
,
166 if (IS_ERR(scifdev
->cookie
)) {
167 rc
= PTR_ERR(scifdev
->cookie
);
170 if (scif_is_mgmt_node()) {
171 struct mic_bootparam
*bp
= sdev
->dp
;
173 bp
->c2h_scif_db
= scifdev
->db
;
174 bp
->scif_host_dma_addr
= scifdev
->qp_dma_addr
;
176 struct mic_bootparam __iomem
*bp
= sdev
->rdp
;
178 iowrite8(scifdev
->db
, &bp
->h2c_scif_db
);
179 writeq(scifdev
->qp_dma_addr
, &bp
->scif_card_dma_addr
);
181 schedule_delayed_work(&scifdev
->qp_dwork
,
182 msecs_to_jiffies(1000));
185 scif_free_qp(scifdev
);
187 scif_destroy_intr_wq(scifdev
);
189 if (atomic_dec_and_test(&g_loopb_cnt
))
190 scif_destroy_loopback_qp(&scif_dev
[sdev
->snode
]);
195 void scif_stop(struct scif_dev
*scifdev
)
197 struct scif_dev
*dev
;
200 for (i
= scif_info
.maxid
; i
>= 0; i
--) {
202 if (scifdev_self(dev
))
204 scif_handle_remove_node(i
);
208 static void scif_remove(struct scif_hw_dev
*sdev
)
210 struct scif_dev
*scifdev
= &scif_dev
[sdev
->dnode
];
212 if (scif_is_mgmt_node()) {
213 struct mic_bootparam
*bp
= sdev
->dp
;
215 bp
->c2h_scif_db
= -1;
216 bp
->scif_host_dma_addr
= 0x0;
218 struct mic_bootparam __iomem
*bp
= sdev
->rdp
;
220 iowrite8(-1, &bp
->h2c_scif_db
);
221 writeq(0x0, &bp
->scif_card_dma_addr
);
223 if (scif_is_mgmt_node()) {
224 scif_disconnect_node(scifdev
->node
, true);
226 scif_info
.card_initiated_exit
= true;
229 if (atomic_dec_and_test(&g_loopb_cnt
))
230 scif_destroy_loopback_qp(&scif_dev
[sdev
->snode
]);
231 if (scifdev
->cookie
) {
232 sdev
->hw_ops
->free_irq(sdev
, scifdev
->cookie
, scifdev
);
233 scifdev
->cookie
= NULL
;
235 scif_destroy_intr_wq(scifdev
);
236 cancel_delayed_work(&scifdev
->qp_dwork
);
237 scif_free_qp(scifdev
);
239 scifdev
->sdev
= NULL
;
242 static struct scif_hw_dev_id id_table
[] = {
243 { MIC_SCIF_DEV
, SCIF_DEV_ANY_ID
},
247 static struct scif_driver scif_driver
= {
248 .driver
.name
= KBUILD_MODNAME
,
249 .driver
.owner
= THIS_MODULE
,
250 .id_table
= id_table
,
252 .remove
= scif_remove
,
255 static int _scif_init(void)
259 mutex_init(&scif_info
.eplock
);
260 spin_lock_init(&scif_info
.rmalock
);
261 spin_lock_init(&scif_info
.nb_connect_lock
);
262 spin_lock_init(&scif_info
.port_lock
);
263 mutex_init(&scif_info
.conflock
);
264 mutex_init(&scif_info
.connlock
);
265 mutex_init(&scif_info
.fencelock
);
266 INIT_LIST_HEAD(&scif_info
.uaccept
);
267 INIT_LIST_HEAD(&scif_info
.listen
);
268 INIT_LIST_HEAD(&scif_info
.zombie
);
269 INIT_LIST_HEAD(&scif_info
.connected
);
270 INIT_LIST_HEAD(&scif_info
.disconnected
);
271 INIT_LIST_HEAD(&scif_info
.rma
);
272 INIT_LIST_HEAD(&scif_info
.rma_tc
);
273 INIT_LIST_HEAD(&scif_info
.mmu_notif_cleanup
);
274 INIT_LIST_HEAD(&scif_info
.fence
);
275 INIT_LIST_HEAD(&scif_info
.nb_connect_list
);
276 init_waitqueue_head(&scif_info
.exitwq
);
277 scif_info
.rma_tc_limit
= SCIF_RMA_TEMP_CACHE_LIMIT
;
278 scif_info
.en_msg_log
= 0;
279 scif_info
.p2p_enable
= 1;
280 rc
= scif_setup_scifdev();
283 unaligned_cache
= kmem_cache_create("Unaligned_DMA",
284 SCIF_KMEM_UNALIGNED_BUF_SIZE
,
285 0, SLAB_HWCACHE_ALIGN
, NULL
);
286 if (!unaligned_cache
) {
290 INIT_WORK(&scif_info
.misc_work
, scif_misc_handler
);
291 INIT_WORK(&scif_info
.mmu_notif_work
, scif_mmu_notif_handler
);
292 INIT_WORK(&scif_info
.conn_work
, scif_conn_handler
);
293 idr_init(&scif_ports
);
296 scif_destroy_scifdev();
301 static void _scif_exit(void)
303 idr_destroy(&scif_ports
);
304 kmem_cache_destroy(unaligned_cache
);
305 scif_destroy_scifdev();
308 static int __init
scif_init(void)
310 struct miscdevice
*mdev
= &scif_info
.mdev
;
315 rc
= scif_peer_bus_init();
318 rc
= scif_register_driver(&scif_driver
);
321 rc
= misc_register(mdev
);
327 scif_unregister_driver(&scif_driver
);
329 scif_peer_bus_exit();
335 static void __exit
scif_exit(void)
338 misc_deregister(&scif_info
.mdev
);
339 scif_unregister_driver(&scif_driver
);
340 scif_peer_bus_exit();
345 module_init(scif_init
);
346 module_exit(scif_exit
);
348 MODULE_DEVICE_TABLE(scif
, id_table
);
349 MODULE_AUTHOR("Intel Corporation");
350 MODULE_DESCRIPTION("Intel(R) SCIF driver");
351 MODULE_LICENSE("GPL v2");