drivers/s390/net/ism_drv.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <net/smc.h>

#include <asm/debug.h>

#include "ism.h"
MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;
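
/*
 * Issue a command through the MMIO control area: the payload is
 * written first and the request header last, then the response is
 * read back. Failures are traced via the s390 debug feature.
 */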
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
	memcpy_toio(ism->ctl, req, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	memcpy_fromio(resp, ism->ctl, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
		      resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}
static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}
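
/*
 * Allocate one DMA-coherent page and register it with the device as
 * the SBA; the interrupt handler scans its bit vectors to find
 * signaled DMBs and pending events.
 */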
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
				  &dma_handle, GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}
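
/*
 * Allocate and register the event queue (IEQ); ieq_idx tracks the
 * last entry that was processed.
 */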
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
				  &dma_handle, GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	if (!ism->sba)
		return 0;

	if (ism_cmd_simple(ism, ISM_UNREG_SBA))
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	if (!ism->ieq)
		return 0;

	if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}

static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}
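
/*
 * Back a DMB with DMA-coherent memory: pick a free slot in the SBA
 * bitmap, or validate the caller-supplied index, before allocating.
 */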
static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOMEM;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					    &dmb->dma_addr, GFP_KERNEL |
					    __GFP_NOWARN | __GFP_NOMEMALLOC |
					    __GFP_COMP | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}

static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}

static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}
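
/*
 * Number of bytes that fit between @start and the next @boundary
 * (a power of two), capped at @len.
 */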
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}
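
/*
 * Write data into a remote DMB in chunks that never cross a page
 * boundary; the signal flag (sf) is only applied to the final chunk.
 */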
static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
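
/*
 * Drain newly posted entries from the event queue and forward them to
 * the SMC-D layer; the index wraps at the end of the queue.
 */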
static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}
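
/*
 * Interrupt handler: clear and forward every signaled DMB bit in the
 * SBA, then process queued events if the event indicator is set.
 */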
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
};
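
/*
 * Bring the device up: MSI interrupt, SBA, IEQ, local GID, and finally
 * registration with the SMC-D layer; unwinds in reverse on failure.
 */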
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}
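
/*
 * PCI probe: map BAR 2 as the control area, configure the DMA mask and
 * segment limits, and allocate the SMC-D device before initializing
 * the hardware.
 */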
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ism->ctl = pci_iomap(pdev, 2, 0);
	if (!ism->ctl) {
		ret = -ENOMEM;
		goto err_resource;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto err_unmap;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_unmap:
	pci_iounmap(pdev, ism->ctl);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}
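
/* Tear down everything set up by ism_dev_init(), in reverse order. */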
static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_iounmap(pdev, ism->ctl);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

static int ism_suspend(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	ism_dev_exit(ism);
	return 0;
}

static int ism_resume(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	return ism_dev_init(ism);
}

static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);

static struct pci_driver ism_driver = {
	.name = DRV_NAME,
	.id_table = ism_device_table,
	.probe = ism_probe,
	.remove = ism_remove,
	.driver = {
		.pm = &ism_pm_ops,
	},
};
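
/*
 * Register the s390 debug feature first so that command failures
 * during probe can already be traced.
 */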
static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);