// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <net/smc.h>

#include <asm/debug.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

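/*
 * Issue a command against the device's MMIO control area: the request
 * payload is copied first and the header last (presumably the header write
 * is what triggers execution), the response code is preset to ISM_ERROR,
 * and the response is copied back once the device completes.
 */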
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
	memcpy_toio(ism->ctl, req, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	memcpy_fromio(resp, ism->ctl, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
		      resp->len - sizeof(*resp));
out:
	return resp->ret;
}

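/* Issue a command that consists of a request header only. */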
static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

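/*
 * Fetch general device information for the debug trace. The command union
 * type comes from ism.h (assumed here to be union ism_qi); failures are
 * ignored since the query is purely informational.
 */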
static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

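/*
 * Allocate the system buffer area (SBA) as one DMA-coherent page and
 * register its bus address with the device; on command failure the page is
 * freed again so no state is left behind.
 */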
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
				  &dma_handle, GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
				  &dma_handle, GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	if (!ism->sba)
		return 0;

	if (ism_cmd_simple(ism, ISM_UNREG_SBA))
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	if (!ism->ieq)
		return 0;

	if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

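/* Read the GID identifying this device and cache it in the SMC-D device. */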
static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}

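/*
 * smcd_ops callback: query whether the remote GID is reachable from this
 * device, optionally qualified by a VLAN id.
 */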
static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

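/*
 * DMB (direct memory buffer) helpers: each DMB occupies one slot in the SBA
 * bitmap plus one DMA-coherent buffer. ism_alloc_dmb() picks a free slot,
 * or validates and claims a caller-provided one.
 */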
static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOMEM;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					    &dmb->dma_addr, GFP_KERNEL |
					    __GFP_NOWARN | __GFP_NOMEMALLOC |
					    __GFP_COMP | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

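/*
 * smcd_ops callback: allocate a DMB and register it with the device. The
 * token returned in the response identifies the DMB in later move and
 * unregister operations.
 */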
static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}

static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}

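/* VLAN callbacks: add or remove a VLAN id, and toggle the VLAN-required device state. */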
static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

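/*
 * smcd_ops callback: signal an event to the peer identified by rgid,
 * optionally raising an interrupt there.
 */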
static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

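/*
 * max_bytes() returns how many of len bytes fit between start and the next
 * boundary (a power of two). ism_move() uses it to split a transfer into
 * chunks that never cross a page boundary; the signal flag sf is applied
 * only to the final chunk.
 */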
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}

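/*
 * Drain the IEQ: advance from the last processed entry up to the write
 * index the device maintains in the queue header, wrapping at the end of
 * the entry array, and hand each event to the SMC-D layer.
 */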
static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}

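/*
 * MSI handler: scan the DMB notification bits in the SBA, forward each set
 * bit to the SMC-D layer and clear its DMBE mask entry, then process queued
 * events if the SBA event indicator is set.
 */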
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit == end)
			break;

		clear_bit_inv(bit, bv);
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
};

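/*
 * Bring the device up: one MSI vector, the interrupt handler, SBA, IEQ,
 * local GID, and finally registration with the SMC-D layer; the error path
 * unwinds in reverse order.
 */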
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

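/*
 * PCI probe: enable the device, map BAR 2 as the control area, limit DMA
 * segments to 1 MB (which ism_alloc_dmb() checks via dma_get_max_seg_size()),
 * allocate the SMC-D device and initialize the hardware.
 */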
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ism->ctl = pci_iomap(pdev, 2, 0);
	if (!ism->ctl)
		goto err_resource;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto err_unmap;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd)
		goto err_unmap;

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_unmap:
	pci_iounmap(pdev, ism->ctl);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}

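/* Reverse of ism_dev_init(): deregister from SMC-D, drop SBA/IEQ, release the interrupt. */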
static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_iounmap(pdev, ism->ctl);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

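/* Power management: suspend tears the device down completely; resume rebuilds it. */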
static int ism_suspend(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	ism_dev_exit(ism);
	return 0;
}

static int ism_resume(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	return ism_dev_init(ism);
}

static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
	.driver	  = {
		.pm = &ism_pm_ops,
	},
};

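/* The debug feature is set up before driver registration and torn down again on failure. */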
static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);