// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <net/smc.h>

#include <asm/debug.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"
static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
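/* Issue a command against the device's MMIO control area: the request
 * payload is copied out first and the header last (presumably so the
 * device only starts on a complete request), then the response header
 * and, on success, the response payload are read back. Returns the
 * device's response code, 0 on success.
 */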
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
	memcpy_toio(ism->ctl, req, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	memcpy_fromio(resp, ism->ctl, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
		      resp->len - sizeof(*resp));
out:
	return resp->ret;
}
static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}
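/* Query general device information; the response is only recorded in
 * the s390 debug feature, so a failure here is not propagated.
 */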
static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}
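/* Allocate one DMA-coherent page for the SBA (the page holding the DMB
 * interrupt bit vectors scanned by the IRQ handler) and register its
 * bus address with the device.
 */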
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
				  &dma_handle, GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}
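/* Allocate and register the information event queue (IEQ). ieq_idx
 * starts at -1 so the first entry the device writes (index 0) is picked
 * up by ism_handle_event().
 */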
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
				  &dma_handle, GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}
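/* For both unregister commands, an ISM_ERROR response is treated like
 * success as far as freeing the DMA memory goes; only other failures
 * leave the buffers allocated.
 */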
static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}
static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}
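/* Read the device's own GID and publish it as the local GID of the
 * attached SMC-D device.
 */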
static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}
static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}
static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}
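/* Reserve a slot in the SBA bitmap for the DMB (the caller's sba_idx if
 * set, otherwise the next free index) and back it with DMA-coherent
 * memory. The extra GFP flags keep large allocations quiet and cheap:
 * no warnings, no retries, no dipping into emergency reserves.
 */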
static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					    &dmb->dma_addr, GFP_KERNEL |
					    __GFP_NOWARN | __GFP_NOMEMALLOC |
					    __GFP_COMP | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}
static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}
static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}
static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}
static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}
static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}
static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}
static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}
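/* Number of bytes that can be copied from 'start' without crossing the
 * next 'boundary' (a power of two), capped at 'len'.
 */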
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}
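/* Move data into a DMB in chunks that never cross a page boundary. The
 * signal flag 'sf' is passed through only for the final chunk,
 * presumably so the receiver sees a single completion signal per
 * logical transfer.
 */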
static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
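/* Drain the information event queue: advance from the last processed
 * index up to the write index published by the device, wrapping at the
 * end of the ring, and hand each entry to the SMC-D layer.
 */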
static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}
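/* MSI handler: scan the DMB bit vector in the SBA for buffers with new
 * data, clear each bit and its DMB entry mask before notifying the
 * SMC-D layer, then process pending IEQ entries if the event indicator
 * is set.
 */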
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}
static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
};
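/* Bring the device up in dependency order: MSI vector and IRQ handler
 * first, then SBA and IEQ registration, the local GID, and finally
 * registration with the SMC-D layer. Each failure path unwinds exactly
 * the steps that already succeeded.
 */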
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}
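/* PCI probe: enable the device, map BAR 2 as the command/control area,
 * set up DMA (64-bit mask, 1 MB segment size/boundary), allocate the
 * SMC-D device and finish bring-up in ism_dev_init().
 */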
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ism->ctl = pci_iomap(pdev, 2, 0);
	if (!ism->ctl) {
		ret = -ENOMEM;
		goto err_resource;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto err_unmap;

	pci_set_dma_seg_boundary(pdev, SZ_1M - 1);
	pci_set_dma_max_seg_size(pdev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_unmap:
	pci_iounmap(pdev, ism->ctl);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}
static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}
static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_iounmap(pdev, ism->ctl);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}
static int ism_suspend(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	ism_dev_exit(ism);
	return 0;
}
static int ism_resume(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	return ism_dev_init(ism);
}
static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);
static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
	.driver	  = {
		.pm = &ism_pm_ops,
	},
};
static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}
static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}
module_init(ism_init);
module_exit(ism_exit);