// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>
#include <net/smc.h>

#include <asm/debug.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;
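/*
 * Issue a command to the ISM device and read back its response.
 * The request payload is written before the request header at
 * offset 0 (writing the header appears to be what kicks off
 * execution). resp->ret is pre-set to ISM_ERROR so that a response
 * that never arrives is not mistaken for success; on failure only
 * the response header is logged and the payload is not read back.
 */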
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	return resp->ret;
}
static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}
static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}
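/*
 * Allocate one page of DMA-coherent memory for the summary buffer
 * area (SBA) and register it with the device via ISM_REG_SBA. The
 * SBA is where the device posts the per-DMB summary bits that
 * ism_handle_irq() scans.
 */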
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}
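/*
 * Allocate and register the information event queue (IEQ). The
 * device appends smcd_event entries to this page; ism_handle_event()
 * consumes them by comparing ieq_idx against the queue header index.
 */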
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}
static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}
static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}
static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}
static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}
static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}
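/*
 * Reserve an SBA slot and a DMA buffer for a DMB. The buffer must
 * fit within the DMA segment limit configured in ism_probe(); if the
 * caller did not request a specific sba_idx, the first free bit at
 * or above ISM_DMB_BIT_OFFSET is used.
 */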
static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					   &dmb->dma_addr,
					   GFP_KERNEL | __GFP_NOWARN |
					   __GFP_NOMEMALLOC | __GFP_COMP |
					   __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}
static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}
static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}
static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}
static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}
static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}
static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}
static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}
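/*
 * max_bytes() returns how many of 'len' bytes fit before 'start'
 * crosses the next 'boundary' (a power of two). ism_move() uses it
 * to split a transfer at page boundaries: for example, with
 * PAGE_SIZE 0x1000, offset 0xf00 and size 0x300 yield chunks of
 * 0x100 and 0x200. The signal flag (sf) is only applied to the
 * final chunk.
 */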
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}
static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
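/*
 * System EID template used for SMC-Dv2. ism_create_system_eid()
 * overwrites the serial_number and type fields with values derived
 * from the CPU id, so a leading character other than '0' later
 * indicates that a system EID was generated (see ism_dev_exit()).
 */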
static struct ism_systemeid SYSTEM_EID = {
	.seid_string = "IBM-SYSZ-ISMSEID00000000",
	.serial_number = "0000",
	.type = "0000",
};
static void ism_create_system_eid(void)
{
	struct cpuid id;
	u16 ident_tail;
	char tmp[5];

	get_cpu_id(&id);
	ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
	snprintf(tmp, 5, "%04X", ident_tail);
	memcpy(&SYSTEM_EID.serial_number, tmp, 4);
	snprintf(tmp, 5, "%04X", id.machine);
	memcpy(&SYSTEM_EID.type, tmp, 4);
}
static void ism_get_system_eid(struct smcd_dev *smcd, u8 **eid)
{
	*eid = &SYSTEM_EID.seid_string[0];
}
static u16 ism_get_chid(struct smcd_dev *smcd)
{
	struct ism_dev *ismdev;

	ismdev = (struct ism_dev *)smcd->priv;
	if (!ismdev || !ismdev->pdev)
		return 0;

	return to_zpci(ismdev->pdev)->pchid;
}
static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}
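/*
 * MSI interrupt handler: scan the SBA summary bits for DMBs with
 * pending data and forward each to the SMC-D layer, then drain the
 * event queue if the SBA event indicator is set. Runs under
 * ism->lock.
 */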
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit == end)
			break;

		clear_bit_inv(bit, bv);
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}
static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
	.get_system_eid = ism_get_system_eid,
	.get_chid = ism_get_chid,
};
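/*
 * Bring-up order: MSI vector, interrupt handler, SBA, IEQ, local GID,
 * then registration with the SMC-D core. Successfully adding the
 * reserved VLAN id is taken as the indication that the hardware is
 * SMC-Dv2 capable, in which case a system EID is generated.
 */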
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	if (!ism_add_vlan_id(ism->smcd, ISM_RESERVED_VLANID))
		/* hardware is V2 capable */
		ism_create_system_eid();

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}
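/*
 * PCI probe: enable the function, claim its memory regions, set a
 * 64-bit DMA mask and a 1 MB DMA segment limit (which bounds the
 * DMB size accepted by ism_alloc_dmb()), allocate the smcd device
 * and run ism_dev_init(). Errors unwind in reverse order.
 */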
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd) {
		ret = -ENOMEM;
		goto err_resource;
	}

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}
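/*
 * Teardown mirrors ism_dev_init(): unregister from the SMC-D core,
 * drop the reserved VLAN id if a system EID was generated, then
 * release the IEQ, the SBA, the interrupt and the MSI vector.
 */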
static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	if (SYSTEM_EID.serial_number[0] != '0' ||
	    SYSTEM_EID.type[0] != '0')
		ism_del_vlan_id(ism->smcd, ISM_RESERVED_VLANID);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}
static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}
static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
};
static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}
static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}
module_init(ism_init);
module_exit(ism_exit);