// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>

#include "ism.h"
MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"
static const struct pci_device_id ism_device_table[] = {
        { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;

#define NO_CLIENT               0xff            /* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
                                                /* a list for fast mapping  */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
static bool ism_v2_capable;
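/* All probed ISM devices are kept on ism_dev_list; registered clients live in
 * the clients[] array above, indexed by client id, with max_client bounding
 * the range of ids in use.
 */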
struct ism_dev_list {
        struct list_head list;
        struct mutex mutex; /* protects ism device list */
};

static struct ism_dev_list ism_dev_list = {
        .list = LIST_HEAD_INIT(ism_dev_list.list),
        .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
};
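/* Subscribe a client on a device so that interrupts and events raised by the
 * device are forwarded to it (see ism_handle_irq() and ism_handle_event()).
 */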
static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
        unsigned long flags;

        spin_lock_irqsave(&ism->lock, flags);
        ism->subs[client->id] = client;
        spin_unlock_irqrestore(&ism->lock, flags);
}
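/* Assign the client a free slot in clients[] and introduce it to every ISM
 * device that has already been probed.
 */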
int ism_register_client(struct ism_client *client)
{
        struct ism_dev *ism;
        int i, ret = -ENOSPC;

        mutex_lock(&ism_dev_list.mutex);
        mutex_lock(&clients_lock);
        for (i = 0; i < MAX_CLIENTS; ++i) {
                if (!clients[i]) {
                        clients[i] = client;
                        client->id = i;
                        if (i == max_client)
                                max_client++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&clients_lock);

        if (i < MAX_CLIENTS) {
                /* initialize with all devices that we got so far */
                list_for_each_entry(ism, &ism_dev_list.list, list) {
                        ism->priv[i] = NULL;
                        client->add(ism);
                        ism_setup_forwarding(client, ism);
                }
        }
        mutex_unlock(&ism_dev_list.mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(ism_register_client);
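/* A client may only unregister once all of its DMBs have been unregistered;
 * otherwise the call warns and fails with -EBUSY.
 */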
int ism_unregister_client(struct ism_client *client)
{
        struct ism_dev *ism;
        unsigned long flags;
        int rc = 0;

        mutex_lock(&ism_dev_list.mutex);
        list_for_each_entry(ism, &ism_dev_list.list, list) {
                spin_lock_irqsave(&ism->lock, flags);
                /* Stop forwarding IRQs and events */
                ism->subs[client->id] = NULL;
                for (int i = 0; i < ISM_NR_DMBS; ++i) {
                        if (ism->sba_client_arr[i] == client->id) {
                                WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
                                     __func__, client->name);
                                rc = -EBUSY;
                                goto err_reg_dmb;
                        }
                }
                spin_unlock_irqrestore(&ism->lock, flags);
        }
        mutex_unlock(&ism_dev_list.mutex);

        mutex_lock(&clients_lock);
        clients[client->id] = NULL;
        if (client->id + 1 == max_client)
                max_client--;
        mutex_unlock(&clients_lock);
        return rc;

err_reg_dmb:
        spin_unlock_irqrestore(&ism->lock, flags);
        mutex_unlock(&ism_dev_list.mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(ism_unregister_client);
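/* Issue a command to the device: the request payload is written first and the
 * header last, then the response header is read back before the response
 * payload is fetched.
 */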
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
        struct ism_req_hdr *req = cmd;
        struct ism_resp_hdr *resp = cmd;

        __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
        __ism_write_cmd(ism, req, 0, sizeof(*req));

        WRITE_ONCE(resp->ret, ISM_ERROR);

        __ism_read_cmd(ism, resp, 0, sizeof(*resp));
        if (resp->ret) {
                debug_text_event(ism_debug_info, 0, "cmd failure");
                debug_event(ism_debug_info, 0, resp, sizeof(*resp));
                goto out;
        }
        __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
        return resp->ret;
}
static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
        union ism_cmd_simple cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = cmd_code;
        cmd.request.hdr.len = sizeof(cmd.request);

        return ism_cmd(ism, &cmd);
}
static int query_info(struct ism_dev *ism)
{
        union ism_query_info cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_QUERY_INFO;
        cmd.request.hdr.len = sizeof(cmd.request);

        if (ism_cmd(ism, &cmd))
                goto out;

        debug_text_event(ism_debug_info, 3, "query info");
        debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
        return 0;
}
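/* The SBA and the IEQ each occupy one DMA-coherent page whose bus address is
 * registered with the device via ISM_REG_SBA / ISM_REG_IEQ.
 */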
static int register_sba(struct ism_dev *ism)
{
        union ism_reg_sba cmd;
        dma_addr_t dma_handle;
        struct ism_sba *sba;

        sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
                                 GFP_KERNEL);
        if (!sba)
                return -ENOMEM;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_REG_SBA;
        cmd.request.hdr.len = sizeof(cmd.request);
        cmd.request.sba = dma_handle;

        if (ism_cmd(ism, &cmd)) {
                dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
                return -EIO;
        }

        ism->sba = sba;
        ism->sba_dma_addr = dma_handle;

        return 0;
}
static int register_ieq(struct ism_dev *ism)
{
        union ism_reg_ieq cmd;
        dma_addr_t dma_handle;
        struct ism_eq *ieq;

        ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
                                 GFP_KERNEL);
        if (!ieq)
                return -ENOMEM;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_REG_IEQ;
        cmd.request.hdr.len = sizeof(cmd.request);
        cmd.request.ieq = dma_handle;
        cmd.request.len = sizeof(*ieq);

        if (ism_cmd(ism, &cmd)) {
                dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
                return -EIO;
        }

        ism->ieq = ieq;
        ism->ieq_idx = -1;
        ism->ieq_dma_addr = dma_handle;

        return 0;
}
static int unregister_sba(struct ism_dev *ism)
{
        int ret;

        if (!ism->sba)
                return 0;

        ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
        if (ret && ret != ISM_ERROR)
                return -EIO;

        dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
                          ism->sba, ism->sba_dma_addr);

        ism->sba = NULL;
        ism->sba_dma_addr = 0;

        return 0;
}
static int unregister_ieq(struct ism_dev *ism)
{
        int ret;

        if (!ism->ieq)
                return 0;

        ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
        if (ret && ret != ISM_ERROR)
                return -EIO;

        dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
                          ism->ieq, ism->ieq_dma_addr);

        ism->ieq = NULL;
        ism->ieq_dma_addr = 0;

        return 0;
}
static int ism_read_local_gid(struct ism_dev *ism)
{
        union ism_read_gid cmd;
        int ret;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_READ_GID;
        cmd.request.hdr.len = sizeof(cmd.request);

        ret = ism_cmd(ism, &cmd);
        if (ret)
                goto out;

        ism->local_gid = cmd.response.gid;
out:
        return ret;
}
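/* DMBs are backed by physically contiguous folios that are DMA-mapped for the
 * device; the SBA bitmap tracks which DMB slots are in use.
 */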
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
        clear_bit(dmb->sba_idx, ism->sba_bitmap);
        dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
                       DMA_FROM_DEVICE);
        folio_put(virt_to_folio(dmb->cpu_addr));
}
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
        struct folio *folio;
        unsigned long bit;
        int rc;

        if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
                return -EINVAL;

        if (!dmb->sba_idx) {
                bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
                                         ISM_DMB_BIT_OFFSET);
                if (bit == ISM_NR_DMBS)
                        return -ENOSPC;

                dmb->sba_idx = bit;
        }
        if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
            test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
                return -EINVAL;

        folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
                            __GFP_NORETRY, get_order(dmb->dmb_len));
        if (!folio) {
                rc = -ENOMEM;
                goto out_bit;
        }

        dmb->cpu_addr = folio_address(folio);
        dmb->dma_addr = dma_map_page(&ism->pdev->dev,
                                     virt_to_page(dmb->cpu_addr), 0,
                                     dmb->dmb_len, DMA_FROM_DEVICE);
        if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
                rc = -ENOMEM;
                goto out_free;
        }

        return 0;

out_free:
        kfree(dmb->cpu_addr);
out_bit:
        clear_bit(dmb->sba_idx, ism->sba_bitmap);
        return rc;
}
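/* Registering a DMB returns a token (dmb_tok) that remote peers use to address
 * the buffer; the owning client id is recorded in sba_client_arr so interrupts
 * can be routed to it.
 */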
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
                     struct ism_client *client)
{
        union ism_reg_dmb cmd;
        unsigned long flags;
        int ret;

        ret = ism_alloc_dmb(ism, dmb);
        if (ret)
                goto out;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_REG_DMB;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.dmb = dmb->dma_addr;
        cmd.request.dmb_len = dmb->dmb_len;
        cmd.request.sba_idx = dmb->sba_idx;
        cmd.request.vlan_valid = dmb->vlan_valid;
        cmd.request.vlan_id = dmb->vlan_id;
        cmd.request.rgid = dmb->rgid;

        ret = ism_cmd(ism, &cmd);
        if (ret) {
                ism_free_dmb(ism, dmb);
                goto out;
        }
        dmb->dmb_tok = cmd.response.dmb_tok;
        spin_lock_irqsave(&ism->lock, flags);
        ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
        spin_unlock_irqrestore(&ism->lock, flags);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(ism_register_dmb);
int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
        union ism_unreg_dmb cmd;
        unsigned long flags;
        int ret;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_UNREG_DMB;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.dmb_tok = dmb->dmb_tok;

        spin_lock_irqsave(&ism->lock, flags);
        ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
        spin_unlock_irqrestore(&ism->lock, flags);

        ret = ism_cmd(ism, &cmd);
        if (ret && ret != ISM_ERROR)
                goto out;

        ism_free_dmb(ism, dmb);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(ism_unregister_dmb);
static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
        union ism_set_vlan_id cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.vlan_id = vlan_id;

        return ism_cmd(ism, &cmd);
}
static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
        union ism_set_vlan_id cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.vlan_id = vlan_id;

        return ism_cmd(ism, &cmd);
}
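/* Number of bytes that can be copied starting at 'start' without crossing the
 * given power-of-two boundary.
 */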
static unsigned int max_bytes(unsigned int start, unsigned int len,
                              unsigned int boundary)
{
        return min(boundary - (start & (boundary - 1)), len);
}
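/* Copy data into a remote DMB in chunks that never cross a page boundary; the
 * signal flag is only applied to the final chunk.
 */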
int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
             unsigned int offset, void *data, unsigned int size)
{
        unsigned int bytes;
        u64 dmb_req;
        int ret;

        while (size) {
                bytes = max_bytes(offset, size, PAGE_SIZE);
                dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
                                         offset);

                ret = __ism_move(ism, dmb_req, data, bytes);
                if (ret)
                        return ret;

                size -= bytes;
                data += bytes;
                offset += bytes;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ism_move);
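/* Drain the information event queue and hand each entry to every subscribed
 * client's handle_event() callback.
 */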
static void ism_handle_event(struct ism_dev *ism)
{
        struct ism_event *entry;
        struct ism_client *clt;
        int i;

        while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
                if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
                        ism->ieq_idx = 0;

                entry = &ism->ieq->entry[ism->ieq_idx];
                debug_event(ism_debug_info, 2, entry, sizeof(*entry));
                for (i = 0; i < max_client; ++i) {
                        clt = ism->subs[i];
                        if (clt)
                                clt->handle_event(ism, entry);
                }
        }
}
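/* Interrupt handler: walk the DMB notification bits in the SBA, forward each
 * pending DMB to the client that owns it, then process queued events.
 */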
static irqreturn_t ism_handle_irq(int irq, void *data)
{
        struct ism_dev *ism = data;
        unsigned long bit, end;
        unsigned long *bv;
        u16 dmbemask;
        u8 client_id;

        bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
        end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

        spin_lock(&ism->lock);
        ism->sba->s = 0;
        barrier();
        for (bit = 0;;) {
                bit = find_next_bit_inv(bv, end, bit);
                if (bit >= end)
                        break;

                clear_bit_inv(bit, bv);
                dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
                ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
                barrier();
                client_id = ism->sba_client_arr[bit];
                if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
                        continue;
                ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
        }

        if (ism->sba->e) {
                ism->sba->e = 0;
                barrier();
                ism_handle_event(ism);
        }
        spin_unlock(&ism->lock);
        return IRQ_HANDLED;
}
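/* Bring up one ISM device: allocate the MSI vector and client mapping array,
 * request the interrupt, register SBA and IEQ, read the local GID, probe for
 * V2 capability via the reserved VLAN id, then announce the device to all
 * registered clients and add it to the global device list.
 */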
static int ism_dev_init(struct ism_dev *ism)
{
        struct pci_dev *pdev = ism->pdev;
        int i, ret;

        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
        if (ret <= 0)
                goto out;

        ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
        if (!ism->sba_client_arr)
                goto free_vectors;
        memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);

        ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
                          pci_name(pdev), ism);
        if (ret)
                goto free_client_arr;

        ret = register_sba(ism);
        if (ret)
                goto free_irq;

        ret = register_ieq(ism);
        if (ret)
                goto unreg_sba;

        ret = ism_read_local_gid(ism);
        if (ret)
                goto unreg_ieq;

        if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
                /* hardware is V2 capable */
                ism_v2_capable = true;
        else
                ism_v2_capable = false;

        mutex_lock(&ism_dev_list.mutex);
        mutex_lock(&clients_lock);
        for (i = 0; i < max_client; ++i) {
                if (clients[i]) {
                        clients[i]->add(ism);
                        ism_setup_forwarding(clients[i], ism);
                }
        }
        mutex_unlock(&clients_lock);

        list_add(&ism->list, &ism_dev_list.list);
        mutex_unlock(&ism_dev_list.mutex);

        query_info(ism);
        return 0;

unreg_ieq:
        unregister_ieq(ism);
unreg_sba:
        unregister_sba(ism);
free_irq:
        free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
        kfree(ism->sba_client_arr);
free_vectors:
        pci_free_irq_vectors(pdev);
out:
        return ret;
}
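/* PCI probe: set up the struct device, enable the function, claim its memory
 * regions, configure a 64-bit DMA mask with 1 MB segment size and boundary,
 * and finally initialize the ISM-specific state.
 */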
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ism_dev *ism;
        int ret;

        ism = kzalloc(sizeof(*ism), GFP_KERNEL);
        if (!ism)
                return -ENOMEM;

        spin_lock_init(&ism->lock);
        dev_set_drvdata(&pdev->dev, ism);
        ism->pdev = pdev;
        ism->dev.parent = &pdev->dev;
        device_initialize(&ism->dev);
        dev_set_name(&ism->dev, dev_name(&pdev->dev));
        ret = device_add(&ism->dev);
        if (ret)
                goto err_dev;

        ret = pci_enable_device_mem(pdev);
        if (ret)
                goto err;

        ret = pci_request_mem_regions(pdev, DRV_NAME);
        if (ret)
                goto err_disable;

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                goto err_resource;

        dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
        dma_set_max_seg_size(&pdev->dev, SZ_1M);
        pci_set_master(pdev);

        ret = ism_dev_init(ism);
        if (ret)
                goto err_resource;

        return 0;

err_resource:
        pci_release_mem_regions(pdev);
err_disable:
        pci_disable_device(pdev);
err:
        device_del(&ism->dev);
err_dev:
        dev_set_drvdata(&pdev->dev, NULL);
        kfree(ism);

        return ret;
}
static void ism_dev_exit(struct ism_dev *ism)
{
        struct pci_dev *pdev = ism->pdev;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ism->lock, flags);
        for (i = 0; i < max_client; ++i)
                ism->subs[i] = NULL;
        spin_unlock_irqrestore(&ism->lock, flags);

        mutex_lock(&ism_dev_list.mutex);
        mutex_lock(&clients_lock);
        for (i = 0; i < max_client; ++i) {
                if (clients[i])
                        clients[i]->remove(ism);
        }
        mutex_unlock(&clients_lock);

        if (ism_v2_capable)
                ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
        unregister_ieq(ism);
        unregister_sba(ism);
        free_irq(pci_irq_vector(pdev, 0), ism);
        kfree(ism->sba_client_arr);
        pci_free_irq_vectors(pdev);
        list_del_init(&ism->list);
        mutex_unlock(&ism_dev_list.mutex);
}
static void ism_remove(struct pci_dev *pdev)
{
        struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

        ism_dev_exit(ism);

        pci_release_mem_regions(pdev);
        pci_disable_device(pdev);
        device_del(&ism->dev);
        dev_set_drvdata(&pdev->dev, NULL);
        kfree(ism);
}
static struct pci_driver ism_driver = {
        .name     = DRV_NAME,
        .id_table = ism_device_table,
        .probe    = ism_probe,
        .remove   = ism_remove,
};
static int __init ism_init(void)
{
        int ret;

        ism_debug_info = debug_register("ism", 2, 1, 16);
        if (!ism_debug_info)
                return -ENODEV;

        memset(clients, 0, sizeof(clients));
        max_client = 0;
        debug_register_view(ism_debug_info, &debug_hex_ascii_view);
        ret = pci_register_driver(&ism_driver);
        if (ret)
                debug_unregister(ism_debug_info);

        return ret;
}
static void __exit ism_exit(void)
{
        pci_unregister_driver(&ism_driver);
        debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);
/*************************** SMC-D Implementation *****************************/

#if IS_ENABLED(CONFIG_SMC)
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
                          u32 vid)
{
        union ism_query_rgid cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_QUERY_RGID;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.rgid = rgid;
        cmd.request.vlan_valid = vid_valid;
        cmd.request.vlan_id = vid;

        return ism_cmd(ism, &cmd);
}
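/* The smcd_* wrappers below adapt the smcd_ops interface of the SMC-D protocol
 * layer to the ISM device operations; smcd->priv points to the ism_dev.
 */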
static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
                           u32 vid_valid, u32 vid)
{
        return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
}
static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
                             void *client)
{
        return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
}
static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
        return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
}
static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
        return ism_add_vlan_id(smcd->priv, vlan_id);
}

static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
        return ism_del_vlan_id(smcd->priv, vlan_id);
}
static int smcd_set_vlan_required(struct smcd_dev *smcd)
{
        return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int smcd_reset_vlan_required(struct smcd_dev *smcd)
{
        return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}
static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
                          u32 event_code, u64 info)
{
        union ism_sig_ieq cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.rgid = rgid;
        cmd.request.trigger_irq = trigger_irq;
        cmd.request.event_code = event_code;
        cmd.request.info = info;

        return ism_cmd(ism, &cmd);
}
static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
                           u32 trigger_irq, u32 event_code, u64 info)
{
        return ism_signal_ieq(smcd->priv, rgid->gid,
                              trigger_irq, event_code, info);
}
static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
                     bool sf, unsigned int offset, void *data,
                     unsigned int size)
{
        return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
}
static int smcd_supports_v2(void)
{
        return ism_v2_capable;
}
static u64 ism_get_local_gid(struct ism_dev *ism)
{
        return ism->local_gid;
}
static void smcd_get_local_gid(struct smcd_dev *smcd,
                               struct smcd_gid *smcd_gid)
{
        smcd_gid->gid = ism_get_local_gid(smcd->priv);
        smcd_gid->gid_ext = 0;
}
static u16 ism_get_chid(struct ism_dev *ism)
{
        if (!ism || !ism->pdev)
                return 0;

        return to_zpci(ism->pdev)->pchid;
}
static u16 smcd_get_chid(struct smcd_dev *smcd)
{
        return ism_get_chid(smcd->priv);
}
static inline struct device *smcd_get_dev(struct smcd_dev *dev)
{
        struct ism_dev *ism = dev->priv;

        return &ism->dev;
}
static const struct smcd_ops ism_ops = {
        .query_remote_gid = smcd_query_rgid,
        .register_dmb = smcd_register_dmb,
        .unregister_dmb = smcd_unregister_dmb,
        .add_vlan_id = smcd_add_vlan_id,
        .del_vlan_id = smcd_del_vlan_id,
        .set_vlan_required = smcd_set_vlan_required,
        .reset_vlan_required = smcd_reset_vlan_required,
        .signal_event = smcd_signal_ieq,
        .move_data = smcd_move,
        .supports_v2 = smcd_supports_v2,
        .get_local_gid = smcd_get_local_gid,
        .get_chid = smcd_get_chid,
        .get_dev = smcd_get_dev,
};
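/* Hand the smcd_ops table to the SMC-D code that binds to this device. */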
const struct smcd_ops *ism_get_smcd_ops(void)
{
        return &ism_ops;
}
EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
#endif