drivers/infiniband/sw/rdmavt/vt.c

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include "vt.h"
#include "trace.h"

#define RVT_UVERBS_ABI_VERSION 2

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("RDMA Verbs Transport Library");

static int rvt_init(void)
{
	/*
	 * rdmavt does not need to do anything special when it starts up. All it
	 * needs to do is sit and wait until a driver attempts registration.
	 */
	return 0;
}
module_init(rvt_init);

static void rvt_cleanup(void)
{
	/*
	 * Nothing to do at exit time either. The module won't be able to be
	 * removed until all drivers are gone which means all the dev structs
	 * are gone so there is really nothing to do.
	 */
}
module_exit(rvt_cleanup);

/**
 * rvt_alloc_device - allocate rdi
 * @size: how big of a structure to allocate
 * @nports: number of ports to allocate array slots for
 *
 * Use IB core device alloc to allocate space for the rdi which is assumed to be
 * inside of the ib_device. Any extra space that drivers require should be
 * included in size.
 *
 * We also allocate a port array based on the number of ports.
 *
 * Return: pointer to allocated rdi
 */
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
{
	struct rvt_dev_info *rdi;

	rdi = (struct rvt_dev_info *)ib_alloc_device(size);
	if (!rdi)
		return rdi;

	rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
	if (!rdi->ports) {
		/* Do not hand back a pointer into a freed device */
		ib_dealloc_device(&rdi->ibdev);
		return NULL;
	}

	return rdi;
}
EXPORT_SYMBOL(rvt_alloc_device);

/**
 * rvt_dealloc_device - deallocate rdi
 * @rdi: structure to free
 *
 * Free a structure allocated with rvt_alloc_device()
 */
void rvt_dealloc_device(struct rvt_dev_info *rdi)
{
	kfree(rdi->ports);
	ib_dealloc_device(&rdi->ibdev);
}
EXPORT_SYMBOL(rvt_dealloc_device);
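
/*
 * A minimal usage sketch, not taken from any in-tree driver: a driver embeds
 * struct rvt_dev_info at the start of its own device structure, sizes the
 * allocation accordingly, and pairs the allocation with rvt_dealloc_device()
 * on its teardown path. The struct name "my_drv_devdata" and the single-port
 * assumption are hypothetical.
 *
 *	struct my_drv_devdata {
 *		struct rvt_dev_info rdi;	// must be first
 *		// driver private fields follow
 *	};
 *
 *	struct my_drv_devdata *dd;
 *
 *	dd = (struct my_drv_devdata *)rvt_alloc_device(sizeof(*dd), 1);
 *	if (!dd)
 *		return -ENOMEM;
 *	...
 *	rvt_dealloc_device(&dd->rdi);
 */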

static int rvt_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *props,
			    struct ib_udata *uhw)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;
	/*
	 * Return rvt_dev_info.dparms.props contents
	 */
	*props = rdi->dparms.props;
	return 0;
}

static int rvt_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	/*
	 * There is currently no need to supply this based on qib and hfi1.
	 * Future drivers may need to implement this though.
	 */

	return -EOPNOTSUPP;
}

/**
 * rvt_query_port - Passes the query port call to the driver
 * @ibdev: Verbs IB dev
 * @port_num: port number, 1 based from ib core
 * @props: structure to hold returned properties
 *
 * Return: 0 on success
 */
static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
			  struct ib_port_attr *props)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_ibport *rvp;
	int port_index = ibport_num_to_idx(ibdev, port_num);

	if (port_index < 0)
		return -EINVAL;

	rvp = rdi->ports[port_index];
	memset(props, 0, sizeof(*props));
	props->sm_lid = rvp->sm_lid;
	props->sm_sl = rvp->sm_sl;
	props->port_cap_flags = rvp->port_cap_flags;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = rvt_get_npkeys(rdi);
	props->bad_pkey_cntr = rvp->pkey_violations;
	props->qkey_viol_cntr = rvp->qkey_violations;
	props->subnet_timeout = rvp->subnet_timeout;
	props->init_type_reply = 0;

	/* Populate the remaining ib_port_attr elements */
	return rdi->driver_f.query_port_state(rdi, port_num, props);
}

/**
 * rvt_modify_port - modify port properties
 * @ibdev: Verbs IB dev
 * @port_num: Port number, 1 based from ib core
 * @port_modify_mask: How to change the port
 * @props: Port modifications to apply
 *
 * Return: 0 on success
 */
static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_ibport *rvp;
	int ret = 0;
	int port_index = ibport_num_to_idx(ibdev, port_num);

	if (port_index < 0)
		return -EINVAL;

	rvp = rdi->ports[port_index];
	rvp->port_cap_flags |= props->set_port_cap_mask;
	rvp->port_cap_flags &= ~props->clr_port_cap_mask;

	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		rdi->driver_f.cap_mask_chg(rdi, port_num);

	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ret = rdi->driver_f.shut_down_port(rdi, port_num);

	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		rvp->qkey_violations = 0;

	return ret;
}

/**
 * rvt_query_pkey - Return a pkey from the table at a given index
 * @ibdev: Verbs IB dev
 * @port_num: Port number, 1 based from ib core
 * @index: Index into the pkey table
 * @pkey: Returned pkey from the port pkey table
 *
 * Return: 0 on success, -EINVAL if the port or index is out of range
 */
static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
			  u16 *pkey)
{
	/*
	 * Driver will be responsible for keeping rvt_dev_info.pkey_table up to
	 * date. This function will just return that value. There is no need to
	 * lock; if a stale value is read and sent to the user, so be it, since
	 * there is no way to protect against that anyway.
	 */
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	int port_index;

	port_index = ibport_num_to_idx(ibdev, port_num);
	if (port_index < 0)
		return -EINVAL;

	if (index >= rvt_get_npkeys(rdi))
		return -EINVAL;

	*pkey = rvt_get_pkey(rdi, port_index, index);
	return 0;
}

/**
 * rvt_query_gid - Return a gid from the table
 * @ibdev: Verbs IB dev
 * @port_num: Port number, 1 based from ib core
 * @guid_index: Index into the guid table
 * @gid: Gid to return
 *
 * Return: 0 on success
 */
static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
			 int guid_index, union ib_gid *gid)
{
	struct rvt_dev_info *rdi;
	struct rvt_ibport *rvp;
	int port_index;

	/*
	 * The driver is responsible for updating the guid table, which is used
	 * to craft the return value here, similar to how query_pkey() works.
	 */
	port_index = ibport_num_to_idx(ibdev, port_num);
	if (port_index < 0)
		return -EINVAL;

	rdi = ib_to_rvt(ibdev);
	rvp = rdi->ports[port_index];

	gid->global.subnet_prefix = rvp->gid_prefix;

	return rdi->driver_f.get_guid_be(rdi, rvp, guid_index,
					 &gid->global.interface_id);
}

struct rvt_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
						*ibucontext)
{
	return container_of(ibucontext, struct rvt_ucontext, ibucontext);
}

/**
 * rvt_alloc_ucontext - Allocate a user context
 * @ibdev: Verbs IB dev
 * @udata: User data allocated
 */
static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct rvt_ucontext *context;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	return &context->ibucontext;
}

/**
 * rvt_dealloc_ucontext - Free a user context
 * @context: Free this
 */
static int rvt_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}

static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct ib_port_attr attr;
	int err, port_index;

	port_index = ibport_num_to_idx(ibdev, port_num);
	if (port_index < 0)
		return -EINVAL;

	err = rvt_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = rdi->dparms.core_cap_flags;
	immutable->max_mad_size = rdi->dparms.max_mad_size;

	return 0;
}
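
/*
 * Index of every verb (plus a MISC slot for non-verb requirements) that
 * check_support() knows how to validate. rvt_register_device() walks this
 * list from 0 to _VERB_IDX_MAX, so every new entry needs a matching case
 * in check_support().
 */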
enum {
	MISC,
	QUERY_DEVICE,
	MODIFY_DEVICE,
	QUERY_PORT,
	MODIFY_PORT,
	QUERY_PKEY,
	QUERY_GID,
	ALLOC_UCONTEXT,
	DEALLOC_UCONTEXT,
	GET_PORT_IMMUTABLE,
	CREATE_QP,
	MODIFY_QP,
	DESTROY_QP,
	QUERY_QP,
	POST_SEND,
	POST_RECV,
	POST_SRQ_RECV,
	CREATE_AH,
	DESTROY_AH,
	MODIFY_AH,
	QUERY_AH,
	CREATE_SRQ,
	MODIFY_SRQ,
	DESTROY_SRQ,
	QUERY_SRQ,
	ATTACH_MCAST,
	DETACH_MCAST,
	GET_DMA_MR,
	REG_USER_MR,
	DEREG_MR,
	ALLOC_MR,
	MAP_MR_SG,
	ALLOC_FMR,
	MAP_PHYS_FMR,
	UNMAP_FMR,
	DEALLOC_FMR,
	MMAP,
	CREATE_CQ,
	DESTROY_CQ,
	POLL_CQ,
	REQ_NOTIFY_CQ,
	RESIZE_CQ,
	ALLOC_PD,
	DEALLOC_PD,
	_VERB_IDX_MAX /* Must always be last! */
};

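/**
 * check_driver_override - install a default verb unless the driver overrides it
 * @rdi: rvt device info
 * @offset: byte offset of the verb pointer within struct ib_device
 * @func: rdmavt's default implementation for that verb
 *
 * If the driver left the ib_device function pointer at @offset NULL, plug in
 * rdmavt's @func.
 *
 * Return: 0 when rdmavt's implementation was installed, 1 when the driver
 * supplied its own override.
 */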
static inline int check_driver_override(struct rvt_dev_info *rdi,
					size_t offset, void *func)
{
	if (!*(void **)((void *)&rdi->ibdev + offset)) {
		*(void **)((void *)&rdi->ibdev + offset) = func;
		return 0;
	}

	return 1;
}

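/*
 * check_support() is called once per verb index at registration time. For
 * verbs that rdmavt fully implements it simply installs the default; for
 * verbs where the default needs driver help (or where rdmavt has no
 * implementation at all) it also verifies that the required driver_f
 * callbacks are present and returns an error otherwise, which aborts
 * registration.
 */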
static noinline int check_support(struct rvt_dev_info *rdi, int verb)
{
	switch (verb) {
	case MISC:
		/*
		 * These functions are not part of verbs specifically but are
		 * required for rdmavt to function.
		 */
		if ((!rdi->driver_f.port_callback) ||
		    (!rdi->driver_f.get_card_name) ||
		    (!rdi->driver_f.get_pci_dev))
			return -EINVAL;
		break;

	case QUERY_DEVICE:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_device),
				      rvt_query_device);
		break;

	case MODIFY_DEVICE:
		/*
		 * rdmavt does not currently support modify device; drivers
		 * must provide it.
		 */
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 modify_device),
					   rvt_modify_device))
			return -EOPNOTSUPP;
		break;

	case QUERY_PORT:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 query_port),
					   rvt_query_port))
			if (!rdi->driver_f.query_port_state)
				return -EINVAL;
		break;

	case MODIFY_PORT:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 modify_port),
					   rvt_modify_port))
			if (!rdi->driver_f.cap_mask_chg ||
			    !rdi->driver_f.shut_down_port)
				return -EINVAL;
		break;

	case QUERY_PKEY:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_pkey),
				      rvt_query_pkey);
		break;

	case QUERY_GID:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 query_gid),
					   rvt_query_gid))
			if (!rdi->driver_f.get_guid_be)
				return -EINVAL;
		break;

	case ALLOC_UCONTEXT:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_ucontext),
				      rvt_alloc_ucontext);
		break;

	case DEALLOC_UCONTEXT:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dealloc_ucontext),
				      rvt_dealloc_ucontext);
		break;

	case GET_PORT_IMMUTABLE:
		check_driver_override(rdi, offsetof(struct ib_device,
						    get_port_immutable),
				      rvt_get_port_immutable);
		break;

	case CREATE_QP:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 create_qp),
					   rvt_create_qp))
			if (!rdi->driver_f.qp_priv_alloc ||
			    !rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case MODIFY_QP:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 modify_qp),
					   rvt_modify_qp))
			if (!rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.schedule_send ||
			    !rdi->driver_f.get_pmtu_from_attr ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp ||
			    !rdi->driver_f.notify_error_qp ||
			    !rdi->driver_f.mtu_from_qp ||
			    !rdi->driver_f.mtu_to_path_mtu)
				return -EINVAL;
		break;

	case DESTROY_QP:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 destroy_qp),
					   rvt_destroy_qp))
			if (!rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case QUERY_QP:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_qp),
				      rvt_query_qp);
		break;

	case POST_SEND:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 post_send),
					   rvt_post_send))
			if (!rdi->driver_f.schedule_send ||
			    !rdi->driver_f.do_send ||
			    !rdi->post_parms)
				return -EINVAL;
		break;

	case POST_RECV:
		check_driver_override(rdi, offsetof(struct ib_device,
						    post_recv),
				      rvt_post_recv);
		break;

	case POST_SRQ_RECV:
		check_driver_override(rdi, offsetof(struct ib_device,
						    post_srq_recv),
				      rvt_post_srq_recv);
		break;

	case CREATE_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    create_ah),
				      rvt_create_ah);
		break;

	case DESTROY_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    destroy_ah),
				      rvt_destroy_ah);
		break;

	case MODIFY_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    modify_ah),
				      rvt_modify_ah);
		break;

	case QUERY_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_ah),
				      rvt_query_ah);
		break;

	case CREATE_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    create_srq),
				      rvt_create_srq);
		break;

	case MODIFY_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    modify_srq),
				      rvt_modify_srq);
		break;

	case DESTROY_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    destroy_srq),
				      rvt_destroy_srq);
		break;

	case QUERY_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_srq),
				      rvt_query_srq);
		break;

	case ATTACH_MCAST:
		check_driver_override(rdi, offsetof(struct ib_device,
						    attach_mcast),
				      rvt_attach_mcast);
		break;

	case DETACH_MCAST:
		check_driver_override(rdi, offsetof(struct ib_device,
						    detach_mcast),
				      rvt_detach_mcast);
		break;

	case GET_DMA_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    get_dma_mr),
				      rvt_get_dma_mr);
		break;

	case REG_USER_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    reg_user_mr),
				      rvt_reg_user_mr);
		break;

	case DEREG_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dereg_mr),
				      rvt_dereg_mr);
		break;

	case ALLOC_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_fmr),
				      rvt_alloc_fmr);
		break;

	case ALLOC_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_mr),
				      rvt_alloc_mr);
		break;

	case MAP_MR_SG:
		check_driver_override(rdi, offsetof(struct ib_device,
						    map_mr_sg),
				      rvt_map_mr_sg);
		break;

	case MAP_PHYS_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    map_phys_fmr),
				      rvt_map_phys_fmr);
		break;

	case UNMAP_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    unmap_fmr),
				      rvt_unmap_fmr);
		break;

	case DEALLOC_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dealloc_fmr),
				      rvt_dealloc_fmr);
		break;

	case MMAP:
		check_driver_override(rdi, offsetof(struct ib_device,
						    mmap),
				      rvt_mmap);
		break;

	case CREATE_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    create_cq),
				      rvt_create_cq);
		break;

	case DESTROY_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    destroy_cq),
				      rvt_destroy_cq);
		break;

	case POLL_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    poll_cq),
				      rvt_poll_cq);
		break;

	case REQ_NOTIFY_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    req_notify_cq),
				      rvt_req_notify_cq);
		break;

	case RESIZE_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    resize_cq),
				      rvt_resize_cq);
		break;

	case ALLOC_PD:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_pd),
				      rvt_alloc_pd);
		break;

	case DEALLOC_PD:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dealloc_pd),
				      rvt_dealloc_pd);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * rvt_register_device - register a driver
 * @rdi: main dev structure for all of rdmavt operations
 *
 * It is up to drivers to allocate the rdi and fill in the appropriate
 * information.
 *
 * Return: 0 on success, otherwise an errno.
 */
int rvt_register_device(struct rvt_dev_info *rdi)
{
	int ret = 0, i;

	if (!rdi)
		return -EINVAL;

	/*
	 * Check to ensure drivers have set up the required helpers for the
	 * verbs they want rdmavt to handle.
	 */
	for (i = 0; i < _VERB_IDX_MAX; i++)
		if (check_support(rdi, i)) {
			pr_err("Driver support req not met at %d\n", i);
			return -EINVAL;
		}

	/* Once we get past here we can use rvt_pr macros and tracepoints */
	trace_rvt_dbg(rdi, "Driver attempting registration");
	rvt_mmap_init(rdi);

	/* Queue Pairs */
	ret = rvt_driver_qp_init(rdi);
	if (ret) {
		pr_err("Error in driver QP init.\n");
		return -EINVAL;
	}

	/* Address Handle */
	spin_lock_init(&rdi->n_ahs_lock);
	rdi->n_ahs_allocated = 0;

	/* Shared Receive Queue */
	rvt_driver_srq_init(rdi);

	/* Multicast */
	rvt_driver_mcast_init(rdi);

	/* Mem Region */
	ret = rvt_driver_mr_init(rdi);
	if (ret) {
		pr_err("Error in driver MR init.\n");
		goto bail_no_mr;
	}

	/* Completion queues */
	ret = rvt_driver_cq_init(rdi);
	if (ret) {
		pr_err("Error in driver CQ init.\n");
		goto bail_mr;
	}

	/* DMA Operations */
	rdi->ibdev.dma_ops =
		rdi->ibdev.dma_ops ? : &rvt_default_dma_mapping_ops;

	/* Protection Domain */
	spin_lock_init(&rdi->n_pds_lock);
	rdi->n_pds_allocated = 0;

	/*
	 * There are some things which could be set by underlying drivers but
	 * really should be up to rdmavt to set. For instance drivers can't know
	 * exactly which functions rdmavt supports, nor do they know the ABI
	 * version, so we do all of this sort of stuff here.
	 */
	rdi->ibdev.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION;
	rdi->ibdev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	rdi->ibdev.node_type = RDMA_NODE_IB_CA;
	rdi->ibdev.num_comp_vectors = 1;

	/* We are now good to announce we exist */
	ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
	if (ret) {
		rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
		goto bail_cq;
	}

	rvt_create_mad_agents(rdi);

	rvt_pr_info(rdi, "Registration with rdmavt done.\n");
	return ret;

bail_cq:
	rvt_cq_exit(rdi);

bail_mr:
	rvt_mr_exit(rdi);

bail_no_mr:
	rvt_qp_exit(rdi);

	return ret;
}
EXPORT_SYMBOL(rvt_register_device);
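
/*
 * A hypothetical sketch of the registration flow from a driver's probe path.
 * The driver_f fields shown are ones checked by check_support() above; the
 * my_drv_* implementations and the error handling are illustrative only.
 *
 *	rdi->driver_f.port_callback = my_drv_port_callback;
 *	rdi->driver_f.get_card_name = my_drv_get_card_name;
 *	rdi->driver_f.get_pci_dev = my_drv_get_pci_dev;
 *	rdi->driver_f.query_port_state = my_drv_query_port_state;
 *	// ... remaining driver_f callbacks required by check_support() ...
 *
 *	ret = rvt_register_device(rdi);
 *	if (ret)
 *		goto bail;	// rdmavt undoes its own init on failure
 */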

/**
 * rvt_unregister_device - remove a driver
 * @rdi: rvt dev struct
 */
void rvt_unregister_device(struct rvt_dev_info *rdi)
{
	if (!rdi)
		return;

	trace_rvt_dbg(rdi, "Driver is unregistering.");

	rvt_free_mad_agents(rdi);

	ib_unregister_device(&rdi->ibdev);
	rvt_cq_exit(rdi);
	rvt_mr_exit(rdi);
	rvt_qp_exit(rdi);
}
EXPORT_SYMBOL(rvt_unregister_device);

/**
 * rvt_init_port - init internal data for driver port
 * @rdi: rvt dev struct
 * @port: rvt port
 * @port_index: 0 based index of ports, different from IB core port num
 * @pkey_table: pkey table for this port
 *
 * Keep track of a list of ports. No need to have a detach port.
 * They persist until the driver goes away.
 *
 * Return: always 0
 */
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table)
{
	rdi->ports[port_index] = port;
	rdi->ports[port_index]->pkey_table = pkey_table;

	return 0;
}
EXPORT_SYMBOL(rvt_init_port);
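
/*
 * An illustrative sketch, not from any in-tree driver: a driver with nports
 * ports (matching what it passed to rvt_alloc_device()) would typically call
 * this once per port from its init path. The my_drv-style containers and the
 * pkey table layout here are hypothetical.
 *
 *	for (i = 0; i < nports; i++)
 *		rvt_init_port(&dd->rdi, &dd->pports[i].ibport_data,
 *			      i, dd->pports[i].pkeys);
 */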