/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  const union ib_gid *gid,
			  const struct ib_gid_attr *attr,
			  void **context);
static int pvrdma_del_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  void **context);

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize some device related stuff */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_srqs, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
					    u8 port_num)
{
	struct net_device *netdev;
	struct pvrdma_dev *dev = to_vdev(ibdev);

	if (port_num != 1)
		return NULL;

	rcu_read_lock();
	netdev = dev->netdev;
	if (netdev)
		dev_hold(netdev);
	rcu_read_unlock();

	return netdev;
}

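/*
 * Populate the ib_device structure from the capabilities advertised in the
 * device shared region, install the verbs callbacks, allocate the CQ/QP
 * (and, when supported, SRQ) lookup tables used by the interrupt handlers,
 * then register with the IB core and create the sysfs attribute files.
 */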
static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -1;
	int i = 0;

	strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_POLL_CQ)		|
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)	|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_POST_SEND)		|
		(1ull << IB_USER_VERBS_CMD_POST_RECV)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	dev->ib_dev.query_device = pvrdma_query_device;
	dev->ib_dev.query_port = pvrdma_query_port;
	dev->ib_dev.query_gid = pvrdma_query_gid;
	dev->ib_dev.query_pkey = pvrdma_query_pkey;
	dev->ib_dev.modify_port = pvrdma_modify_port;
	dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext;
	dev->ib_dev.mmap = pvrdma_mmap;
	dev->ib_dev.alloc_pd = pvrdma_alloc_pd;
	dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd;
	dev->ib_dev.create_ah = pvrdma_create_ah;
	dev->ib_dev.destroy_ah = pvrdma_destroy_ah;
	dev->ib_dev.create_qp = pvrdma_create_qp;
	dev->ib_dev.modify_qp = pvrdma_modify_qp;
	dev->ib_dev.query_qp = pvrdma_query_qp;
	dev->ib_dev.destroy_qp = pvrdma_destroy_qp;
	dev->ib_dev.post_send = pvrdma_post_send;
	dev->ib_dev.post_recv = pvrdma_post_recv;
	dev->ib_dev.create_cq = pvrdma_create_cq;
	dev->ib_dev.modify_cq = pvrdma_modify_cq;
	dev->ib_dev.resize_cq = pvrdma_resize_cq;
	dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
	dev->ib_dev.poll_cq = pvrdma_poll_cq;
	dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
	dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr;
	dev->ib_dev.reg_user_mr = pvrdma_reg_user_mr;
	dev->ib_dev.dereg_mr = pvrdma_dereg_mr;
	dev->ib_dev.alloc_mr = pvrdma_alloc_mr;
	dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg;
	dev->ib_dev.add_gid = pvrdma_add_gid;
	dev->ib_dev.del_gid = pvrdma_del_gid;
	dev->ib_dev.get_netdev = pvrdma_get_netdev;
	dev->ib_dev.get_port_immutable = pvrdma_port_immutable;
	dev->ib_dev.get_link_layer = pvrdma_port_link_layer;
	dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str;

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	/* Check if SRQ is supported by backend */
	if (dev->dsr->caps.max_srq) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

		dev->ib_dev.create_srq = pvrdma_create_srq;
		dev->ib_dev.modify_srq = pvrdma_modify_srq;
		dev->ib_dev.query_srq = pvrdma_query_srq;
		dev->ib_dev.destroy_srq = pvrdma_destroy_srq;
		dev->ib_dev.post_srq_recv = pvrdma_post_srq_recv;

		dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
				       sizeof(struct pvrdma_srq *),
				       GFP_KERNEL);
		if (!dev->srq_tbl)
			goto err_qp_free;
	}
	spin_lock_init(&dev->srq_tbl_lock);

	ret = ib_register_device(&dev->ib_dev, NULL);
	if (ret)
		goto err_srq_free;

	for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
		ret = device_create_file(&dev->ib_dev.dev,
					 pvrdma_class_attributes[i]);
		if (ret)
			goto err_class;
	}

	dev->ib_active = true;

	return 0;

err_class:
	ib_unregister_device(&dev->ib_dev);
err_srq_free:
	kfree(dev->srq_tbl);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

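/*
 * Interrupt vector 0 signals completion of a command posted through the
 * command slot; wake up the command waiter via cmd_done. Without MSI-X the
 * line may be shared, so the interrupt cause register is read to check
 * whether this device actually raised the interrupt.
 */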
static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy intr */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

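/*
 * The QP/CQ/SRQ async event helpers below look up the object in the
 * corresponding table under its lock, take a reference so the object cannot
 * be freed while the event is delivered, invoke the consumer's event
 * handler and then drop the reference.
 */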
static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		refcount_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for now. */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		if (refcount_dec_and_test(&qp->refcnt))
			complete(&qp->free);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		refcount_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for now. */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		if (refcount_dec_and_test(&cq->refcnt))
			complete(&cq->free);
	}
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
	struct pvrdma_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	if (dev->srq_tbl)
		srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
	else
		srq = NULL;
	if (srq)
		refcount_inc(&srq->refcnt);
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (srq && srq->ibsrq.event_handler) {
		struct ib_srq *ibsrq = &srq->ibsrq;
		struct ib_event e;

		e.device = ibsrq->device;
		e.element.srq = ibsrq;
		e.event = type; /* 1:1 mapping for now. */
		ibsrq->event_handler(&e, ibsrq->srq_context);
	}
	if (srq) {
		if (refcount_dec_and_test(&srq->refcnt))
			complete(&srq->free);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

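/*
 * Interrupt vector 1 drains the asynchronous event ring and fans each event
 * out to the QP, CQ, SRQ or port/device handlers above. The ring lives in
 * the pages behind async_pdir: the first page holds the ring state and the
 * remaining pages hold the event entries.
 */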
static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to ib_dispatch_event() on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			pvrdma_srq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

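/*
 * Remaining interrupt vectors deliver completion notifications. Each entry
 * in the CQ notification ring names a CQ; look it up, take a reference and
 * call its completion handler.
 */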
static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;
	unsigned long flags;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			refcount_inc(&cq->refcnt);
		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		if (cq) {
			if (refcount_dec_and_test(&cq->refcnt))
				complete(&cq->free);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

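/*
 * Prefer a full set of MSI-X vectors (one for command responses, one for
 * async events and the rest for completions); fall back to a single MSI or
 * legacy vector if that fails, in which case the line may be shared.
 */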
static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			  pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				  i == 1 ? pvrdma_intr1_handler :
					   pvrdma_intrx_handler,
				  0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

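/*
 * GID table changes are pushed to the device as create/destroy binding
 * commands; the local sgid_tbl copy is kept in sync so that a binding can
 * later be destroyed by index.
 */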
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  const union ib_gid *gid,
			  const struct ib_gid_attr *attr,
			  void **context)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);

	return pvrdma_add_gid_at_index(dev, gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return ret;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  void **context)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, index);
}

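/*
 * Link state changes on the paired vmxnet3 netdev are translated into IB
 * port events. The notifier below runs in atomic context, so the actual
 * handling is deferred to the ordered event workqueue.
 */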
static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  unsigned long event)
{
	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev->ib_dev.name);
		break;
	}
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if (dev->netdev == netdev_work->event_netdev) {
			pvrdma_netdevice_event_handle(dev, netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

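/*
 * Probe: enable and map the PCI device, set up the shared region through
 * which driver and device exchange capabilities, allocate the command and
 * response slots and the async/CQ notification rings, pair with the vmxnet3
 * function in the same PCI slot, set up interrupts and the UAR and GID
 * tables, activate the device and finally register the IB device and the
 * netdevice notifier.
 */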
static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate zero-out device */
	dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_free_device;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-Bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			goto err_free_resource;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			goto err_free_resource;
		}
	}

	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				       &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
		PVRDMA_GOS_BITS_32 :
		PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;
	dev->dsr->uar_pfn = dev->driver_uar.pfn;

	/* Command slot. */
	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	/* Response slot. */
	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */

	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* Paired vmxnet3 will have same bus, slot. But func will be 0 */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
err_free_cq_ring:
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

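/*
 * Teardown mirrors probe in reverse: unregister the notifier and the IB
 * device, quiesce interrupts, reset the device and release all rings,
 * slots, tables and PCI resources.
 */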
static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Deactivate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);

	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->srq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name		= DRV_NAME,
	.id_table	= pvrdma_pci_table,
	.probe		= pvrdma_pci_probe,
	.remove		= pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");