/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
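
/*
 * Collapse the ethtool firmware version string into the u64 fw_ver field
 * reported through ib_device_attr. Note: as written this reads only the
 * first character of the string, so fw_ver carries a single byte of
 * version information.
 */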
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = (u64) *fw_ver_str;
}
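
/*
 * Fill the create_qp udata response: the VF index, BAR0 bus address and
 * length, the vNIC indices of the RQ/WQ/CQ resources backing this
 * qp_grp, and the transport type of the group's default (first) flow.
 * Userspace maps the BAR and drives these queues directly.
 */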
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}
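
/*
 * Find a VF with room for res_spec and create a qp_grp on it. When
 * usnic_ib_share_vf is set, VFs already bound to this pd are tried
 * first; otherwise only a completely unused VF is taken. Must be called
 * with usdev_lock held; the chosen vf->lock is dropped before return.
 */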
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i, found = 0;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
		usnic_uiom_free_dev_list(dev_list);
	}

	if (!found) {
		/* Try to find resources on an unused vf */
		list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (vf->qp_grp_ref_cnt == 0 &&
				usnic_vnic_check_room(vnic, res_spec) == 0) {
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
	}

	if (!found) {
		usnic_info("No free qp grp found on %s\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-ENOMEM);
	}

	qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
						trans_spec);
	spin_unlock(&vf->lock);
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}

	return qp_grp;
}
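
/* Tear down a qp_grp under its VF's lock; the group must be in RESET. */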
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}
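
/*
 * Map an ethtool link speed (in Mbps) to the closest IB width/speed
 * pair at or above it, e.g. 10 Gbps -> 1X FDR10 and 40 Gbps -> 4X FDR10.
 */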
static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
					u8 *active_width)
{
	if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}
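
/* Reject create_qp requests whose transport type is out of range. */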
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
			cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
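
/*
 * Device attributes are synthesized from netdev/ethtool state: the GID
 * (and hence sys_image_guid) is derived from the interface MAC and IP
 * address, and the QP/CQ maxima scale with the number of VFs. Limits
 * the driver does not enforce are reported as 0.
 */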
int usnic_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	struct ethtool_cmd cmd;
	int qp_per_vf;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
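
/*
 * Port state is derived from the underlying netdev: link down maps to
 * IB_PORT_DOWN, link up without an IP address to IB_PORT_INIT, and link
 * up with an address to IB_PORT_ACTIVE. max_msg_sz is simply the netdev
 * MTU; userspace subtracts its own header overhead.
 */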
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_cmd cmd;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
				&props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
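
/*
 * Only the QP state is tracked in the kernel; all other fields of
 * qp_attr and qp_init_attr are returned zeroed. Anything but a UD QP is
 * unexpected here, since create_qp refuses other types.
 */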
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
					struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
			pd, context, ibdev->name);
	return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}
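
/*
 * Create a UD QP backed by VIC resources: validate the user's transport
 * spec, size the resource spec (one CQ when send and recv share a CQ,
 * otherwise two), grab a VF with room, and return the resource layout
 * to userspace through udata.
 */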
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
				us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec,
						&res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}
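
/* Move the qp_grp to RESET, unlink it from its ucontext, and free it. */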
int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;

	qp_grp = to_uqp_grp(ibqp);

	/* TODO: Future Support All States */
	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
	} else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
	} else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
	} else {
		usnic_err("Unexpected combination mask: %u state: %u\n",
				attr_mask & IB_QP_STATE, attr->qp_state);
		status = -EINVAL;
	}

	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}
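
/*
 * CQs are owned and polled by userspace, so the kernel-side object is
 * only a placeholder: a bare ib_cq is allocated with no backing state.
 */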
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
					int vector, struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct ib_cq *cq;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-EBUSY);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	kfree(cq);
	return 0;
}
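
/*
 * Register a memory region by pinning the user's pages through the
 * usnic uiom layer. lkey/rkey are reported as 0, presumably because the
 * VIC data path does not consume IB keys.
 */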
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 virt_addr, int access_flags,
					struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
			virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (IS_ERR_OR_NULL(mr))
		return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
					access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}
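
/*
 * Map BAR0 of the VF selected by vm_pgoff (interpreted as a VF id) into
 * userspace as uncached I/O memory. The request must cover exactly the
 * BAR length, and the VF must back one of this context's qp_grps.
 */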
int usnic_ib_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);
			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
					struct ib_ah_attr *ah_attr)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */