// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
			 u32 port)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count--;
	WARN_ON(pd->vport_use_count < 0);

	if (!pd->vport_use_count)
		mana_uncfg_vport(mpc);

	mutex_unlock(&pd->vport_mutex);
}

int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
		      u32 doorbell_id)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count++;
	if (pd->vport_use_count > 1) {
		ibdev_dbg(&dev->ib_dev,
			  "Skip as this PD is already configured vport\n");
		mutex_unlock(&pd->vport_mutex);
		return 0;
	}

	err = mana_cfg_vport(mpc, pd->pdn, doorbell_id);
	if (err) {
		pd->vport_use_count--;
		mutex_unlock(&pd->vport_mutex);

		ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err);
		return err;
	}

	mutex_unlock(&pd->vport_mutex);

	pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
	pd->tx_vp_offset = mpc->tx_vp_offset;

	ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
		  mpc->port_handle, pd->pdn, doorbell_id);

	return 0;
}

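/*
 * PD allocation issues a GDMA_CREATE_PD request and caches the returned
 * pd_handle/pd_id in the mana_ib_pd; the vport mutex and use count are
 * initialized here for the vPort configuration helpers above.
 */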
int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_create_pd_resp resp = {};
	struct gdma_create_pd_req req = {};
	enum gdma_pd_flags flags = 0;
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
			     sizeof(resp));

	req.flags = flags;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to get pd_id err %d status %u\n", err,
			  resp.hdr.status);
		return err ?: -EPROTO;
	}

	pd->pd_handle = resp.pd_handle;
	pd->pdn = resp.pd_id;
	ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
		  pd->pd_handle, pd->pdn);

	mutex_init(&pd->vport_mutex);
	pd->vport_use_count = 0;

	return 0;
}

int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_destory_pd_resp resp = {};
	struct gdma_destroy_pd_req req = {};
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
			     sizeof(resp));

	req.pd_handle = pd->pd_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to destroy pd_handle 0x%llx err %d status %u",
			  pd->pd_handle, err, resp.hdr.status);
		return err ?: -EPROTO;
	}

	return 0;
}

static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
					 int doorbell_page)
{
	struct gdma_destroy_resource_range_req req = {};
	struct gdma_resp_hdr resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.allocated_resources = doorbell_page;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.status) {
		dev_err(gc->dev,
			"Failed to destroy doorbell page: ret %d, 0x%x\n",
			err, resp.status);
		return err ?: -EPROTO;
	}

	return 0;
}

static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
					  int *doorbell_page)
{
	struct gdma_allocate_resource_range_req req = {};
	struct gdma_allocate_resource_range_resp resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;

	/* Have GDMA start searching from 0 */
	req.allocated_resources = 0;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev,
			"Failed to allocate doorbell page: ret %d, 0x%x\n",
			err, resp.hdr.status);
		return err ?: -EPROTO;
	}

	*doorbell_page = resp.allocated_resources;

	return 0;
}

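/*
 * Each user context owns one doorbell page, allocated from the GDMA
 * doorbell-page resource range above and released again in
 * mana_ib_dealloc_ucontext(); the stored index is what mana_ib_mmap()
 * later maps into userspace.
 */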
int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
			   struct ib_udata *udata)
{
	struct mana_ib_ucontext *ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int doorbell_page;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	/* Allocate a doorbell page index */
	ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
	if (ret) {
		ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret);
		return ret;
	}

	ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page);

	ucontext->doorbell = doorbell_page;

	return 0;
}

void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
	if (ret)
		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}

int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
			 struct mana_ib_queue *queue)
{
	struct ib_umem *umem;
	int err;

	queue->umem = NULL;
	queue->id = INVALID_QUEUE_ID;
	queue->gdma_region = GDMA_INVALID_DMA_REGION;

	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
		return err;
	}

	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
		goto free_umem;
	}
	queue->umem = umem;

	ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);

	return 0;
free_umem:
	ib_umem_release(umem);
	return err;
}

void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
	ib_umem_release(queue->umem);
}

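/*
 * DMA regions larger than one HWC message are registered in two steps:
 * a GDMA_CREATE_DMA_REGION request carrying the first batch of page
 * addresses (helper below), followed by GDMA_DMA_REGION_ADD_PAGES requests
 * for the remaining batches. Intermediate messages are expected to complete
 * with GDMA_STATUS_MORE_ENTRIES.
 */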
static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
			    struct gdma_context *gc,
			    struct gdma_create_dma_region_req *create_req,
			    size_t num_pages, mana_handle_t *gdma_region,
			    u32 expected_status)
{
	struct gdma_create_dma_region_resp create_resp = {};
	unsigned int create_req_msg_size;
	int err;

	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages);
	create_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
				   sizeof(create_resp), &create_resp);
	if (err || create_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to create DMA region: %d, 0x%x\n",
			  err, create_resp.hdr.status);
		return err ?: -EPROTO;
	}

	*gdma_region = create_resp.dma_region_handle;
	ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
		  *gdma_region);

	return 0;
}

static int
mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
			  struct gdma_dma_region_add_pages_req *add_req,
			  unsigned int num_pages, u32 expected_status)
{
	unsigned int add_req_msg_size =
		struct_size(add_req, page_addr_list, num_pages);
	struct gdma_general_resp add_resp = {};
	int err;

	mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES,
			     add_req_msg_size, sizeof(add_resp));
	add_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, add_req_msg_size, add_req,
				   sizeof(add_resp), &add_resp);
	if (err || add_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to create DMA region: %d, 0x%x\n",
			  err, add_resp.hdr.status);
		return err ?: -EPROTO;
	}

	return 0;
}

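/*
 * Walk the umem in page_sz blocks and pack as many page addresses as fit
 * into a single HWC request (bounded by hwc->max_req_msg_size), reusing one
 * request buffer for both the create and the add-pages messages.
 */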
static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					mana_handle_t *gdma_region, unsigned long page_sz)
{
	struct gdma_dma_region_add_pages_req *add_req = NULL;
	size_t num_pages_processed = 0, num_pages_to_handle;
	struct gdma_create_dma_region_req *create_req;
	unsigned int create_req_msg_size;
	struct hw_channel_context *hwc;
	struct ib_block_iter biter;
	size_t max_pgs_add_cmd = 0;
	size_t max_pgs_create_cmd;
	struct gdma_context *gc;
	size_t num_pages_total;
	unsigned int tail = 0;
	u64 *page_addr_list;
	void *request_buf;
	int err;

	gc = mdev_to_gc(dev);
	hwc = gc->hwc.driver_data;

	num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);

	max_pgs_create_cmd =
		(hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64);
	num_pages_to_handle =
		min_t(size_t, num_pages_total, max_pgs_create_cmd);
	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages_to_handle);

	request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL);
	if (!request_buf)
		return -ENOMEM;

	create_req = request_buf;
	mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION,
			     create_req_msg_size,
			     sizeof(struct gdma_create_dma_region_resp));

	create_req->length = umem->length;
	create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
	create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
	create_req->page_count = num_pages_total;

	ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
		  umem->length, num_pages_total);

	ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
		  page_sz, create_req->offset_in_page);

	ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u",
		  num_pages_to_handle, create_req->gdma_page_type);

	page_addr_list = create_req->page_addr_list;
	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
		u32 expected_status = 0;

		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
		if (tail < num_pages_to_handle)
			continue;

		if (num_pages_processed + num_pages_to_handle <
		    num_pages_total)
			expected_status = GDMA_STATUS_MORE_ENTRIES;

		if (!num_pages_processed) {
			/* First create message */
			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
							  tail, gdma_region,
							  expected_status);
			if (err)
				goto out;

			max_pgs_add_cmd = (hwc->max_req_msg_size -
					   sizeof(*add_req)) / sizeof(u64);

			add_req = request_buf;
			add_req->dma_region_handle = *gdma_region;
			add_req->reserved3 = 0;
			page_addr_list = add_req->page_addr_list;
		} else {
			/* Subsequent create messages */
			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
							expected_status);
			if (err)
				break;
		}

		num_pages_processed += tail;
		tail = 0;

		/* The remaining pages to create */
		num_pages_to_handle =
			min_t(size_t,
			      num_pages_total - num_pages_processed,
			      max_pgs_add_cmd);
	}

	if (err)
		mana_ib_gd_destroy_dma_region(dev, *gdma_region);

out:
	kfree(request_buf);
	return err;
}

int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
			      mana_handle_t *gdma_region, u64 virt)
{
	unsigned long page_sz;

	page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					  mana_handle_t *gdma_region)
{
	unsigned long page_sz;

	/* Hardware requires dma region to align to chosen page size */
	page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
	struct gdma_context *gc = mdev_to_gc(dev);

	ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);

	return mana_gd_destroy_dma_region(gc, gdma_region);
}

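/*
 * Doorbell mapping: the page to expose sits at
 * phys_db_page_base + db_page_size * ucontext->doorbell, and it is mapped
 * write-combined so userspace doorbell writes are posted efficiently.
 */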
int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	if (vma->vm_pgoff != 0) {
		ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
		return -EINVAL;
	}

	/* Map to the page indexed by ucontext->doorbell */
	pfn = (gc->phys_db_page_base +
	       gc->db_page_size * mana_ucontext->doorbell) >>
	      PAGE_SHIFT;
	prot = pgprot_writecombine(vma->vm_page_prot);

	ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
				NULL);
	if (ret)
		ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
	else
		ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
			  pfn, PAGE_SIZE, ret);

	return ret;
}

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
	if (port_num == 1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}

int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			 struct ib_udata *uhw)
{
	struct mana_ib_dev *dev = container_of(ibdev,
					       struct mana_ib_dev, ib_dev);

	memset(props, 0, sizeof(*props));
	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
	props->page_size_cap = PAGE_SZ_BM;
	props->max_qp = dev->adapter_caps.max_qp_count;
	props->max_qp_wr = dev->adapter_caps.max_qp_wr;
	props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = dev->adapter_caps.max_send_sge_count;
	props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
	props->max_sge_rd = dev->adapter_caps.max_recv_sge_count;
	props->max_cq = dev->adapter_caps.max_cq_count;
	props->max_cqe = dev->adapter_caps.max_qp_wr;
	props->max_mr = dev->adapter_caps.max_mr_count;
	props->max_pd = dev->adapter_caps.max_pd_count;
	props->max_qp_rd_atom = dev->adapter_caps.max_inbound_read_limit;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_qp_init_rd_atom = dev->adapter_caps.max_outbound_read_limit;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_ah = INT_MAX;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = MANA_CA_ACK_DELAY;

	return 0;
}

int mana_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props)
{
	struct net_device *ndev = mana_ib_get_netdev(ibdev, port);

	if (!ndev)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);

	if (netif_carrier_ok(ndev) && netif_running(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_EDR;
	props->pkey_tbl_len = 1;
	if (port == 1)
		props->gid_tbl_len = 16;

	return 0;
}

enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
	*pkey = IB_DEFAULT_PKEY_FULL;

	return 0;
}

int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		      union ib_gid *gid)
{
	/* This version doesn't return GID properties */
	return 0;
}

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

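/*
 * Adapter capabilities are fetched from the device; note that max_qp_wr is
 * derived as the smaller of the requester SQ and RQ sizes divided by the
 * maximum WQE sizes.
 */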
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct mana_ib_query_adapter_caps_resp resp = {};
	struct mana_ib_query_adapter_caps_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
			     sizeof(resp));
	req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
	req.hdr.dev_id = dev->gdma_dev->dev_id;

	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
				   &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d", err);
		return err;
	}

	caps->max_sq_id = resp.max_sq_id;
	caps->max_rq_id = resp.max_rq_id;
	caps->max_cq_id = resp.max_cq_id;
	caps->max_qp_count = resp.max_qp_count;
	caps->max_cq_count = resp.max_cq_count;
	caps->max_mr_count = resp.max_mr_count;
	caps->max_pd_count = resp.max_pd_count;
	caps->max_inbound_read_limit = resp.max_inbound_read_limit;
	caps->max_outbound_read_limit = resp.max_outbound_read_limit;
	caps->mw_count = resp.mw_count;
	caps->max_srq_count = resp.max_srq_count;
	caps->max_qp_wr = min_t(u32,
				resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
				resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
	caps->max_inline_data_size = resp.max_inline_data_size;
	caps->max_send_sge_count = resp.max_send_sge_count;
	caps->max_recv_sge_count = resp.max_recv_sge_count;

	return 0;
}

static void
mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
{
	struct mana_ib_dev *mdev = (struct mana_ib_dev *)ctx;
	struct mana_ib_qp *qp;
	struct ib_event ev;
	u32 qpn;

	switch (event->type) {
	case GDMA_EQE_RNIC_QP_FATAL:
		qpn = event->details[0];
		qp = mana_get_qp_ref(mdev, qpn);
		if (!qp)
			break;
		if (qp->ibqp.event_handler) {
			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_QP_FATAL;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
		mana_put_qp_ref(qp);
		break;
	default:
		break;
	}
}

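/*
 * One EQ (with mana_ib_event_handler attached) receives fatal QP events;
 * an additional EQ per completion vector is created for CQs, with the MSI-X
 * indices spread across the usable vectors.
 */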
int mana_ib_create_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue_spec spec = {};
	int err, i;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = mana_ib_event_handler;
	spec.eq.context = mdev;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
	spec.eq.msix_index = 0;

	err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
	if (err)
		return err;

	mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
			    GFP_KERNEL);
	if (!mdev->eqs) {
		err = -ENOMEM;
		goto destroy_fatal_eq;
	}

	spec.eq.callback = NULL;
	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
		err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
		if (err)
			goto destroy_eqs;
	}

	return 0;

destroy_eqs:
	while (i-- > 0)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);
	kfree(mdev->eqs);
destroy_fatal_eq:
	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
	return err;
}

void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	int i;

	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);

	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);

	kfree(mdev->eqs);
}

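/*
 * The RNIC adapter is created with the fatal-error EQ as its notification
 * EQ; the returned adapter handle is stored in mdev and referenced by all
 * subsequent RNIC requests (GIDs, MAC, CQs, RC QPs).
 */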
int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_create_adapter_resp resp = {};
	struct mana_rnic_create_adapter_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.notify_eq_id = mdev->fatal_err_eq->id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
		return err;
	}
	mdev->adapter_handle = resp.adapter;

	return 0;
}

int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_destroy_adapter_resp resp = {};
	struct mana_rnic_destroy_adapter_req req = {};
	struct gdma_context *gc;
	int err;

	gc = mdev_to_gc(mdev);
	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_ADD;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_REMOVE;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
{
	struct mana_rnic_config_mac_addr_resp resp = {};
	struct mana_rnic_config_mac_addr_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = op;
	copy_in_reverse(req.mac_addr, mac, ETH_ALEN);

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_cq_resp resp = {};
	struct mana_rnic_create_cq_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.gdma_region = cq->queue.gdma_region;
	req.eq_id = mdev->eqs[cq->comp_vector]->id;
	req.doorbell_page = doorbell;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create cq err %d", err);
		return err;
	}

	cq->queue.id = resp.cq_id;
	cq->cq_handle = resp.cq_handle;
	/* The GDMA region is now owned by the CQ handle */
	cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	return 0;
}

int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_destroy_cq_resp resp = {};
	struct mana_rnic_destroy_cq_req req = {};
	int err;

	if (cq->cq_handle == INVALID_MANA_HANDLE)
		return 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.cq_handle = cq->cq_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
		return err;
	}

	return 0;
}

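/*
 * An RC QP is backed by MANA_RC_QUEUE_TYPE_MAX GDMA queues. Their DMA
 * regions are handed to the hardware in the create request; on success the
 * response returns the queue IDs and ownership of the regions moves to the
 * RNIC QP handle, so the local copies are invalidated.
 */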
int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
			    struct ib_qp_init_attr *attr, u32 doorbell, u64 flags)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_qp_resp resp = {};
	struct mana_rnic_create_qp_req req = {};
	int err, i;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.pd_handle = pd->pd_handle;
	req.send_cq_handle = send_cq->cq_handle;
	req.recv_cq_handle = recv_cq->cq_handle;
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++)
		req.dma_region[i] = qp->rc_qp.queues[i].gdma_region;
	req.doorbell_page = doorbell;
	req.max_send_wr = attr->cap.max_send_wr;
	req.max_recv_wr = attr->cap.max_recv_wr;
	req.max_send_sge = attr->cap.max_send_sge;
	req.max_recv_sge = attr->cap.max_recv_sge;
	req.flags = flags;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create rc qp err %d", err);
		return err;
	}
	qp->qp_handle = resp.rc_qp_handle;
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++) {
		qp->rc_qp.queues[i].id = resp.queue_ids[i];
		/* The GDMA regions are now owned by the RNIC QP handle */
		qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
	}

	return 0;
}

int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	struct mana_rnic_destroy_rc_qp_resp resp = {0};
	struct mana_rnic_destroy_rc_qp_req req = {0};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.rc_qp_handle = qp->qp_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy rc qp err %d", err);
		return err;
	}

	return 0;
}