// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
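/*
 * Program RX steering on the vPort backing an RSS QP: set the default
 * receive object, load the indirection table and Toeplitz hash key, and
 * enable RSS when the indirection table has more than one entry.
 */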
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct gdma_context *gc;
	u32 req_buf_size;
	int i, err;

	gc = mdev_to_gc(dev);

	req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_DEF_SIZE);
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = mpc->port_handle;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = gc->mana.dev_id;

	/* If there are more than 1 entries in indirection table, enable RSS */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_DEF_SIZE;
	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
					 indir_tab);
	req->update_indir_tab = true;
	req->cqe_coalescing_enable = 1;

	/* The ind table passed to the hardware must have
	 * MANA_INDIRECT_TABLE_DEF_SIZE entries. Adjust the verb
	 * ind_table to MANA_INDIRECT_TABLE_DEF_SIZE if required
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_DEF_SIZE; i++) {
		req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req->indir_tab[i]);
	}

	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);

out:
	kfree(req);
	return err;
}

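/*
 * Create an RSS "QP": one hardware WQ object per entry of the receive WQ
 * indirection table, all on the port handle shared with the Ethernet
 * driver, followed by vPort steering configuration over those objects.
 */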
static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(pd->device, struct mana_ib_dev, ib_dev);
	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	mana_handle_t *mana_ind_table;
	struct mana_port_context *mpc;
	unsigned int ind_tbl_size;
	struct net_device *ndev;
	struct mana_ib_cq *cq;
	struct mana_ib_wq *wq;
	struct mana_eq *eq;
	struct ib_cq *ibcq;
	struct ib_wq *ibwq;
	int i = 0;
	u32 port;
	int ret;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy from udata for create rss-qp, err %d\n",
			  ret);
		return ret;
	}

	if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_wr %d exceeding limit\n",
			  attr->cap.max_recv_wr);
		return -EINVAL;
	}

	if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_sge %d exceeding limit\n",
			  attr->cap.max_recv_sge);
		return -EINVAL;
	}

	ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
	if (ind_tbl_size > MANA_INDIRECT_TABLE_DEF_SIZE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Indirect table size %d exceeding limit\n",
			  ind_tbl_size);
		return -EINVAL;
	}

	if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
		ibdev_dbg(&mdev->ib_dev,
			  "RX Hash function is not supported, %d\n",
			  ucmd.rx_hash_function);
		return -EINVAL;
	}

	/* IB ports start with 1, MANA start with 0 */
	port = ucmd.port;
	ndev = mana_ib_get_netdev(pd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);

	ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
		  ucmd.rx_hash_function, port);

	mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
				 GFP_KERNEL);
	if (!mana_ind_table) {
		ret = -ENOMEM;
		goto fail;
	}

	qp->port = port;

	for (i = 0; i < ind_tbl_size; i++) {
		struct mana_obj_spec wq_spec = {};
		struct mana_obj_spec cq_spec = {};

		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);

		ibcq = ibwq->cq;
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		wq_spec.gdma_region = wq->queue.gdma_region;
		wq_spec.queue_size = wq->wq_buf_size;

		cq_spec.gdma_region = cq->queue.gdma_region;
		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
		cq_spec.modr_ctx_id = 0;
		eq = &mpc->ac->eqs[cq->comp_vector];
		cq_spec.attached_eq = eq->eq->id;

		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
					 &wq_spec, &cq_spec, &wq->rx_object);
		if (ret) {
			/* Do cleanup starting with index i-1 */
			i--;
			goto fail;
		}

		/* The GDMA regions are now owned by the WQ object */
		wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

		wq->queue.id = wq_spec.queue_index;
		cq->queue.id = cq_spec.queue_index;

		ibdev_dbg(&mdev->ib_dev,
			  "rx_object 0x%llx wq id %llu cq id %llu\n",
			  wq->rx_object, wq->queue.id, cq->queue.id);

		resp.entries[i].cqid = cq->queue.id;
		resp.entries[i].wqid = wq->queue.id;

		mana_ind_table[i] = wq->rx_object;

		/* Create CQ table entry */
		ret = mana_ib_install_cq_cb(mdev, cq);
		if (ret)
			goto fail;
	}
	resp.num_entries = i;

	ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
					 mana_ind_table,
					 ind_tbl->log_ind_tbl_size,
					 ucmd.rx_hash_key_len,
					 ucmd.rx_hash_key);
	if (ret)
		goto fail;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata create rss-qp, %d\n",
			  ret);
		goto fail;
	}

	kfree(mana_ind_table);

	return 0;

fail:
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		ibcq = ibwq->cq;
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		mana_ib_remove_cq_cb(mdev, cq);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(mana_ind_table);

	return ret;
}

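/*
 * Create a raw-packet send QP: register the user-provided send queue as a
 * GDMA work queue on the Ethernet port handle and return the resulting
 * queue IDs to user space.
 */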
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct mana_ib_create_qp_resp resp = {};
	struct mana_ib_create_qp ucmd = {};
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_eq *eq;
	int eq_vec;
	u32 port;
	int err;

	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	port = ucmd.port;
	ndev = mana_ib_get_netdev(ibpd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

	err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create queue for create qp-raw, err %d\n", err);
		goto err_free_vport;
	}

	/* Create a WQ on the same port handle used by the Ethernet */
	wq_spec.gdma_region = qp->raw_sq.gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->queue.gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	eq_vec = send_cq->comp_vector;
	eq = &mpc->ac->eqs[eq_vec];
	cq_spec.attached_eq = eq->eq->id;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->qp_handle);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_queue;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	qp->raw_sq.id = wq_spec.queue_index;
	send_cq->queue.id = cq_spec.queue_index;

	/* Create CQ table entry */
	err = mana_ib_install_cq_cb(mdev, send_cq);
	if (err)
		goto err_destroy_wq_obj;

	ibdev_dbg(&mdev->ib_dev,
		  "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
		  qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);

	resp.sqid = qp->raw_sq.id;
	resp.cqid = send_cq->queue.id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy udata for create qp-raw, %d\n",
			  err);
		goto err_remove_cq_cb;
	}

	return 0;

err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, send_cq);

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

err_destroy_queue:
	mana_ib_destroy_queue(mdev, &qp->raw_sq);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port);

	return err;
}

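/*
 * RC QPs are kept in an xarray keyed by QP number so they can be looked up
 * by the rest of the driver; the refcount/completion pair lets removal wait
 * until every outstanding reference has been dropped.
 */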
static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	refcount_set(&qp->refcount, 1);
	init_completion(&qp->free);
	return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
			     GFP_KERNEL);
}

static void mana_table_remove_qp(struct mana_ib_dev *mdev,
				 struct mana_ib_qp *qp)
{
	xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
	mana_put_qp_ref(qp);
	wait_for_completion(&qp->free);
}

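/*
 * Create an RC QP: build the per-type hardware queues from user buffers
 * (the FMR send queue is skipped for user-level QPs), ask the RNIC to
 * create the QP, then report the queue IDs back to user space.
 */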
static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
				struct ib_qp_init_attr *attr, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_create_rc_qp_resp resp = {};
	struct mana_ib_ucontext *mana_ucontext;
	struct mana_ib_create_rc_qp ucmd = {};
	int i, err, j;
	u64 flags;
	u32 doorbell;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, ibucontext);
	doorbell = mana_ucontext->doorbell;
	flags = MANA_RC_FLAG_NO_FMR;
	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to copy from udata, %d\n", err);
		return err;
	}

	for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
		/* skip FMR for user-level RC QPs */
		if (i == MANA_RC_SEND_QUEUE_FMR) {
			qp->rc_qp.queues[i].id = INVALID_QUEUE_ID;
			qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
			continue;
		}

		err = mana_ib_create_queue(mdev, ucmd.queue_buf[j], ucmd.queue_size[j],
					   &qp->rc_qp.queues[i]);
		if (err) {
			ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n", i, err);
			goto destroy_queues;
		}
		j++;
	}

	err = mana_ib_gd_create_rc_qp(mdev, qp, attr, doorbell, flags);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create rc qp %d\n", err);
		goto destroy_queues;
	}
	qp->ibqp.qp_num = qp->rc_qp.queues[MANA_RC_RECV_QUEUE_RESPONDER].id;
	qp->port = attr->port_num;

	for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
		if (i == MANA_RC_SEND_QUEUE_FMR)
			continue;
		resp.queue_id[j] = qp->rc_qp.queues[i].id;
		j++;
	}

	err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
		goto destroy_qp;
	}

	err = mana_table_store_qp(mdev, qp);
	if (err)
		goto destroy_qp;

	return 0;

destroy_qp:
	mana_ib_gd_destroy_rc_qp(mdev, qp);
destroy_queues:
	while (i-- > 0)
		mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
	return err;
}

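/* Verbs entry point: dispatch QP creation by QP type. */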
int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	switch (attr->qp_type) {
	case IB_QPT_RAW_PACKET:
		/* When rwq_ind_tbl is used, it's for creating WQs for RSS */
		if (attr->rwq_ind_tbl)
			return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
						     udata);

		return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
	case IB_QPT_RC:
		return mana_ib_create_rc_qp(ibqp, ibqp->pd, attr, udata);
	default:
		ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
			  attr->qp_type);
	}

	return -EINVAL;
}

static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibqp->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_rnic_set_qp_state_resp resp = {};
	struct mana_rnic_set_qp_state_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.qp_handle = qp->qp_handle;
	req.qp_state = attr->qp_state;
	req.attr_mask = attr_mask;
	req.path_mtu = attr->path_mtu;
	req.rq_psn = attr->rq_psn;
	req.sq_psn = attr->sq_psn;
	req.dest_qpn = attr->dest_qp_num;
	req.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	req.retry_cnt = attr->retry_cnt;
	req.rnr_retry = attr->rnr_retry;
	req.min_rnr_timer = attr->min_rnr_timer;
	if (attr_mask & IB_QP_AV) {
		ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
		if (!ndev) {
			ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
				  ibqp->port, ibqp->qp_num);
			return -EINVAL;
		}
		mpc = netdev_priv(ndev);
		copy_in_reverse(req.ah_attr.src_mac, mpc->mac_addr, ETH_ALEN);
		copy_in_reverse(req.ah_attr.dest_mac, attr->ah_attr.roce.dmac, ETH_ALEN);
		copy_in_reverse(req.ah_attr.src_addr, attr->ah_attr.grh.sgid_attr->gid.raw,
				sizeof(union ib_gid));
		copy_in_reverse(req.ah_attr.dest_addr, attr->ah_attr.grh.dgid.raw,
				sizeof(union ib_gid));
		if (rdma_gid_attr_network_type(attr->ah_attr.grh.sgid_attr) == RDMA_NETWORK_IPV4) {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV4;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV4;
		} else {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV6;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV6;
		}
		req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
		req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
							  ibqp->qp_num, attr->dest_qp_num);
		req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class;
		req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed modify qp err %d", err);
		return err;
	}

	return 0;
}

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	switch (ibqp->qp_type) {
	case IB_QPT_RC:
		return mana_ib_gd_modify_qp(ibqp, attr, attr_mask, udata);
	default:
		ibdev_dbg(ibqp->device, "Modify QP type %u not supported", ibqp->qp_type);
		return -EOPNOTSUPP;
	}
}

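/* Tear down the hardware WQ object created for each indirection-table entry. */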
static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
				  struct ib_rwq_ind_table *ind_tbl,
				  struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_wq *wq;
	struct ib_wq *ibwq;
	int i;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
			  wq->rx_object);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	return 0;
}

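/* Destroy a raw-packet send QP and release its vPort configuration. */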
static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct ib_pd *ibpd = qp->ibqp.pd;
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_pd *pd;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);
	pd = container_of(ibpd, struct mana_ib_pd, ibpd);

	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

	mana_ib_destroy_queue(mdev, &qp->raw_sq);

	mana_ib_uncfg_vport(mdev, pd, qp->port);

	return 0;
}

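/* Remove an RC QP from the QP table, destroy it on the RNIC and free its queues. */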
static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	int i;

	mana_table_remove_qp(mdev, qp);

	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_rc_qp(mdev, qp);
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i)
		mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);

	return 0;
}

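/* Verbs entry point: dispatch QP destruction by QP type. */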
int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

	switch (ibqp->qp_type) {
	case IB_QPT_RAW_PACKET:
		if (ibqp->rwq_ind_tbl)
			return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
						      udata);

		return mana_ib_destroy_qp_raw(qp, udata);
	case IB_QPT_RC:
		return mana_ib_destroy_rc_qp(qp, udata);
	default:
		ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
			  ibqp->qp_type);
	}

	return -ENOENT;
}