// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>

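/* Reserve a message index (and thus a slot in the TX message buffer) from
 * the inflight-message bitmap; the semaphore bounds the number of
 * concurrent requests to the configured queue depth.
 */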
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
        struct gdma_resource *r = &hwc->inflight_msg_res;
        unsigned long flags;
        u32 index;

        down(&hwc->sema); /* Honor the ordering */

        spin_lock_irqsave(&r->lock, flags);

        index = find_first_zero_bit(hwc->inflight_msg_res.map,
                                    hwc->inflight_msg_res.size);

        bitmap_set(hwc->inflight_msg_res.map, index, 1);

        spin_unlock_irqrestore(&r->lock, flags);

        *msg_id = index;

        return 0;
}

static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
        struct gdma_resource *r = &hwc->inflight_msg_res;
        unsigned long flags;

        spin_lock_irqsave(&r->lock, flags);
        bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
        spin_unlock_irqrestore(&r->lock, flags);

        up(&hwc->sema);
}

static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
                                    const struct gdma_resp_hdr *resp_msg,
                                    u32 resp_len)
{
        if (resp_len < sizeof(*resp_msg))
                return -EPROTO;

        if (resp_len > caller_ctx->output_buflen)
                return -EPROTO;

        return 0;
}

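/* Post a receive WQE pointing at the work request's buffer, so the
 * hardware has somewhere to land the next response message.
 */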
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
                                struct hwc_work_request *req)
{
        struct device *dev = hwc_rxq->hwc->dev;
        struct gdma_sge *sge;
        int err;

        sge = &req->sge;
        sge->address = (u64)req->buf_sge_addr;
        sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
        sge->size = req->buf_len;

        memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
        req->wqe_req.sgl = sge;
        req->wqe_req.num_sge = 1;
        req->wqe_req.client_data_unit = 0;

        err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
        if (err)
                dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
        return err;
}

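/* Validate a response and copy it into the waiting caller's buffer, then
 * repost the RX WQE and wake the caller via its completion.
 */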
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
                                 struct hwc_work_request *rx_req)
{
        const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
        struct hwc_caller_ctx *ctx;
        int err;

        if (!test_bit(resp_msg->response.hwc_msg_id,
                      hwc->inflight_msg_res.map)) {
                dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
                        resp_msg->response.hwc_msg_id);
                mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
                return;
        }

        ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
        err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
        if (err)
                goto out;

        ctx->status_code = resp_msg->status;

        memcpy(ctx->output_buf, resp_msg, resp_len);
out:
        ctx->error = err;

        /* Must post rx wqe before complete(), otherwise the next rx may
         * hit no_wqe error.
         */
        mana_hwc_post_rx_wqe(hwc->rxq, rx_req);

        complete(&ctx->comp_event);
}

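/* EQ callback used while the channel is being established: the SoC reports
 * queue IDs, limits and other parameters as init events, which are stored
 * here for mana_hwc_establish_channel() to pick up.
 */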
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
                                        struct gdma_event *event)
{
        struct hw_channel_context *hwc = ctx;
        struct gdma_dev *gd = hwc->gdma_dev;
        union hwc_init_type_data type_data;
        union hwc_init_eq_id_db eq_db;
        u32 type, val;

        switch (event->type) {
        case GDMA_EQE_HWC_INIT_EQ_ID_DB:
                eq_db.as_uint32 = event->details[0];
                hwc->cq->gdma_eq->id = eq_db.eq_id;
                gd->doorbell = eq_db.doorbell;
                break;

        case GDMA_EQE_HWC_INIT_DATA:
                type_data.as_uint32 = event->details[0];
                type = type_data.type;
                val = type_data.value;

                switch (type) {
                case HWC_INIT_DATA_CQID:
                        hwc->cq->gdma_cq->id = val;
                        break;

                case HWC_INIT_DATA_RQID:
                        hwc->rxq->gdma_wq->id = val;
                        break;

                case HWC_INIT_DATA_SQID:
                        hwc->txq->gdma_wq->id = val;
                        break;

                case HWC_INIT_DATA_QUEUE_DEPTH:
                        hwc->hwc_init_q_depth_max = (u16)val;
                        break;

                case HWC_INIT_DATA_MAX_REQUEST:
                        hwc->hwc_init_max_req_msg_size = val;
                        break;

                case HWC_INIT_DATA_MAX_RESPONSE:
                        hwc->hwc_init_max_resp_msg_size = val;
                        break;

                case HWC_INIT_DATA_MAX_NUM_CQS:
                        gd->gdma_context->max_num_cqs = val;
                        break;

                case HWC_INIT_DATA_PDID:
                        hwc->gdma_dev->pdid = val;
                        break;

                case HWC_INIT_DATA_GPA_MKEY:
                        hwc->rxq->msg_buf->gpa_mkey = val;
                        hwc->txq->msg_buf->gpa_mkey = val;
                        break;

                case HWC_INIT_DATA_PF_DEST_RQ_ID:
                        hwc->pf_dest_vrq_id = val;
                        break;

                case HWC_INIT_DATA_PF_DEST_CQ_ID:
                        hwc->pf_dest_vrcq_id = val;
                        break;
                }

                break;

        case GDMA_EQE_HWC_INIT_DONE:
                complete(&hwc->hwc_init_eqe_comp);
                break;

        case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
                type_data.as_uint32 = event->details[0];
                type = type_data.type;
                val = type_data.value;

                switch (type) {
                case HWC_DATA_CFG_HWC_TIMEOUT:
                        hwc->hwc_timeout = val;
                        break;

                default:
                        dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
                        break;
                }

                break;

        default:
                dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
                /* Ignore unknown events, which should never happen. */
                break;
        }
}

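/* CQ-level RX handler: map the completed receive back to its work request
 * by the SGE address, then hand the response message to
 * mana_hwc_handle_resp().
 */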
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
                                      const struct hwc_rx_oob *rx_oob)
{
        struct hw_channel_context *hwc = ctx;
        struct hwc_wq *hwc_rxq = hwc->rxq;
        struct hwc_work_request *rx_req;
        struct gdma_resp_hdr *resp;
        struct gdma_wqe *dma_oob;
        struct gdma_queue *rq;
        struct gdma_sge *sge;
        u64 rq_base_addr;
        u64 rx_req_idx;
        u8 *wqe;

        if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
                return;

        rq = hwc_rxq->gdma_wq;
        wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
        dma_oob = (struct gdma_wqe *)wqe;

        sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

        /* Select the RX work request for virtual address and for reposting. */
        rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
        rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

        rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
        resp = (struct gdma_resp_hdr *)rx_req->buf_va;

        if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
                dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
                        resp->response.hwc_msg_id);
                return;
        }

        mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);

        /* Can no longer use 'resp', because the buffer is posted to the HW
         * in mana_hwc_handle_resp() above.
         */
}

static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
                                      const struct hwc_rx_oob *rx_oob)
{
        struct hw_channel_context *hwc = ctx;
        struct hwc_wq *hwc_txq = hwc->txq;

        WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

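/* Thin wrappers around mana_gd_create_hwc_queue() that fill in a
 * gdma_queue_spec for the HWC's WQ, CQ and EQ respectively.
 */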
static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
                                   enum gdma_queue_type type, u64 queue_size,
                                   struct gdma_queue **queue)
{
        struct gdma_queue_spec spec = {};

        if (type != GDMA_SQ && type != GDMA_RQ)
                return -EINVAL;

        spec.type = type;
        spec.monitor_avl_buf = false;
        spec.queue_size = queue_size;

        return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
                                   u64 queue_size,
                                   void *ctx, gdma_cq_callback *cb,
                                   struct gdma_queue *parent_eq,
                                   struct gdma_queue **queue)
{
        struct gdma_queue_spec spec = {};

        spec.type = GDMA_CQ;
        spec.monitor_avl_buf = false;
        spec.queue_size = queue_size;
        spec.cq.context = ctx;
        spec.cq.callback = cb;
        spec.cq.parent_eq = parent_eq;

        return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
                                   u64 queue_size,
                                   void *ctx, gdma_eq_callback *cb,
                                   struct gdma_queue **queue)
{
        struct gdma_queue_spec spec = {};

        spec.type = GDMA_EQ;
        spec.monitor_avl_buf = false;
        spec.queue_size = queue_size;
        spec.eq.context = ctx;
        spec.eq.callback = cb;
        spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
        spec.eq.msix_index = 0;

        return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

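/* CQ callback: drain all pending completions and dispatch each one to the
 * TX or RX event handler, then re-arm the CQ.
 */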
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
        struct hwc_rx_oob comp_data = {};
        struct gdma_comp *completions;
        struct hwc_cq *hwc_cq = ctx;
        int comp_read, i;

        WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

        completions = hwc_cq->comp_buf;
        comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
        WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

        for (i = 0; i < comp_read; ++i) {
                comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

                if (completions[i].is_sq)
                        hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
                                                 completions[i].wq_num,
                                                 &comp_data);
                else
                        hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
                                                 completions[i].wq_num,
                                                 &comp_data);
        }

        mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
        kfree(hwc_cq->comp_buf);

        if (hwc_cq->gdma_cq)
                mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

        if (hwc_cq->gdma_eq)
                mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

        kfree(hwc_cq);
}

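/* Create the shared HWC completion queue and its parent event queue, sized
 * for q_depth completions, and wire up the RX/TX event handlers.
 */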
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
                              gdma_eq_callback *callback, void *ctx,
                              hwc_rx_event_handler_t *rx_ev_hdlr,
                              void *rx_ev_ctx,
                              hwc_tx_event_handler_t *tx_ev_hdlr,
                              void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
        struct gdma_queue *eq, *cq;
        struct gdma_comp *comp_buf;
        struct hwc_cq *hwc_cq;
        u32 eq_size, cq_size;
        int err;

        eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
        if (eq_size < MANA_MIN_QSIZE)
                eq_size = MANA_MIN_QSIZE;

        cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
        if (cq_size < MANA_MIN_QSIZE)
                cq_size = MANA_MIN_QSIZE;

        hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
        if (!hwc_cq)
                return -ENOMEM;

        err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
        if (err) {
                dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
                goto out;
        }
        hwc_cq->gdma_eq = eq;

        err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
                                      eq, &cq);
        if (err) {
                dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
                goto out;
        }
        hwc_cq->gdma_cq = cq;

        comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
        if (!comp_buf) {
                err = -ENOMEM;
                goto out;
        }

        hwc_cq->comp_buf = comp_buf;
        hwc_cq->queue_depth = q_depth;
        hwc_cq->rx_event_handler = rx_ev_hdlr;
        hwc_cq->rx_event_ctx = rx_ev_ctx;
        hwc_cq->tx_event_handler = tx_ev_hdlr;
        hwc_cq->tx_event_ctx = tx_ev_ctx;

        *hwc_cq_ptr = hwc_cq;
        return 0;
out:
        mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
        return err;
}

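/* Allocate one contiguous DMA buffer and carve it into q_depth fixed-size
 * message slots, one per hwc_work_request.
 */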
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
                                  u32 max_msg_size,
                                  struct hwc_dma_buf **dma_buf_ptr)
{
        struct gdma_context *gc = hwc->gdma_dev->gdma_context;
        struct hwc_work_request *hwc_wr;
        struct hwc_dma_buf *dma_buf;
        struct gdma_mem_info *gmi;
        void *virt_addr;
        u32 buf_size;
        u8 *base_pa;
        int err;
        u16 i;

        dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
        if (!dma_buf)
                return -ENOMEM;

        dma_buf->num_reqs = q_depth;

        buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);

        gmi = &dma_buf->mem_info;
        err = mana_gd_alloc_memory(gc, buf_size, gmi);
        if (err) {
                dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
                goto out;
        }

        virt_addr = dma_buf->mem_info.virt_addr;
        base_pa = (u8 *)dma_buf->mem_info.dma_handle;

        for (i = 0; i < q_depth; i++) {
                hwc_wr = &dma_buf->reqs[i];

                hwc_wr->buf_va = virt_addr + i * max_msg_size;
                hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

                hwc_wr->buf_len = max_msg_size;
        }

        *dma_buf_ptr = dma_buf;
        return 0;
out:
        kfree(dma_buf);
        return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
                                     struct hwc_dma_buf *dma_buf)
{
        if (!dma_buf)
                return;

        mana_gd_free_memory(&dma_buf->mem_info);

        kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
                                struct hwc_wq *hwc_wq)
{
        mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

        if (hwc_wq->gdma_wq)
                mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
                                      hwc_wq->gdma_wq);

        kfree(hwc_wq);
}

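/* Create one HWC work queue (SQ or RQ) along with the DMA message buffer
 * that backs its WQEs.
 */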
static int mana_hwc_create_wq(struct hw_channel_context *hwc,
                              enum gdma_queue_type q_type, u16 q_depth,
                              u32 max_msg_size, struct hwc_cq *hwc_cq,
                              struct hwc_wq **hwc_wq_ptr)
{
        struct gdma_queue *queue;
        struct hwc_wq *hwc_wq;
        u32 queue_size;
        int err;

        WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

        if (q_type == GDMA_RQ)
                queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
        else
                queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

        if (queue_size < MANA_MIN_QSIZE)
                queue_size = MANA_MIN_QSIZE;

        hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
        if (!hwc_wq)
                return -ENOMEM;

        err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
        if (err)
                goto out;

        hwc_wq->hwc = hwc;
        hwc_wq->gdma_wq = queue;
        hwc_wq->queue_depth = q_depth;
        hwc_wq->hwc_cq = hwc_cq;

        err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
                                     &hwc_wq->msg_buf);
        if (err)
                goto out;

        *hwc_wq_ptr = hwc_wq;
        return 0;
out:
        if (err)
                mana_hwc_destroy_wq(hwc, hwc_wq);

        return err;
}

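/* Build the inline TX OOB and the single SGE for a request message and post
 * it on the HWC send queue, ringing the doorbell.
 */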
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
                                struct hwc_work_request *req,
                                u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
                                bool dest_pf)
{
        struct device *dev = hwc_txq->hwc->dev;
        struct hwc_tx_oob *tx_oob;
        struct gdma_sge *sge;
        int err;

        if (req->msg_size == 0 || req->msg_size > req->buf_len) {
                dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
                        req->msg_size, req->buf_len);
                return -EINVAL;
        }

        tx_oob = &req->tx_oob;

        tx_oob->vrq_id = dest_virt_rq_id;
        tx_oob->dest_vfid = 0;
        tx_oob->vrcq_id = dest_virt_rcq_id;
        tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
        tx_oob->loopback = false;
        tx_oob->lso_override = false;
        tx_oob->dest_pf = dest_pf;
        tx_oob->vsq_id = hwc_txq->gdma_wq->id;

        sge = &req->sge;
        sge->address = (u64)req->buf_sge_addr;
        sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
        sge->size = req->msg_size;

        memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
        req->wqe_req.sgl = sge;
        req->wqe_req.num_sge = 1;
        req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
        req->wqe_req.inline_oob_data = tx_oob;
        req->wqe_req.client_data_unit = 0;

        err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
        if (err)
                dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
        return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
                                      u16 num_msg)
{
        int err;

        sema_init(&hwc->sema, num_msg);

        err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
        if (err)
                dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
        return err;
}

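/* Prime the RQ with receive WQEs, allocate the per-message caller contexts,
 * and verify the EQ delivers events via mana_gd_test_eq().
 */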
static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
                                 u32 max_req_msg_size, u32 max_resp_msg_size)
{
        struct gdma_context *gc = hwc->gdma_dev->gdma_context;
        struct hwc_wq *hwc_rxq = hwc->rxq;
        struct hwc_work_request *req;
        struct hwc_caller_ctx *ctx;
        int err;
        int i;

        /* Post all WQEs on the RQ */
        for (i = 0; i < q_depth; i++) {
                req = &hwc_rxq->msg_buf->reqs[i];
                err = mana_hwc_post_rx_wqe(hwc_rxq, req);
                if (err)
                        return err;
        }

        ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        for (i = 0; i < q_depth; ++i)
                init_completion(&ctx[i].comp_event);

        hwc->caller_ctx = ctx;

        return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

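/* Hand the queue addresses to the SoC over the shared-memory channel and
 * wait for the INIT_DONE event; the limits reported back through
 * mana_hwc_init_event_handler() are returned to the caller.
 */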
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
                                      u32 *max_req_msg_size,
                                      u32 *max_resp_msg_size)
{
        struct hw_channel_context *hwc = gc->hwc.driver_data;
        struct gdma_queue *rq = hwc->rxq->gdma_wq;
        struct gdma_queue *sq = hwc->txq->gdma_wq;
        struct gdma_queue *eq = hwc->cq->gdma_eq;
        struct gdma_queue *cq = hwc->cq->gdma_cq;
        int err;

        init_completion(&hwc->hwc_init_eqe_comp);

        err = mana_smc_setup_hwc(&gc->shm_channel, false,
                                 eq->mem_info.dma_handle,
                                 cq->mem_info.dma_handle,
                                 rq->mem_info.dma_handle,
                                 sq->mem_info.dma_handle,
                                 eq->eq.msix_index);
        if (err)
                return err;

        if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
                return -ETIMEDOUT;

        *q_depth = hwc->hwc_init_q_depth_max;
        *max_req_msg_size = hwc->hwc_init_max_req_msg_size;
        *max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

        /* Both were set in mana_hwc_init_event_handler(). */
        if (WARN_ON(cq->id >= gc->max_num_cqs))
                return -EPROTO;

        gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
        if (!gc->cq_table)
                return -ENOMEM;

        gc->cq_table[cq->id] = cq;

        return 0;
}

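/* Allocate the software-side objects (inflight map, CQ, RQ, SQ) without
 * touching the device; on failure the caller tears everything down.
 */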
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
                                u32 max_req_msg_size, u32 max_resp_msg_size)
{
        int err;

        err = mana_hwc_init_inflight_msg(hwc, q_depth);
        if (err)
                return err;

        /* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
         * queue depth and RQ queue depth.
         */
        err = mana_hwc_create_cq(hwc, q_depth * 2,
                                 mana_hwc_init_event_handler, hwc,
                                 mana_hwc_rx_event_handler, hwc,
                                 mana_hwc_tx_event_handler, hwc, &hwc->cq);
        if (err) {
                dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
                goto out;
        }

        err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
                                 hwc->cq, &hwc->rxq);
        if (err) {
                dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
                goto out;
        }

        err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
                                 hwc->cq, &hwc->txq);
        if (err) {
                dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
                goto out;
        }

        hwc->num_inflight_msg = q_depth;
        hwc->max_req_msg_size = max_req_msg_size;

        return 0;
out:
        /* mana_hwc_create_channel() will do the cleanup.*/
        return err;
}

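/* Bring up the hardware channel: build the queues, establish the channel
 * with the device, and run a self-test before it is used for management
 * requests.
 */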
int mana_hwc_create_channel(struct gdma_context *gc)
{
        u32 max_req_msg_size, max_resp_msg_size;
        struct gdma_dev *gd = &gc->hwc;
        struct hw_channel_context *hwc;
        u16 q_depth_max;
        int err;

        hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
        if (!hwc)
                return -ENOMEM;

        gd->gdma_context = gc;
        gd->driver_data = hwc;
        hwc->gdma_dev = gd;
        hwc->dev = gc->dev;
        hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;

        /* HWC's instance number is always 0. */
        gd->dev_id.as_uint32 = 0;
        gd->dev_id.type = GDMA_DEVICE_HWC;

        gd->pdid = INVALID_PDID;
        gd->doorbell = INVALID_DOORBELL;

        /* mana_hwc_init_queues() only creates the required data structures,
         * and doesn't touch the HWC device.
         */
        err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
                                   HW_CHANNEL_MAX_REQUEST_SIZE,
                                   HW_CHANNEL_MAX_RESPONSE_SIZE);
        if (err) {
                dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
                goto out;
        }

        err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
                                         &max_resp_msg_size);
        if (err) {
                dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
                goto out;
        }

        err = mana_hwc_test_channel(gc->hwc.driver_data,
                                    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
                                    max_req_msg_size, max_resp_msg_size);
        if (err) {
                dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
                goto out;
        }

        return 0;
out:
        mana_hwc_destroy_channel(gc);
        return err;
}

void mana_hwc_destroy_channel(struct gdma_context *gc)
{
        struct hw_channel_context *hwc = gc->hwc.driver_data;

        if (hwc == NULL)
                return;

        /* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
         * non-zero, the HWC worked and we should tear down the HWC here.
         */
        if (gc->max_num_cqs > 0) {
                mana_smc_teardown_hwc(&gc->shm_channel, false);
                gc->max_num_cqs = 0;
        }

        kfree(hwc->caller_ctx);
        hwc->caller_ctx = NULL;

        if (hwc->txq)
                mana_hwc_destroy_wq(hwc, hwc->txq);

        if (hwc->rxq)
                mana_hwc_destroy_wq(hwc, hwc->rxq);

        if (hwc->cq)
                mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

        mana_gd_free_res_map(&hwc->inflight_msg_res);

        hwc->num_inflight_msg = 0;

        hwc->gdma_dev->doorbell = INVALID_DOORBELL;
        hwc->gdma_dev->pdid = INVALID_PDID;

        hwc->hwc_timeout = 0;

        kfree(hwc);
        gc->hwc.driver_data = NULL;
        gc->hwc.gdma_context = NULL;

        vfree(gc->cq_table);
        gc->cq_table = NULL;
}

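/* Send one request message over the HWC and wait (up to hwc_timeout ms) for
 * the matching response; the response payload is copied into 'resp' by
 * mana_hwc_handle_resp() before the completion fires.
 */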
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
                          const void *req, u32 resp_len, void *resp)
{
        struct gdma_context *gc = hwc->gdma_dev->gdma_context;
        struct hwc_work_request *tx_wr;
        struct hwc_wq *txq = hwc->txq;
        struct gdma_req_hdr *req_msg;
        struct hwc_caller_ctx *ctx;
        u32 dest_vrcq = 0;
        u32 dest_vrq = 0;
        u16 msg_id;
        int err;

        mana_hwc_get_msg_index(hwc, &msg_id);

        tx_wr = &txq->msg_buf->reqs[msg_id];

        if (req_len > tx_wr->buf_len) {
                dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
                        tx_wr->buf_len);
                err = -EINVAL;
                goto out;
        }

        ctx = hwc->caller_ctx + msg_id;
        ctx->output_buf = resp;
        ctx->output_buflen = resp_len;

        req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;

        memcpy(req_msg, req, req_len);

        req_msg->req.hwc_msg_id = msg_id;

        tx_wr->msg_size = req_len;

        if (gc->is_pf) {
                dest_vrq = hwc->pf_dest_vrq_id;
                dest_vrcq = hwc->pf_dest_vrcq_id;
        }

        err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
        if (err) {
                dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
                goto out;
        }

        if (!wait_for_completion_timeout(&ctx->comp_event,
                                         (msecs_to_jiffies(hwc->hwc_timeout)))) {
                dev_err(hwc->dev, "HWC: Request timed out!\n");
                err = -ETIMEDOUT;
                goto out;
        }

        if (ctx->error) {
                err = ctx->error;
                goto out;
        }

        if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
                dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
                        ctx->status_code);
                err = -EPROTO;
                goto out;
        }
out:
        mana_hwc_put_msg_index(hwc, msg_id);

        return err;
}