1 // SPDX-License-Identifier: GPL-2.0-only
3 * Linux network driver for QLogic BR-series Converged Network Adapter.
6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
7 * Copyright (c) 2014-2015 QLogic Corporation
12 /* MSGQ module source file. */
/* Invoke and clear a command-queue entry's completion callback.
 *
 * The cbfn/cbarg pair is latched into locals and cleared on the entry
 * *before* the callback runs, so a callback that re-posts the same
 * entry does not observe stale callback state.  Wrapped in
 * do { } while (0) so the macro expands as a single statement and is
 * safe in an unbraced if/else arm (the original bare { } block is not).
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn)							\
		cbfn(cbarg, (_status));					\
} while (0)
31 static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq
*cmdq
);
32 static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq
*cmdq
);
43 bfa_fsm_state_decl(cmdq
, stopped
, struct bfa_msgq_cmdq
, enum cmdq_event
);
44 bfa_fsm_state_decl(cmdq
, init_wait
, struct bfa_msgq_cmdq
, enum cmdq_event
);
45 bfa_fsm_state_decl(cmdq
, ready
, struct bfa_msgq_cmdq
, enum cmdq_event
);
46 bfa_fsm_state_decl(cmdq
, dbell_wait
, struct bfa_msgq_cmdq
,
50 cmdq_sm_stopped_entry(struct bfa_msgq_cmdq
*cmdq
)
52 struct bfa_msgq_cmd_entry
*cmdq_ent
;
54 cmdq
->producer_index
= 0;
55 cmdq
->consumer_index
= 0;
59 cmdq
->bytes_to_copy
= 0;
60 while (!list_empty(&cmdq
->pending_q
)) {
61 cmdq_ent
= list_first_entry(&cmdq
->pending_q
,
62 struct bfa_msgq_cmd_entry
, qe
);
63 list_del(&cmdq_ent
->qe
);
64 call_cmdq_ent_cbfn(cmdq_ent
, BFA_STATUS_FAILED
);
69 cmdq_sm_stopped(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
73 bfa_fsm_set_state(cmdq
, cmdq_sm_init_wait
);
82 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
91 cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq
*cmdq
)
93 bfa_wc_down(&cmdq
->msgq
->init_wc
);
97 cmdq_sm_init_wait(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
102 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
106 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
109 case CMDQ_E_INIT_RESP
:
110 if (cmdq
->flags
& BFA_MSGQ_CMDQ_F_DB_UPDATE
) {
111 cmdq
->flags
&= ~BFA_MSGQ_CMDQ_F_DB_UPDATE
;
112 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
114 bfa_fsm_set_state(cmdq
, cmdq_sm_ready
);
/* Entry action for the ready state: nothing to do. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
128 cmdq_sm_ready(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
133 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
137 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
/* Entry action for dbell_wait: ring the producer-index doorbell. */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
152 cmdq_sm_dbell_wait(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
157 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
161 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
164 case CMDQ_E_DB_READY
:
165 if (cmdq
->flags
& BFA_MSGQ_CMDQ_F_DB_UPDATE
) {
166 cmdq
->flags
&= ~BFA_MSGQ_CMDQ_F_DB_UPDATE
;
167 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
169 bfa_fsm_set_state(cmdq
, cmdq_sm_ready
);
178 bfa_msgq_cmdq_dbell_ready(void *arg
)
180 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
181 bfa_fsm_send_event(cmdq
, CMDQ_E_DB_READY
);
185 bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq
*cmdq
)
187 struct bfi_msgq_h2i_db
*dbell
=
188 (struct bfi_msgq_h2i_db
*)(&cmdq
->dbell_mb
.msg
[0]);
190 memset(dbell
, 0, sizeof(struct bfi_msgq_h2i_db
));
191 bfi_h2i_set(dbell
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_DOORBELL_PI
, 0);
192 dbell
->mh
.mtag
.i2htok
= 0;
193 dbell
->idx
.cmdq_pi
= htons(cmdq
->producer_index
);
195 if (!bfa_nw_ioc_mbox_queue(cmdq
->msgq
->ioc
, &cmdq
->dbell_mb
,
196 bfa_msgq_cmdq_dbell_ready
, cmdq
)) {
197 bfa_msgq_cmdq_dbell_ready(cmdq
);
202 __cmd_copy(struct bfa_msgq_cmdq
*cmdq
, struct bfa_msgq_cmd_entry
*cmd
)
204 size_t len
= cmd
->msg_size
;
209 src
= (u8
*)cmd
->msg_hdr
;
210 dst
= (u8
*)cmdq
->addr
.kva
;
211 dst
+= (cmdq
->producer_index
* BFI_MSGQ_CMD_ENTRY_SIZE
);
214 to_copy
= (len
< BFI_MSGQ_CMD_ENTRY_SIZE
) ?
215 len
: BFI_MSGQ_CMD_ENTRY_SIZE
;
216 memcpy(dst
, src
, to_copy
);
218 src
+= BFI_MSGQ_CMD_ENTRY_SIZE
;
219 BFA_MSGQ_INDX_ADD(cmdq
->producer_index
, 1, cmdq
->depth
);
220 dst
= (u8
*)cmdq
->addr
.kva
;
221 dst
+= (cmdq
->producer_index
* BFI_MSGQ_CMD_ENTRY_SIZE
);
228 bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq
*cmdq
, struct bfi_mbmsg
*mb
)
230 struct bfi_msgq_i2h_db
*dbell
= (struct bfi_msgq_i2h_db
*)mb
;
231 struct bfa_msgq_cmd_entry
*cmd
;
234 cmdq
->consumer_index
= ntohs(dbell
->idx
.cmdq_ci
);
236 /* Walk through pending list to see if the command can be posted */
237 while (!list_empty(&cmdq
->pending_q
)) {
238 cmd
= list_first_entry(&cmdq
->pending_q
,
239 struct bfa_msgq_cmd_entry
, qe
);
240 if (ntohs(cmd
->msg_hdr
->num_entries
) <=
241 BFA_MSGQ_FREE_CNT(cmdq
)) {
243 __cmd_copy(cmdq
, cmd
);
245 call_cmdq_ent_cbfn(cmd
, BFA_STATUS_OK
);
252 bfa_fsm_send_event(cmdq
, CMDQ_E_POST
);
256 bfa_msgq_cmdq_copy_next(void *arg
)
258 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
260 if (cmdq
->bytes_to_copy
)
261 bfa_msgq_cmdq_copy_rsp(cmdq
);
265 bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq
*cmdq
, struct bfi_mbmsg
*mb
)
267 struct bfi_msgq_i2h_cmdq_copy_req
*req
=
268 (struct bfi_msgq_i2h_cmdq_copy_req
*)mb
;
271 cmdq
->offset
= ntohs(req
->offset
);
272 cmdq
->bytes_to_copy
= ntohs(req
->len
);
273 bfa_msgq_cmdq_copy_rsp(cmdq
);
277 bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq
*cmdq
)
279 struct bfi_msgq_h2i_cmdq_copy_rsp
*rsp
=
280 (struct bfi_msgq_h2i_cmdq_copy_rsp
*)&cmdq
->copy_mb
.msg
[0];
282 u8
*addr
= (u8
*)cmdq
->addr
.kva
;
284 memset(rsp
, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp
));
285 bfi_h2i_set(rsp
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_CMDQ_COPY_RSP
, 0);
286 rsp
->mh
.mtag
.i2htok
= htons(cmdq
->token
);
287 copied
= (cmdq
->bytes_to_copy
>= BFI_CMD_COPY_SZ
) ? BFI_CMD_COPY_SZ
:
289 addr
+= cmdq
->offset
;
290 memcpy(rsp
->data
, addr
, copied
);
293 cmdq
->offset
+= copied
;
294 cmdq
->bytes_to_copy
-= copied
;
296 if (!bfa_nw_ioc_mbox_queue(cmdq
->msgq
->ioc
, &cmdq
->copy_mb
,
297 bfa_msgq_cmdq_copy_next
, cmdq
)) {
298 bfa_msgq_cmdq_copy_next(cmdq
);
303 bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq
*cmdq
, struct bfa_msgq
*msgq
)
305 cmdq
->depth
= BFA_MSGQ_CMDQ_NUM_ENTRY
;
306 INIT_LIST_HEAD(&cmdq
->pending_q
);
308 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
311 static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq
*rspq
);
318 RSPQ_E_INIT_RESP
= 5,
322 bfa_fsm_state_decl(rspq
, stopped
, struct bfa_msgq_rspq
, enum rspq_event
);
323 bfa_fsm_state_decl(rspq
, init_wait
, struct bfa_msgq_rspq
,
325 bfa_fsm_state_decl(rspq
, ready
, struct bfa_msgq_rspq
, enum rspq_event
);
326 bfa_fsm_state_decl(rspq
, dbell_wait
, struct bfa_msgq_rspq
,
330 rspq_sm_stopped_entry(struct bfa_msgq_rspq
*rspq
)
332 rspq
->producer_index
= 0;
333 rspq
->consumer_index
= 0;
338 rspq_sm_stopped(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
342 bfa_fsm_set_state(rspq
, rspq_sm_init_wait
);
356 rspq_sm_init_wait_entry(struct bfa_msgq_rspq
*rspq
)
358 bfa_wc_down(&rspq
->msgq
->init_wc
);
362 rspq_sm_init_wait(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
367 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
370 case RSPQ_E_INIT_RESP
:
371 bfa_fsm_set_state(rspq
, rspq_sm_ready
);
/* Entry action for the ready state: nothing to do. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
385 rspq_sm_ready(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
390 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
394 bfa_fsm_set_state(rspq
, rspq_sm_dbell_wait
);
403 rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq
*rspq
)
405 if (!bfa_nw_ioc_is_disabled(rspq
->msgq
->ioc
))
406 bfa_msgq_rspq_dbell(rspq
);
410 rspq_sm_dbell_wait(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
415 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
419 rspq
->flags
|= BFA_MSGQ_RSPQ_F_DB_UPDATE
;
422 case RSPQ_E_DB_READY
:
423 if (rspq
->flags
& BFA_MSGQ_RSPQ_F_DB_UPDATE
) {
424 rspq
->flags
&= ~BFA_MSGQ_RSPQ_F_DB_UPDATE
;
425 bfa_fsm_set_state(rspq
, rspq_sm_dbell_wait
);
427 bfa_fsm_set_state(rspq
, rspq_sm_ready
);
436 bfa_msgq_rspq_dbell_ready(void *arg
)
438 struct bfa_msgq_rspq
*rspq
= (struct bfa_msgq_rspq
*)arg
;
439 bfa_fsm_send_event(rspq
, RSPQ_E_DB_READY
);
443 bfa_msgq_rspq_dbell(struct bfa_msgq_rspq
*rspq
)
445 struct bfi_msgq_h2i_db
*dbell
=
446 (struct bfi_msgq_h2i_db
*)(&rspq
->dbell_mb
.msg
[0]);
448 memset(dbell
, 0, sizeof(struct bfi_msgq_h2i_db
));
449 bfi_h2i_set(dbell
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_DOORBELL_CI
, 0);
450 dbell
->mh
.mtag
.i2htok
= 0;
451 dbell
->idx
.rspq_ci
= htons(rspq
->consumer_index
);
453 if (!bfa_nw_ioc_mbox_queue(rspq
->msgq
->ioc
, &rspq
->dbell_mb
,
454 bfa_msgq_rspq_dbell_ready
, rspq
)) {
455 bfa_msgq_rspq_dbell_ready(rspq
);
460 bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq
*rspq
, struct bfi_mbmsg
*mb
)
462 struct bfi_msgq_i2h_db
*dbell
= (struct bfi_msgq_i2h_db
*)mb
;
463 struct bfi_msgq_mhdr
*msghdr
;
468 rspq
->producer_index
= ntohs(dbell
->idx
.rspq_pi
);
470 while (rspq
->consumer_index
!= rspq
->producer_index
) {
471 rspq_qe
= (u8
*)rspq
->addr
.kva
;
472 rspq_qe
+= (rspq
->consumer_index
* BFI_MSGQ_RSP_ENTRY_SIZE
);
473 msghdr
= (struct bfi_msgq_mhdr
*)rspq_qe
;
475 mc
= msghdr
->msg_class
;
476 num_entries
= ntohs(msghdr
->num_entries
);
478 if ((mc
>= BFI_MC_MAX
) || (rspq
->rsphdlr
[mc
].cbfn
== NULL
))
481 (rspq
->rsphdlr
[mc
].cbfn
)(rspq
->rsphdlr
[mc
].cbarg
, msghdr
);
483 BFA_MSGQ_INDX_ADD(rspq
->consumer_index
, num_entries
,
487 bfa_fsm_send_event(rspq
, RSPQ_E_RESP
);
491 bfa_msgq_rspq_attach(struct bfa_msgq_rspq
*rspq
, struct bfa_msgq
*msgq
)
493 rspq
->depth
= BFA_MSGQ_RSPQ_NUM_ENTRY
;
495 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
499 bfa_msgq_init_rsp(struct bfa_msgq
*msgq
,
500 struct bfi_mbmsg
*mb
)
502 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_INIT_RESP
);
503 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_INIT_RESP
);
507 bfa_msgq_init(void *arg
)
509 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)arg
;
510 struct bfi_msgq_cfg_req
*msgq_cfg
=
511 (struct bfi_msgq_cfg_req
*)&msgq
->init_mb
.msg
[0];
513 memset(msgq_cfg
, 0, sizeof(struct bfi_msgq_cfg_req
));
514 bfi_h2i_set(msgq_cfg
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_INIT_REQ
, 0);
515 msgq_cfg
->mh
.mtag
.i2htok
= 0;
517 bfa_dma_be_addr_set(msgq_cfg
->cmdq
.addr
, msgq
->cmdq
.addr
.pa
);
518 msgq_cfg
->cmdq
.q_depth
= htons(msgq
->cmdq
.depth
);
519 bfa_dma_be_addr_set(msgq_cfg
->rspq
.addr
, msgq
->rspq
.addr
.pa
);
520 msgq_cfg
->rspq
.q_depth
= htons(msgq
->rspq
.depth
);
522 bfa_nw_ioc_mbox_queue(msgq
->ioc
, &msgq
->init_mb
, NULL
, NULL
);
526 bfa_msgq_isr(void *cbarg
, struct bfi_mbmsg
*msg
)
528 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)cbarg
;
530 switch (msg
->mh
.msg_id
) {
531 case BFI_MSGQ_I2H_INIT_RSP
:
532 bfa_msgq_init_rsp(msgq
, msg
);
535 case BFI_MSGQ_I2H_DOORBELL_PI
:
536 bfa_msgq_rspq_pi_update(&msgq
->rspq
, msg
);
539 case BFI_MSGQ_I2H_DOORBELL_CI
:
540 bfa_msgq_cmdq_ci_update(&msgq
->cmdq
, msg
);
543 case BFI_MSGQ_I2H_CMDQ_COPY_REQ
:
544 bfa_msgq_cmdq_copy_req(&msgq
->cmdq
, msg
);
553 bfa_msgq_notify(void *cbarg
, enum bfa_ioc_event event
)
555 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)cbarg
;
558 case BFA_IOC_E_ENABLED
:
559 bfa_wc_init(&msgq
->init_wc
, bfa_msgq_init
, msgq
);
560 bfa_wc_up(&msgq
->init_wc
);
561 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_START
);
562 bfa_wc_up(&msgq
->init_wc
);
563 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_START
);
564 bfa_wc_wait(&msgq
->init_wc
);
567 case BFA_IOC_E_DISABLED
:
568 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_STOP
);
569 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_STOP
);
572 case BFA_IOC_E_FAILED
:
573 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_FAIL
);
574 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_FAIL
);
583 bfa_msgq_meminfo(void)
585 return roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
) +
586 roundup(BFA_MSGQ_RSPQ_SIZE
, BFA_DMA_ALIGN_SZ
);
590 bfa_msgq_memclaim(struct bfa_msgq
*msgq
, u8
*kva
, u64 pa
)
592 msgq
->cmdq
.addr
.kva
= kva
;
593 msgq
->cmdq
.addr
.pa
= pa
;
595 kva
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
596 pa
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
598 msgq
->rspq
.addr
.kva
= kva
;
599 msgq
->rspq
.addr
.pa
= pa
;
603 bfa_msgq_attach(struct bfa_msgq
*msgq
, struct bfa_ioc
*ioc
)
607 bfa_msgq_cmdq_attach(&msgq
->cmdq
, msgq
);
608 bfa_msgq_rspq_attach(&msgq
->rspq
, msgq
);
610 bfa_nw_ioc_mbox_regisr(msgq
->ioc
, BFI_MC_MSGQ
, bfa_msgq_isr
, msgq
);
611 bfa_ioc_notify_init(&msgq
->ioc_notify
, bfa_msgq_notify
, msgq
);
612 bfa_nw_ioc_notify_register(msgq
->ioc
, &msgq
->ioc_notify
);
616 bfa_msgq_regisr(struct bfa_msgq
*msgq
, enum bfi_mclass mc
,
617 bfa_msgq_mcfunc_t cbfn
, void *cbarg
)
619 msgq
->rspq
.rsphdlr
[mc
].cbfn
= cbfn
;
620 msgq
->rspq
.rsphdlr
[mc
].cbarg
= cbarg
;
624 bfa_msgq_cmd_post(struct bfa_msgq
*msgq
, struct bfa_msgq_cmd_entry
*cmd
)
626 if (ntohs(cmd
->msg_hdr
->num_entries
) <=
627 BFA_MSGQ_FREE_CNT(&msgq
->cmdq
)) {
628 __cmd_copy(&msgq
->cmdq
, cmd
);
629 call_cmdq_ent_cbfn(cmd
, BFA_STATUS_OK
);
630 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_POST
);
632 list_add_tail(&cmd
->qe
, &msgq
->cmdq
.pending_q
);
637 bfa_msgq_rsp_copy(struct bfa_msgq
*msgq
, u8
*buf
, size_t buf_len
)
639 struct bfa_msgq_rspq
*rspq
= &msgq
->rspq
;
640 size_t len
= buf_len
;
645 ci
= rspq
->consumer_index
;
646 src
= (u8
*)rspq
->addr
.kva
;
647 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);
651 to_copy
= (len
< BFI_MSGQ_RSP_ENTRY_SIZE
) ?
652 len
: BFI_MSGQ_RSP_ENTRY_SIZE
;
653 memcpy(dst
, src
, to_copy
);
655 dst
+= BFI_MSGQ_RSP_ENTRY_SIZE
;
656 BFA_MSGQ_INDX_ADD(ci
, 1, rspq
->depth
);
657 src
= (u8
*)rspq
->addr
.kva
;
658 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);