// SPDX-License-Identifier: GPL-2.0-only
/* Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

/* MSGQ module source file. */
18 #define call_cmdq_ent_cbfn(_cmdq_ent, _status) \
20 bfa_msgq_cmdcbfn_t cbfn; \
22 cbfn = (_cmdq_ent)->cbfn; \
23 cbarg = (_cmdq_ent)->cbarg; \
24 (_cmdq_ent)->cbfn = NULL; \
25 (_cmdq_ent)->cbarg = NULL; \
27 cbfn(cbarg, (_status)); \
31 static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq
*cmdq
);
32 static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq
*cmdq
);
43 bfa_fsm_state_decl(cmdq
, stopped
, struct bfa_msgq_cmdq
, enum cmdq_event
);
44 bfa_fsm_state_decl(cmdq
, init_wait
, struct bfa_msgq_cmdq
, enum cmdq_event
);
45 bfa_fsm_state_decl(cmdq
, ready
, struct bfa_msgq_cmdq
, enum cmdq_event
);
46 bfa_fsm_state_decl(cmdq
, dbell_wait
, struct bfa_msgq_cmdq
,
50 cmdq_sm_stopped_entry(struct bfa_msgq_cmdq
*cmdq
)
52 struct bfa_msgq_cmd_entry
*cmdq_ent
;
54 cmdq
->producer_index
= 0;
55 cmdq
->consumer_index
= 0;
59 cmdq
->bytes_to_copy
= 0;
60 while (!list_empty(&cmdq
->pending_q
)) {
61 cmdq_ent
= list_first_entry(&cmdq
->pending_q
,
62 struct bfa_msgq_cmd_entry
, qe
);
63 list_del(&cmdq_ent
->qe
);
64 call_cmdq_ent_cbfn(cmdq_ent
, BFA_STATUS_FAILED
);
69 cmdq_sm_stopped(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
73 bfa_fsm_set_state(cmdq
, cmdq_sm_init_wait
);
82 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
91 cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq
*cmdq
)
93 bfa_wc_down(&cmdq
->msgq
->init_wc
);
97 cmdq_sm_init_wait(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
102 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
106 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
109 case CMDQ_E_INIT_RESP
:
110 if (cmdq
->flags
& BFA_MSGQ_CMDQ_F_DB_UPDATE
) {
111 cmdq
->flags
&= ~BFA_MSGQ_CMDQ_F_DB_UPDATE
;
112 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
114 bfa_fsm_set_state(cmdq
, cmdq_sm_ready
);
123 cmdq_sm_ready_entry(struct bfa_msgq_cmdq
*cmdq
)
128 cmdq_sm_ready(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
133 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
137 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
146 cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq
*cmdq
)
148 bfa_msgq_cmdq_dbell(cmdq
);
152 cmdq_sm_dbell_wait(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
157 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
161 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
164 case CMDQ_E_DB_READY
:
165 if (cmdq
->flags
& BFA_MSGQ_CMDQ_F_DB_UPDATE
) {
166 cmdq
->flags
&= ~BFA_MSGQ_CMDQ_F_DB_UPDATE
;
167 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
169 bfa_fsm_set_state(cmdq
, cmdq_sm_ready
);
178 bfa_msgq_cmdq_dbell_ready(void *arg
)
180 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
181 bfa_fsm_send_event(cmdq
, CMDQ_E_DB_READY
);
185 bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq
*cmdq
)
187 struct bfi_msgq_h2i_db
*dbell
=
188 (struct bfi_msgq_h2i_db
*)(&cmdq
->dbell_mb
.msg
[0]);
190 memset(dbell
, 0, sizeof(struct bfi_msgq_h2i_db
));
191 bfi_h2i_set(dbell
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_DOORBELL_PI
, 0);
192 dbell
->mh
.mtag
.i2htok
= 0;
193 dbell
->idx
.cmdq_pi
= htons(cmdq
->producer_index
);
195 if (!bfa_nw_ioc_mbox_queue(cmdq
->msgq
->ioc
, &cmdq
->dbell_mb
,
196 bfa_msgq_cmdq_dbell_ready
, cmdq
)) {
197 bfa_msgq_cmdq_dbell_ready(cmdq
);
202 __cmd_copy(struct bfa_msgq_cmdq
*cmdq
, struct bfa_msgq_cmd_entry
*cmd
)
204 size_t len
= cmd
->msg_size
;
208 src
= (u8
*)cmd
->msg_hdr
;
209 dst
= (u8
*)cmdq
->addr
.kva
;
210 dst
+= (cmdq
->producer_index
* BFI_MSGQ_CMD_ENTRY_SIZE
);
213 to_copy
= (len
< BFI_MSGQ_CMD_ENTRY_SIZE
) ?
214 len
: BFI_MSGQ_CMD_ENTRY_SIZE
;
215 memcpy(dst
, src
, to_copy
);
217 src
+= BFI_MSGQ_CMD_ENTRY_SIZE
;
218 BFA_MSGQ_INDX_ADD(cmdq
->producer_index
, 1, cmdq
->depth
);
219 dst
= (u8
*)cmdq
->addr
.kva
;
220 dst
+= (cmdq
->producer_index
* BFI_MSGQ_CMD_ENTRY_SIZE
);
226 bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq
*cmdq
, struct bfi_mbmsg
*mb
)
228 struct bfi_msgq_i2h_db
*dbell
= (struct bfi_msgq_i2h_db
*)mb
;
229 struct bfa_msgq_cmd_entry
*cmd
;
232 cmdq
->consumer_index
= ntohs(dbell
->idx
.cmdq_ci
);
234 /* Walk through pending list to see if the command can be posted */
235 while (!list_empty(&cmdq
->pending_q
)) {
236 cmd
= list_first_entry(&cmdq
->pending_q
,
237 struct bfa_msgq_cmd_entry
, qe
);
238 if (ntohs(cmd
->msg_hdr
->num_entries
) <=
239 BFA_MSGQ_FREE_CNT(cmdq
)) {
241 __cmd_copy(cmdq
, cmd
);
243 call_cmdq_ent_cbfn(cmd
, BFA_STATUS_OK
);
250 bfa_fsm_send_event(cmdq
, CMDQ_E_POST
);
254 bfa_msgq_cmdq_copy_next(void *arg
)
256 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
258 if (cmdq
->bytes_to_copy
)
259 bfa_msgq_cmdq_copy_rsp(cmdq
);
263 bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq
*cmdq
, struct bfi_mbmsg
*mb
)
265 struct bfi_msgq_i2h_cmdq_copy_req
*req
=
266 (struct bfi_msgq_i2h_cmdq_copy_req
*)mb
;
269 cmdq
->offset
= ntohs(req
->offset
);
270 cmdq
->bytes_to_copy
= ntohs(req
->len
);
271 bfa_msgq_cmdq_copy_rsp(cmdq
);
275 bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq
*cmdq
)
277 struct bfi_msgq_h2i_cmdq_copy_rsp
*rsp
=
278 (struct bfi_msgq_h2i_cmdq_copy_rsp
*)&cmdq
->copy_mb
.msg
[0];
280 u8
*addr
= (u8
*)cmdq
->addr
.kva
;
282 memset(rsp
, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp
));
283 bfi_h2i_set(rsp
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_CMDQ_COPY_RSP
, 0);
284 rsp
->mh
.mtag
.i2htok
= htons(cmdq
->token
);
285 copied
= (cmdq
->bytes_to_copy
>= BFI_CMD_COPY_SZ
) ? BFI_CMD_COPY_SZ
:
287 addr
+= cmdq
->offset
;
288 memcpy(rsp
->data
, addr
, copied
);
291 cmdq
->offset
+= copied
;
292 cmdq
->bytes_to_copy
-= copied
;
294 if (!bfa_nw_ioc_mbox_queue(cmdq
->msgq
->ioc
, &cmdq
->copy_mb
,
295 bfa_msgq_cmdq_copy_next
, cmdq
)) {
296 bfa_msgq_cmdq_copy_next(cmdq
);
301 bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq
*cmdq
, struct bfa_msgq
*msgq
)
303 cmdq
->depth
= BFA_MSGQ_CMDQ_NUM_ENTRY
;
304 INIT_LIST_HEAD(&cmdq
->pending_q
);
306 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
309 static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq
*rspq
);
316 RSPQ_E_INIT_RESP
= 5,
320 bfa_fsm_state_decl(rspq
, stopped
, struct bfa_msgq_rspq
, enum rspq_event
);
321 bfa_fsm_state_decl(rspq
, init_wait
, struct bfa_msgq_rspq
,
323 bfa_fsm_state_decl(rspq
, ready
, struct bfa_msgq_rspq
, enum rspq_event
);
324 bfa_fsm_state_decl(rspq
, dbell_wait
, struct bfa_msgq_rspq
,
328 rspq_sm_stopped_entry(struct bfa_msgq_rspq
*rspq
)
330 rspq
->producer_index
= 0;
331 rspq
->consumer_index
= 0;
336 rspq_sm_stopped(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
340 bfa_fsm_set_state(rspq
, rspq_sm_init_wait
);
354 rspq_sm_init_wait_entry(struct bfa_msgq_rspq
*rspq
)
356 bfa_wc_down(&rspq
->msgq
->init_wc
);
360 rspq_sm_init_wait(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
365 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
368 case RSPQ_E_INIT_RESP
:
369 bfa_fsm_set_state(rspq
, rspq_sm_ready
);
378 rspq_sm_ready_entry(struct bfa_msgq_rspq
*rspq
)
383 rspq_sm_ready(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
388 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
392 bfa_fsm_set_state(rspq
, rspq_sm_dbell_wait
);
401 rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq
*rspq
)
403 if (!bfa_nw_ioc_is_disabled(rspq
->msgq
->ioc
))
404 bfa_msgq_rspq_dbell(rspq
);
408 rspq_sm_dbell_wait(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
413 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
417 rspq
->flags
|= BFA_MSGQ_RSPQ_F_DB_UPDATE
;
420 case RSPQ_E_DB_READY
:
421 if (rspq
->flags
& BFA_MSGQ_RSPQ_F_DB_UPDATE
) {
422 rspq
->flags
&= ~BFA_MSGQ_RSPQ_F_DB_UPDATE
;
423 bfa_fsm_set_state(rspq
, rspq_sm_dbell_wait
);
425 bfa_fsm_set_state(rspq
, rspq_sm_ready
);
434 bfa_msgq_rspq_dbell_ready(void *arg
)
436 struct bfa_msgq_rspq
*rspq
= (struct bfa_msgq_rspq
*)arg
;
437 bfa_fsm_send_event(rspq
, RSPQ_E_DB_READY
);
441 bfa_msgq_rspq_dbell(struct bfa_msgq_rspq
*rspq
)
443 struct bfi_msgq_h2i_db
*dbell
=
444 (struct bfi_msgq_h2i_db
*)(&rspq
->dbell_mb
.msg
[0]);
446 memset(dbell
, 0, sizeof(struct bfi_msgq_h2i_db
));
447 bfi_h2i_set(dbell
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_DOORBELL_CI
, 0);
448 dbell
->mh
.mtag
.i2htok
= 0;
449 dbell
->idx
.rspq_ci
= htons(rspq
->consumer_index
);
451 if (!bfa_nw_ioc_mbox_queue(rspq
->msgq
->ioc
, &rspq
->dbell_mb
,
452 bfa_msgq_rspq_dbell_ready
, rspq
)) {
453 bfa_msgq_rspq_dbell_ready(rspq
);
458 bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq
*rspq
, struct bfi_mbmsg
*mb
)
460 struct bfi_msgq_i2h_db
*dbell
= (struct bfi_msgq_i2h_db
*)mb
;
461 struct bfi_msgq_mhdr
*msghdr
;
466 rspq
->producer_index
= ntohs(dbell
->idx
.rspq_pi
);
468 while (rspq
->consumer_index
!= rspq
->producer_index
) {
469 rspq_qe
= (u8
*)rspq
->addr
.kva
;
470 rspq_qe
+= (rspq
->consumer_index
* BFI_MSGQ_RSP_ENTRY_SIZE
);
471 msghdr
= (struct bfi_msgq_mhdr
*)rspq_qe
;
473 mc
= msghdr
->msg_class
;
474 num_entries
= ntohs(msghdr
->num_entries
);
476 if ((mc
>= BFI_MC_MAX
) || (rspq
->rsphdlr
[mc
].cbfn
== NULL
))
479 (rspq
->rsphdlr
[mc
].cbfn
)(rspq
->rsphdlr
[mc
].cbarg
, msghdr
);
481 BFA_MSGQ_INDX_ADD(rspq
->consumer_index
, num_entries
,
485 bfa_fsm_send_event(rspq
, RSPQ_E_RESP
);
489 bfa_msgq_rspq_attach(struct bfa_msgq_rspq
*rspq
, struct bfa_msgq
*msgq
)
491 rspq
->depth
= BFA_MSGQ_RSPQ_NUM_ENTRY
;
493 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
497 bfa_msgq_init_rsp(struct bfa_msgq
*msgq
,
498 struct bfi_mbmsg
*mb
)
500 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_INIT_RESP
);
501 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_INIT_RESP
);
505 bfa_msgq_init(void *arg
)
507 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)arg
;
508 struct bfi_msgq_cfg_req
*msgq_cfg
=
509 (struct bfi_msgq_cfg_req
*)&msgq
->init_mb
.msg
[0];
511 memset(msgq_cfg
, 0, sizeof(struct bfi_msgq_cfg_req
));
512 bfi_h2i_set(msgq_cfg
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_INIT_REQ
, 0);
513 msgq_cfg
->mh
.mtag
.i2htok
= 0;
515 bfa_dma_be_addr_set(msgq_cfg
->cmdq
.addr
, msgq
->cmdq
.addr
.pa
);
516 msgq_cfg
->cmdq
.q_depth
= htons(msgq
->cmdq
.depth
);
517 bfa_dma_be_addr_set(msgq_cfg
->rspq
.addr
, msgq
->rspq
.addr
.pa
);
518 msgq_cfg
->rspq
.q_depth
= htons(msgq
->rspq
.depth
);
520 bfa_nw_ioc_mbox_queue(msgq
->ioc
, &msgq
->init_mb
, NULL
, NULL
);
524 bfa_msgq_isr(void *cbarg
, struct bfi_mbmsg
*msg
)
526 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)cbarg
;
528 switch (msg
->mh
.msg_id
) {
529 case BFI_MSGQ_I2H_INIT_RSP
:
530 bfa_msgq_init_rsp(msgq
, msg
);
533 case BFI_MSGQ_I2H_DOORBELL_PI
:
534 bfa_msgq_rspq_pi_update(&msgq
->rspq
, msg
);
537 case BFI_MSGQ_I2H_DOORBELL_CI
:
538 bfa_msgq_cmdq_ci_update(&msgq
->cmdq
, msg
);
541 case BFI_MSGQ_I2H_CMDQ_COPY_REQ
:
542 bfa_msgq_cmdq_copy_req(&msgq
->cmdq
, msg
);
551 bfa_msgq_notify(void *cbarg
, enum bfa_ioc_event event
)
553 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)cbarg
;
556 case BFA_IOC_E_ENABLED
:
557 bfa_wc_init(&msgq
->init_wc
, bfa_msgq_init
, msgq
);
558 bfa_wc_up(&msgq
->init_wc
);
559 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_START
);
560 bfa_wc_up(&msgq
->init_wc
);
561 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_START
);
562 bfa_wc_wait(&msgq
->init_wc
);
565 case BFA_IOC_E_DISABLED
:
566 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_STOP
);
567 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_STOP
);
570 case BFA_IOC_E_FAILED
:
571 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_FAIL
);
572 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_FAIL
);
581 bfa_msgq_meminfo(void)
583 return roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
) +
584 roundup(BFA_MSGQ_RSPQ_SIZE
, BFA_DMA_ALIGN_SZ
);
588 bfa_msgq_memclaim(struct bfa_msgq
*msgq
, u8
*kva
, u64 pa
)
590 msgq
->cmdq
.addr
.kva
= kva
;
591 msgq
->cmdq
.addr
.pa
= pa
;
593 kva
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
594 pa
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
596 msgq
->rspq
.addr
.kva
= kva
;
597 msgq
->rspq
.addr
.pa
= pa
;
601 bfa_msgq_attach(struct bfa_msgq
*msgq
, struct bfa_ioc
*ioc
)
605 bfa_msgq_cmdq_attach(&msgq
->cmdq
, msgq
);
606 bfa_msgq_rspq_attach(&msgq
->rspq
, msgq
);
608 bfa_nw_ioc_mbox_regisr(msgq
->ioc
, BFI_MC_MSGQ
, bfa_msgq_isr
, msgq
);
609 bfa_ioc_notify_init(&msgq
->ioc_notify
, bfa_msgq_notify
, msgq
);
610 bfa_nw_ioc_notify_register(msgq
->ioc
, &msgq
->ioc_notify
);
614 bfa_msgq_regisr(struct bfa_msgq
*msgq
, enum bfi_mclass mc
,
615 bfa_msgq_mcfunc_t cbfn
, void *cbarg
)
617 msgq
->rspq
.rsphdlr
[mc
].cbfn
= cbfn
;
618 msgq
->rspq
.rsphdlr
[mc
].cbarg
= cbarg
;
622 bfa_msgq_cmd_post(struct bfa_msgq
*msgq
, struct bfa_msgq_cmd_entry
*cmd
)
624 if (ntohs(cmd
->msg_hdr
->num_entries
) <=
625 BFA_MSGQ_FREE_CNT(&msgq
->cmdq
)) {
626 __cmd_copy(&msgq
->cmdq
, cmd
);
627 call_cmdq_ent_cbfn(cmd
, BFA_STATUS_OK
);
628 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_POST
);
630 list_add_tail(&cmd
->qe
, &msgq
->cmdq
.pending_q
);
635 bfa_msgq_rsp_copy(struct bfa_msgq
*msgq
, u8
*buf
, size_t buf_len
)
637 struct bfa_msgq_rspq
*rspq
= &msgq
->rspq
;
638 size_t len
= buf_len
;
643 ci
= rspq
->consumer_index
;
644 src
= (u8
*)rspq
->addr
.kva
;
645 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);
649 to_copy
= (len
< BFI_MSGQ_RSP_ENTRY_SIZE
) ?
650 len
: BFI_MSGQ_RSP_ENTRY_SIZE
;
651 memcpy(dst
, src
, to_copy
);
653 dst
+= BFI_MSGQ_RSP_ENTRY_SIZE
;
654 BFA_MSGQ_INDX_ADD(ci
, 1, rspq
->depth
);
655 src
= (u8
*)rspq
->addr
.kva
;
656 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);