/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved.
 */

/* MSGQ module source file. */
/*
 * Complete a command entry: detach its callback/arg pair, then invoke the
 * callback (if any) with @_status.  cbfn/cbarg are cleared before the call
 * so the entry completes at most once even if the callback re-posts it.
 *
 * Fix: wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe inside unbraced if/else bodies (the previous bare
 * { } block breaks "if (x) call_cmdq_ent_cbfn(...); else ...").
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
} while (0)
/* Forward declarations of CMDQ helpers used by the state machine below. */
static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);

/* Command-queue (host -> firmware) state machine events. */
enum cmdq_event {
	CMDQ_E_START			= 1,
	CMDQ_E_STOP			= 2,
	CMDQ_E_FAIL			= 3,
	CMDQ_E_POST			= 4,
	CMDQ_E_INIT_RESP		= 5,
	CMDQ_E_DB_READY			= 6,
};

/* One entry/handler pair is declared per CMDQ state. */
bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
		   enum cmdq_event);
/*
 * stopped entry: reset all queue state and fail every command still parked
 * on pending_q, so no callback is left dangling across a stop/fail.
 */
static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	while (!list_empty(&cmdq->pending_q)) {
		bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
		bfa_q_qe_init(&cmdq_ent->qe);
		/* Owner is told the post failed rather than being dropped. */
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}
/* stopped: only START is meaningful; a POST is remembered for later. */
static void
cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_START:
		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
		break;

	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		/* No-op */
		break;

	case CMDQ_E_POST:
		/* Defer the doorbell until the queue comes up. */
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	default:
		bfa_sm_fault(event);
	}
}
/*
 * init_wait entry: signal the shared init wait-counter; when both cmdq and
 * rspq have arrived here, bfa_msgq_init() sends the config request.
 */
static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}
/*
 * init_wait: configuration request is with the firmware; waiting for
 * INIT_RESP.  Posts that arrive meanwhile are flagged for a doorbell.
 */
static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_INIT_RESP:
		/* Ring the deferred doorbell now, if one was requested. */
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* ready entry: queue idle and in sync with firmware; nothing to do. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
/* ready: a new post moved producer_index, so go ring the doorbell. */
static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* dbell_wait entry: publish the new producer index to the firmware. */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
/*
 * dbell_wait: doorbell mailbox message is in flight.  Posts made before
 * DB_READY are coalesced into one follow-up doorbell via the flag.
 */
static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_DB_READY:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			/* Re-enter dbell_wait to ring again for new posts. */
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
186 bfa_msgq_cmdq_dbell_ready(void *arg
)
188 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
189 bfa_fsm_send_event(cmdq
, CMDQ_E_DB_READY
);
/*
 * Build a DOORBELL_PI mailbox message carrying the current producer index
 * and queue it to the IOC.  If the mailbox was busy, bfa_nw_ioc_mbox_queue()
 * took ownership and bfa_msgq_cmdq_dbell_ready() runs later; otherwise the
 * message went out immediately and we signal DB_READY ourselves.
 */
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	/* Indices cross the wire big-endian. */
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
/*
 * Copy one command into the ring, BFI_MSGQ_CMD_ENTRY_SIZE bytes per queue
 * entry, advancing (and wrapping) producer_index once per entry consumed.
 * Caller must have verified free space (BFA_MSGQ_FREE_CNT) beforehand.
 */
static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
	size_t len = cmd->msg_size;
	size_t to_copy;
	u8 *src, *dst;

	src = (u8 *)cmd->msg_hdr;
	dst = (u8 *)cmdq->addr.kva;
	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);

	while (len) {
		/* Last chunk may be shorter than a full queue entry. */
		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
				len : BFI_MSGQ_CMD_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		src += BFI_MSGQ_CMD_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
		/* Recompute dst from kva: the index may have wrapped. */
		dst = (u8 *)cmdq->addr.kva;
		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
	}
}
/*
 * DOORBELL_CI from firmware: the consumer index advanced, freeing ring
 * space.  Post as many pending commands as now fit (completing each with
 * BFA_STATUS_OK), then ring the doorbell once if anything was posted.
 */
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd =
		(struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			/* Head doesn't fit; preserve FIFO order and stop. */
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}
264 bfa_msgq_cmdq_copy_next(void *arg
)
266 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
268 if (cmdq
->bytes_to_copy
)
269 bfa_msgq_cmdq_copy_rsp(cmdq
);
/*
 * CMDQ_COPY_REQ from firmware: it wants [offset, offset + len) of the raw
 * command ring re-sent (command-recovery path).  Record the window and
 * kick off the first response fragment.
 */
static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}
/*
 * Send one fragment (at most BFI_CMD_COPY_SZ bytes) of the command ring
 * back to the firmware.  The running token sequences the fragments;
 * bfa_msgq_cmdq_copy_next() re-invokes us until bytes_to_copy is zero.
 */
static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	/* Advance the copy window for the next fragment. */
	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}
311 bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq
*cmdq
, struct bfa_msgq
*msgq
)
313 cmdq
->depth
= BFA_MSGQ_CMDQ_NUM_ENTRY
;
314 INIT_LIST_HEAD(&cmdq
->pending_q
);
316 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
/* Forward declaration of the RSPQ doorbell helper used by the FSM below. */
static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);

/* Response-queue (firmware -> host) state machine events. */
enum rspq_event {
	RSPQ_E_START			= 1,
	RSPQ_E_STOP			= 2,
	RSPQ_E_FAIL			= 3,
	RSPQ_E_RESP			= 4,
	RSPQ_E_INIT_RESP		= 5,
	RSPQ_E_DB_READY			= 6,
};

/* One entry/handler pair is declared per RSPQ state. */
bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
		   enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
		   enum rspq_event);
/* stopped entry: reset the ring indices and the pending-doorbell flag. */
static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}
/* stopped: only START is meaningful; STOP/FAIL while stopped are no-ops. */
static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/*
 * init_wait entry: signal the shared init wait-counter; when both queues
 * have arrived here, bfa_msgq_init() sends the config request.
 */
static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}
/* init_wait: waiting for the firmware's INIT_RESP before going ready. */
static void
rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_FAIL:
	case RSPQ_E_STOP:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_INIT_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* ready entry: no responses outstanding; nothing to do. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
/* ready: responses were consumed, so go acknowledge the new CI. */
static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/*
 * dbell_wait entry: acknowledge the consumed responses.  The doorbell is
 * skipped while the IOC is disabled (mailbox unusable).
 */
static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}
/*
 * dbell_wait: CI doorbell is in flight.  Responses consumed before
 * DB_READY are coalesced into one follow-up doorbell via the flag.
 */
static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;

	case RSPQ_E_DB_READY:
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			/* Re-enter dbell_wait to ring again. */
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
444 bfa_msgq_rspq_dbell_ready(void *arg
)
446 struct bfa_msgq_rspq
*rspq
= (struct bfa_msgq_rspq
*)arg
;
447 bfa_fsm_send_event(rspq
, RSPQ_E_DB_READY
);
/*
 * Build a DOORBELL_CI mailbox message carrying the current consumer index
 * and queue it to the IOC.  If the mailbox was free the message went out
 * immediately and DB_READY is signalled inline; otherwise the ready
 * callback fires once the mailbox drains.
 */
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	/* Indices cross the wire big-endian. */
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
/*
 * DOORBELL_PI from firmware: new responses are in the ring.  Dispatch each
 * response to the handler registered for its message class, advance the
 * consumer index past it, then acknowledge via the state machine.
 */
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		/* Stop on an unknown class or an unregistered handler. */
		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		/* A response may span several fixed-size ring entries. */
		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	/* Ring (or defer) the CI doorbell via the state machine. */
	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}
499 bfa_msgq_rspq_attach(struct bfa_msgq_rspq
*rspq
, struct bfa_msgq
*msgq
)
501 rspq
->depth
= BFA_MSGQ_RSPQ_NUM_ENTRY
;
503 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
/* INIT_RSP from firmware: release both FSMs out of init_wait. */
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		 struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}
/*
 * Wait-counter callback: runs once both cmdq and rspq reach init_wait.
 * Builds the MSGQ configuration request (DMA addresses and depths of both
 * rings) and queues it to the IOC mailbox.
 */
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	/* No completion callback: INIT_RSP arrives through bfa_msgq_isr(). */
	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
/*
 * Mailbox ISR for BFI_MC_MSGQ: route each firmware message to the matching
 * handler by message id.
 */
static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		/* Unknown message id from firmware is a protocol bug. */
		BUG_ON(1);
	}
}
/*
 * IOC event notification.  On ENABLE, (re)start both queue FSMs; the wait
 * counter is raised once per queue and fires bfa_msgq_init() when both
 * have reached init_wait.
 */
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		/* Drops the initial reference; may fire the callback now. */
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}
591 bfa_msgq_meminfo(void)
593 return roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
) +
594 roundup(BFA_MSGQ_RSPQ_SIZE
, BFA_DMA_ALIGN_SZ
);
598 bfa_msgq_memclaim(struct bfa_msgq
*msgq
, u8
*kva
, u64 pa
)
600 msgq
->cmdq
.addr
.kva
= kva
;
601 msgq
->cmdq
.addr
.pa
= pa
;
603 kva
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
604 pa
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
606 msgq
->rspq
.addr
.kva
= kva
;
607 msgq
->rspq
.addr
.pa
= pa
;
/*
 * One-time MSGQ module attach: link the queues to @ioc, register the
 * mailbox ISR for BFI_MC_MSGQ and sign up for IOC state notifications.
 */
void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_q_qe_init(&msgq->ioc_notify);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
/*
 * Register the response handler for message class @mc.  Responses of that
 * class are delivered to @cbfn/@cbarg by bfa_msgq_rspq_pi_update().
 */
void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn	= cbfn;
	msgq->rspq.rsphdlr[mc].cbarg	= cbarg;
}
/*
 * Post a command to the firmware command queue.  If the ring has room the
 * command is copied in and completed immediately; otherwise it is parked
 * on pending_q until a CI doorbell frees space (bfa_msgq_cmdq_ci_update()).
 */
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq,  struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}
646 bfa_msgq_rsp_copy(struct bfa_msgq
*msgq
, u8
*buf
, size_t buf_len
)
648 struct bfa_msgq_rspq
*rspq
= &msgq
->rspq
;
649 size_t len
= buf_len
;
654 ci
= rspq
->consumer_index
;
655 src
= (u8
*)rspq
->addr
.kva
;
656 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);
660 to_copy
= (len
< BFI_MSGQ_RSP_ENTRY_SIZE
) ?
661 len
: BFI_MSGQ_RSP_ENTRY_SIZE
;
662 memcpy(dst
, src
, to_copy
);
664 dst
+= BFI_MSGQ_RSP_ENTRY_SIZE
;
665 BFA_MSGQ_INDX_ADD(ci
, 1, rspq
->depth
);
666 src
= (u8
*)rspq
->addr
.kva
;
667 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);