/* Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 */

/* MSGQ module source file. */
/* Invoke and clear a cmdq entry's completion callback.
 * The cbfn/cbarg pair is snapshotted and NULLed first so the callback
 * cannot be invoked twice for the same entry; a NULL cbfn is skipped.
 * do/while(0) makes the multi-statement macro safe in if/else bodies.
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn)							\
		cbfn(cbarg, (_status));					\
} while (0)
/* Forward declarations: the cmdq FSM states reference these helpers. */
static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
50 bfa_fsm_state_decl(cmdq
, stopped
, struct bfa_msgq_cmdq
, enum cmdq_event
);
51 bfa_fsm_state_decl(cmdq
, init_wait
, struct bfa_msgq_cmdq
, enum cmdq_event
);
52 bfa_fsm_state_decl(cmdq
, ready
, struct bfa_msgq_cmdq
, enum cmdq_event
);
53 bfa_fsm_state_decl(cmdq
, dbell_wait
, struct bfa_msgq_cmdq
,
57 cmdq_sm_stopped_entry(struct bfa_msgq_cmdq
*cmdq
)
59 struct bfa_msgq_cmd_entry
*cmdq_ent
;
61 cmdq
->producer_index
= 0;
62 cmdq
->consumer_index
= 0;
66 cmdq
->bytes_to_copy
= 0;
67 while (!list_empty(&cmdq
->pending_q
)) {
68 bfa_q_deq(&cmdq
->pending_q
, &cmdq_ent
);
69 bfa_q_qe_init(&cmdq_ent
->qe
);
70 call_cmdq_ent_cbfn(cmdq_ent
, BFA_STATUS_FAILED
);
75 cmdq_sm_stopped(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
79 bfa_fsm_set_state(cmdq
, cmdq_sm_init_wait
);
88 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
97 cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq
*cmdq
)
99 bfa_wc_down(&cmdq
->msgq
->init_wc
);
103 cmdq_sm_init_wait(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
108 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
112 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
115 case CMDQ_E_INIT_RESP
:
116 if (cmdq
->flags
& BFA_MSGQ_CMDQ_F_DB_UPDATE
) {
117 cmdq
->flags
&= ~BFA_MSGQ_CMDQ_F_DB_UPDATE
;
118 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
120 bfa_fsm_set_state(cmdq
, cmdq_sm_ready
);
/* ready entry: nothing to do; the queue idles until a POST arrives. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
134 cmdq_sm_ready(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
139 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
143 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
/* dbell_wait entry: queue the doorbell mailbox message to firmware. */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
158 cmdq_sm_dbell_wait(struct bfa_msgq_cmdq
*cmdq
, enum cmdq_event event
)
163 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
167 cmdq
->flags
|= BFA_MSGQ_CMDQ_F_DB_UPDATE
;
170 case CMDQ_E_DB_READY
:
171 if (cmdq
->flags
& BFA_MSGQ_CMDQ_F_DB_UPDATE
) {
172 cmdq
->flags
&= ~BFA_MSGQ_CMDQ_F_DB_UPDATE
;
173 bfa_fsm_set_state(cmdq
, cmdq_sm_dbell_wait
);
175 bfa_fsm_set_state(cmdq
, cmdq_sm_ready
);
184 bfa_msgq_cmdq_dbell_ready(void *arg
)
186 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
187 bfa_fsm_send_event(cmdq
, CMDQ_E_DB_READY
);
191 bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq
*cmdq
)
193 struct bfi_msgq_h2i_db
*dbell
=
194 (struct bfi_msgq_h2i_db
*)(&cmdq
->dbell_mb
.msg
[0]);
196 memset(dbell
, 0, sizeof(struct bfi_msgq_h2i_db
));
197 bfi_h2i_set(dbell
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_DOORBELL_PI
, 0);
198 dbell
->mh
.mtag
.i2htok
= 0;
199 dbell
->idx
.cmdq_pi
= htons(cmdq
->producer_index
);
201 if (!bfa_nw_ioc_mbox_queue(cmdq
->msgq
->ioc
, &cmdq
->dbell_mb
,
202 bfa_msgq_cmdq_dbell_ready
, cmdq
)) {
203 bfa_msgq_cmdq_dbell_ready(cmdq
);
208 __cmd_copy(struct bfa_msgq_cmdq
*cmdq
, struct bfa_msgq_cmd_entry
*cmd
)
210 size_t len
= cmd
->msg_size
;
215 src
= (u8
*)cmd
->msg_hdr
;
216 dst
= (u8
*)cmdq
->addr
.kva
;
217 dst
+= (cmdq
->producer_index
* BFI_MSGQ_CMD_ENTRY_SIZE
);
220 to_copy
= (len
< BFI_MSGQ_CMD_ENTRY_SIZE
) ?
221 len
: BFI_MSGQ_CMD_ENTRY_SIZE
;
222 memcpy(dst
, src
, to_copy
);
224 src
+= BFI_MSGQ_CMD_ENTRY_SIZE
;
225 BFA_MSGQ_INDX_ADD(cmdq
->producer_index
, 1, cmdq
->depth
);
226 dst
= (u8
*)cmdq
->addr
.kva
;
227 dst
+= (cmdq
->producer_index
* BFI_MSGQ_CMD_ENTRY_SIZE
);
234 bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq
*cmdq
, struct bfi_mbmsg
*mb
)
236 struct bfi_msgq_i2h_db
*dbell
= (struct bfi_msgq_i2h_db
*)mb
;
237 struct bfa_msgq_cmd_entry
*cmd
;
240 cmdq
->consumer_index
= ntohs(dbell
->idx
.cmdq_ci
);
242 /* Walk through pending list to see if the command can be posted */
243 while (!list_empty(&cmdq
->pending_q
)) {
245 (struct bfa_msgq_cmd_entry
*)bfa_q_first(&cmdq
->pending_q
);
246 if (ntohs(cmd
->msg_hdr
->num_entries
) <=
247 BFA_MSGQ_FREE_CNT(cmdq
)) {
249 __cmd_copy(cmdq
, cmd
);
251 call_cmdq_ent_cbfn(cmd
, BFA_STATUS_OK
);
258 bfa_fsm_send_event(cmdq
, CMDQ_E_POST
);
262 bfa_msgq_cmdq_copy_next(void *arg
)
264 struct bfa_msgq_cmdq
*cmdq
= (struct bfa_msgq_cmdq
*)arg
;
266 if (cmdq
->bytes_to_copy
)
267 bfa_msgq_cmdq_copy_rsp(cmdq
);
271 bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq
*cmdq
, struct bfi_mbmsg
*mb
)
273 struct bfi_msgq_i2h_cmdq_copy_req
*req
=
274 (struct bfi_msgq_i2h_cmdq_copy_req
*)mb
;
277 cmdq
->offset
= ntohs(req
->offset
);
278 cmdq
->bytes_to_copy
= ntohs(req
->len
);
279 bfa_msgq_cmdq_copy_rsp(cmdq
);
283 bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq
*cmdq
)
285 struct bfi_msgq_h2i_cmdq_copy_rsp
*rsp
=
286 (struct bfi_msgq_h2i_cmdq_copy_rsp
*)&cmdq
->copy_mb
.msg
[0];
288 u8
*addr
= (u8
*)cmdq
->addr
.kva
;
290 memset(rsp
, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp
));
291 bfi_h2i_set(rsp
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_CMDQ_COPY_RSP
, 0);
292 rsp
->mh
.mtag
.i2htok
= htons(cmdq
->token
);
293 copied
= (cmdq
->bytes_to_copy
>= BFI_CMD_COPY_SZ
) ? BFI_CMD_COPY_SZ
:
295 addr
+= cmdq
->offset
;
296 memcpy(rsp
->data
, addr
, copied
);
299 cmdq
->offset
+= copied
;
300 cmdq
->bytes_to_copy
-= copied
;
302 if (!bfa_nw_ioc_mbox_queue(cmdq
->msgq
->ioc
, &cmdq
->copy_mb
,
303 bfa_msgq_cmdq_copy_next
, cmdq
)) {
304 bfa_msgq_cmdq_copy_next(cmdq
);
309 bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq
*cmdq
, struct bfa_msgq
*msgq
)
311 cmdq
->depth
= BFA_MSGQ_CMDQ_NUM_ENTRY
;
312 INIT_LIST_HEAD(&cmdq
->pending_q
);
314 bfa_fsm_set_state(cmdq
, cmdq_sm_stopped
);
/* Forward declaration: rspq FSM states ring the consumer doorbell. */
static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
324 RSPQ_E_INIT_RESP
= 5,
328 bfa_fsm_state_decl(rspq
, stopped
, struct bfa_msgq_rspq
, enum rspq_event
);
329 bfa_fsm_state_decl(rspq
, init_wait
, struct bfa_msgq_rspq
,
331 bfa_fsm_state_decl(rspq
, ready
, struct bfa_msgq_rspq
, enum rspq_event
);
332 bfa_fsm_state_decl(rspq
, dbell_wait
, struct bfa_msgq_rspq
,
336 rspq_sm_stopped_entry(struct bfa_msgq_rspq
*rspq
)
338 rspq
->producer_index
= 0;
339 rspq
->consumer_index
= 0;
344 rspq_sm_stopped(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
348 bfa_fsm_set_state(rspq
, rspq_sm_init_wait
);
362 rspq_sm_init_wait_entry(struct bfa_msgq_rspq
*rspq
)
364 bfa_wc_down(&rspq
->msgq
->init_wc
);
368 rspq_sm_init_wait(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
373 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
376 case RSPQ_E_INIT_RESP
:
377 bfa_fsm_set_state(rspq
, rspq_sm_ready
);
/* ready entry: idle until responses arrive. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
391 rspq_sm_ready(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
396 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
400 bfa_fsm_set_state(rspq
, rspq_sm_dbell_wait
);
409 rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq
*rspq
)
411 if (!bfa_nw_ioc_is_disabled(rspq
->msgq
->ioc
))
412 bfa_msgq_rspq_dbell(rspq
);
416 rspq_sm_dbell_wait(struct bfa_msgq_rspq
*rspq
, enum rspq_event event
)
421 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
425 rspq
->flags
|= BFA_MSGQ_RSPQ_F_DB_UPDATE
;
428 case RSPQ_E_DB_READY
:
429 if (rspq
->flags
& BFA_MSGQ_RSPQ_F_DB_UPDATE
) {
430 rspq
->flags
&= ~BFA_MSGQ_RSPQ_F_DB_UPDATE
;
431 bfa_fsm_set_state(rspq
, rspq_sm_dbell_wait
);
433 bfa_fsm_set_state(rspq
, rspq_sm_ready
);
442 bfa_msgq_rspq_dbell_ready(void *arg
)
444 struct bfa_msgq_rspq
*rspq
= (struct bfa_msgq_rspq
*)arg
;
445 bfa_fsm_send_event(rspq
, RSPQ_E_DB_READY
);
449 bfa_msgq_rspq_dbell(struct bfa_msgq_rspq
*rspq
)
451 struct bfi_msgq_h2i_db
*dbell
=
452 (struct bfi_msgq_h2i_db
*)(&rspq
->dbell_mb
.msg
[0]);
454 memset(dbell
, 0, sizeof(struct bfi_msgq_h2i_db
));
455 bfi_h2i_set(dbell
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_DOORBELL_CI
, 0);
456 dbell
->mh
.mtag
.i2htok
= 0;
457 dbell
->idx
.rspq_ci
= htons(rspq
->consumer_index
);
459 if (!bfa_nw_ioc_mbox_queue(rspq
->msgq
->ioc
, &rspq
->dbell_mb
,
460 bfa_msgq_rspq_dbell_ready
, rspq
)) {
461 bfa_msgq_rspq_dbell_ready(rspq
);
466 bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq
*rspq
, struct bfi_mbmsg
*mb
)
468 struct bfi_msgq_i2h_db
*dbell
= (struct bfi_msgq_i2h_db
*)mb
;
469 struct bfi_msgq_mhdr
*msghdr
;
474 rspq
->producer_index
= ntohs(dbell
->idx
.rspq_pi
);
476 while (rspq
->consumer_index
!= rspq
->producer_index
) {
477 rspq_qe
= (u8
*)rspq
->addr
.kva
;
478 rspq_qe
+= (rspq
->consumer_index
* BFI_MSGQ_RSP_ENTRY_SIZE
);
479 msghdr
= (struct bfi_msgq_mhdr
*)rspq_qe
;
481 mc
= msghdr
->msg_class
;
482 num_entries
= ntohs(msghdr
->num_entries
);
484 if ((mc
>= BFI_MC_MAX
) || (rspq
->rsphdlr
[mc
].cbfn
== NULL
))
487 (rspq
->rsphdlr
[mc
].cbfn
)(rspq
->rsphdlr
[mc
].cbarg
, msghdr
);
489 BFA_MSGQ_INDX_ADD(rspq
->consumer_index
, num_entries
,
493 bfa_fsm_send_event(rspq
, RSPQ_E_RESP
);
497 bfa_msgq_rspq_attach(struct bfa_msgq_rspq
*rspq
, struct bfa_msgq
*msgq
)
499 rspq
->depth
= BFA_MSGQ_RSPQ_NUM_ENTRY
;
501 bfa_fsm_set_state(rspq
, rspq_sm_stopped
);
505 bfa_msgq_init_rsp(struct bfa_msgq
*msgq
,
506 struct bfi_mbmsg
*mb
)
508 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_INIT_RESP
);
509 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_INIT_RESP
);
513 bfa_msgq_init(void *arg
)
515 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)arg
;
516 struct bfi_msgq_cfg_req
*msgq_cfg
=
517 (struct bfi_msgq_cfg_req
*)&msgq
->init_mb
.msg
[0];
519 memset(msgq_cfg
, 0, sizeof(struct bfi_msgq_cfg_req
));
520 bfi_h2i_set(msgq_cfg
->mh
, BFI_MC_MSGQ
, BFI_MSGQ_H2I_INIT_REQ
, 0);
521 msgq_cfg
->mh
.mtag
.i2htok
= 0;
523 bfa_dma_be_addr_set(msgq_cfg
->cmdq
.addr
, msgq
->cmdq
.addr
.pa
);
524 msgq_cfg
->cmdq
.q_depth
= htons(msgq
->cmdq
.depth
);
525 bfa_dma_be_addr_set(msgq_cfg
->rspq
.addr
, msgq
->rspq
.addr
.pa
);
526 msgq_cfg
->rspq
.q_depth
= htons(msgq
->rspq
.depth
);
528 bfa_nw_ioc_mbox_queue(msgq
->ioc
, &msgq
->init_mb
, NULL
, NULL
);
532 bfa_msgq_isr(void *cbarg
, struct bfi_mbmsg
*msg
)
534 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)cbarg
;
536 switch (msg
->mh
.msg_id
) {
537 case BFI_MSGQ_I2H_INIT_RSP
:
538 bfa_msgq_init_rsp(msgq
, msg
);
541 case BFI_MSGQ_I2H_DOORBELL_PI
:
542 bfa_msgq_rspq_pi_update(&msgq
->rspq
, msg
);
545 case BFI_MSGQ_I2H_DOORBELL_CI
:
546 bfa_msgq_cmdq_ci_update(&msgq
->cmdq
, msg
);
549 case BFI_MSGQ_I2H_CMDQ_COPY_REQ
:
550 bfa_msgq_cmdq_copy_req(&msgq
->cmdq
, msg
);
559 bfa_msgq_notify(void *cbarg
, enum bfa_ioc_event event
)
561 struct bfa_msgq
*msgq
= (struct bfa_msgq
*)cbarg
;
564 case BFA_IOC_E_ENABLED
:
565 bfa_wc_init(&msgq
->init_wc
, bfa_msgq_init
, msgq
);
566 bfa_wc_up(&msgq
->init_wc
);
567 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_START
);
568 bfa_wc_up(&msgq
->init_wc
);
569 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_START
);
570 bfa_wc_wait(&msgq
->init_wc
);
573 case BFA_IOC_E_DISABLED
:
574 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_STOP
);
575 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_STOP
);
578 case BFA_IOC_E_FAILED
:
579 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_FAIL
);
580 bfa_fsm_send_event(&msgq
->rspq
, RSPQ_E_FAIL
);
589 bfa_msgq_meminfo(void)
591 return roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
) +
592 roundup(BFA_MSGQ_RSPQ_SIZE
, BFA_DMA_ALIGN_SZ
);
596 bfa_msgq_memclaim(struct bfa_msgq
*msgq
, u8
*kva
, u64 pa
)
598 msgq
->cmdq
.addr
.kva
= kva
;
599 msgq
->cmdq
.addr
.pa
= pa
;
601 kva
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
602 pa
+= roundup(BFA_MSGQ_CMDQ_SIZE
, BFA_DMA_ALIGN_SZ
);
604 msgq
->rspq
.addr
.kva
= kva
;
605 msgq
->rspq
.addr
.pa
= pa
;
609 bfa_msgq_attach(struct bfa_msgq
*msgq
, struct bfa_ioc
*ioc
)
613 bfa_msgq_cmdq_attach(&msgq
->cmdq
, msgq
);
614 bfa_msgq_rspq_attach(&msgq
->rspq
, msgq
);
616 bfa_nw_ioc_mbox_regisr(msgq
->ioc
, BFI_MC_MSGQ
, bfa_msgq_isr
, msgq
);
617 bfa_q_qe_init(&msgq
->ioc_notify
);
618 bfa_ioc_notify_init(&msgq
->ioc_notify
, bfa_msgq_notify
, msgq
);
619 bfa_nw_ioc_notify_register(msgq
->ioc
, &msgq
->ioc_notify
);
623 bfa_msgq_regisr(struct bfa_msgq
*msgq
, enum bfi_mclass mc
,
624 bfa_msgq_mcfunc_t cbfn
, void *cbarg
)
626 msgq
->rspq
.rsphdlr
[mc
].cbfn
= cbfn
;
627 msgq
->rspq
.rsphdlr
[mc
].cbarg
= cbarg
;
631 bfa_msgq_cmd_post(struct bfa_msgq
*msgq
, struct bfa_msgq_cmd_entry
*cmd
)
633 if (ntohs(cmd
->msg_hdr
->num_entries
) <=
634 BFA_MSGQ_FREE_CNT(&msgq
->cmdq
)) {
635 __cmd_copy(&msgq
->cmdq
, cmd
);
636 call_cmdq_ent_cbfn(cmd
, BFA_STATUS_OK
);
637 bfa_fsm_send_event(&msgq
->cmdq
, CMDQ_E_POST
);
639 list_add_tail(&cmd
->qe
, &msgq
->cmdq
.pending_q
);
644 bfa_msgq_rsp_copy(struct bfa_msgq
*msgq
, u8
*buf
, size_t buf_len
)
646 struct bfa_msgq_rspq
*rspq
= &msgq
->rspq
;
647 size_t len
= buf_len
;
652 ci
= rspq
->consumer_index
;
653 src
= (u8
*)rspq
->addr
.kva
;
654 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);
658 to_copy
= (len
< BFI_MSGQ_RSP_ENTRY_SIZE
) ?
659 len
: BFI_MSGQ_RSP_ENTRY_SIZE
;
660 memcpy(dst
, src
, to_copy
);
662 dst
+= BFI_MSGQ_RSP_ENTRY_SIZE
;
663 BFA_MSGQ_INDX_ADD(ci
, 1, rspq
->depth
);
664 src
= (u8
*)rspq
->addr
.kva
;
665 src
+= (ci
* BFI_MSGQ_RSP_ENTRY_SIZE
);