PM / sleep: Asynchronous threads for suspend_noirq
[linux/fpc-iii.git] / drivers / net / ethernet / brocade / bna / bfa_msgq.c
blob55067d0d25cfd3ab0ab541e3c7ac94318cfd3498
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
19 /* MSGQ module source file. */
21 #include "bfi.h"
22 #include "bfa_msgq.h"
23 #include "bfa_ioc.h"
/* Invoke and clear a command entry's completion callback.
 *
 * The callback pointer and argument are snapshotted and NULLed before the
 * call, so the callback may safely re-arm or re-post the same entry.
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside unbraced if/else bodies (the original bare-brace
 * form produces `{ ... };`, which breaks `if (x) MACRO; else ...`).
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
} while (0)
38 static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
39 static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
/* Events driving the command queue (cmdq) state machine. */
enum cmdq_event {
	CMDQ_E_START = 1,	/* IOC enabled; begin queue init */
	CMDQ_E_STOP = 2,	/* IOC disabled */
	CMDQ_E_FAIL = 3,	/* IOC failure */
	CMDQ_E_POST = 4,	/* a command was copied into the ring */
	CMDQ_E_INIT_RESP = 5,	/* firmware acknowledged the INIT request */
	CMDQ_E_DB_READY = 6,	/* doorbell mailbox message consumed */
};
50 bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
51 bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
52 bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
53 bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
54 enum cmdq_event);
/* Entering stopped: reset all cmdq bookkeeping and fail pending commands. */
static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	/* Commands queued while the IOC is down can never complete;
	 * complete each one with a failure status.
	 */
	while (!list_empty(&cmdq->pending_q)) {
		bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
		bfa_q_qe_init(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}
74 static void
75 cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
77 switch (event) {
78 case CMDQ_E_START:
79 bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
80 break;
82 case CMDQ_E_STOP:
83 case CMDQ_E_FAIL:
84 /* No-op */
85 break;
87 case CMDQ_E_POST:
88 cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
89 break;
91 default:
92 bfa_sm_fault(event);
/* Entering init_wait: signal readiness on the msgq init waiting counter;
 * when both queues arrive here, bfa_msgq_init() fires.
 */
static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}
102 static void
103 cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
105 switch (event) {
106 case CMDQ_E_STOP:
107 case CMDQ_E_FAIL:
108 bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
109 break;
111 case CMDQ_E_POST:
112 cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
113 break;
115 case CMDQ_E_INIT_RESP:
116 if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
117 cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
118 bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
119 } else
120 bfa_fsm_set_state(cmdq, cmdq_sm_ready);
121 break;
123 default:
124 bfa_sm_fault(event);
/* Ready state has no entry action. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
133 static void
134 cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
136 switch (event) {
137 case CMDQ_E_STOP:
138 case CMDQ_E_FAIL:
139 bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
140 break;
142 case CMDQ_E_POST:
143 bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
144 break;
146 default:
147 bfa_sm_fault(event);
/* Entering dbell_wait: push the current producer index to the firmware. */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
157 static void
158 cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
160 switch (event) {
161 case CMDQ_E_STOP:
162 case CMDQ_E_FAIL:
163 bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
164 break;
166 case CMDQ_E_POST:
167 cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
168 break;
170 case CMDQ_E_DB_READY:
171 if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
172 cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
173 bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
174 } else
175 bfa_fsm_set_state(cmdq, cmdq_sm_ready);
176 break;
178 default:
179 bfa_sm_fault(event);
/* Mailbox completion callback: the PI doorbell message has been consumed. */
static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}
/* Build and queue an H2I doorbell mailbox message carrying the cmdq
 * producer index.  If the mailbox is idle the message goes out immediately
 * (bfa_nw_ioc_mbox_queue returns false) and the ready callback is invoked
 * synchronously here.
 */
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
207 static void
208 __cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
210 size_t len = cmd->msg_size;
211 int num_entries = 0;
212 size_t to_copy;
213 u8 *src, *dst;
215 src = (u8 *)cmd->msg_hdr;
216 dst = (u8 *)cmdq->addr.kva;
217 dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
219 while (len) {
220 to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
221 len : BFI_MSGQ_CMD_ENTRY_SIZE;
222 memcpy(dst, src, to_copy);
223 len -= to_copy;
224 src += BFI_MSGQ_CMD_ENTRY_SIZE;
225 BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
226 dst = (u8 *)cmdq->addr.kva;
227 dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
228 num_entries++;
/* Firmware advanced the cmdq consumer index: record it, then drain as many
 * pending commands as now fit.  The pending list is drained strictly in
 * FIFO order -- stop at the first command that still does not fit so that
 * command ordering is preserved.
 */
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd =
		(struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			break;
		}
	}

	/* Only ring the doorbell if something was actually copied. */
	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}
/* Mailbox completion for a copy-response chunk: send the next chunk if the
 * firmware's copy request has not been fully satisfied yet.
 */
static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}
/* Firmware asked for a copy of part of the command queue (offset/len taken
 * from the request); reset the chunk token and start streaming responses.
 */
static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}
/* Send one chunk (up to BFI_CMD_COPY_SZ bytes) of cmdq memory back to the
 * firmware.  token/offset/bytes_to_copy track progress across chunks; the
 * mailbox completion (bfa_msgq_cmdq_copy_next) drives the next chunk.
 */
static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	/* If the mailbox was idle, the message went out immediately;
	 * continue with the next chunk synchronously.
	 */
	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}
308 static void
309 bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
311 cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
312 INIT_LIST_HEAD(&cmdq->pending_q);
313 cmdq->msgq = msgq;
314 bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
317 static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
/* Events driving the response queue (rspq) state machine. */
enum rspq_event {
	RSPQ_E_START = 1,	/* IOC enabled; begin queue init */
	RSPQ_E_STOP = 2,	/* IOC disabled */
	RSPQ_E_FAIL = 3,	/* IOC failure */
	RSPQ_E_RESP = 4,	/* responses consumed; CI needs publishing */
	RSPQ_E_INIT_RESP = 5,	/* firmware acknowledged the INIT request */
	RSPQ_E_DB_READY = 6,	/* doorbell mailbox message consumed */
};
328 bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
329 bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
330 enum rspq_event);
331 bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
332 bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
333 enum rspq_event);
/* Entering stopped: reset all rspq bookkeeping. */
static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}
343 static void
344 rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
346 switch (event) {
347 case RSPQ_E_START:
348 bfa_fsm_set_state(rspq, rspq_sm_init_wait);
349 break;
351 case RSPQ_E_STOP:
352 case RSPQ_E_FAIL:
353 /* No-op */
354 break;
356 default:
357 bfa_sm_fault(event);
/* Entering init_wait: signal readiness on the msgq init waiting counter;
 * when both queues arrive here, bfa_msgq_init() fires.
 */
static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}
367 static void
368 rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
370 switch (event) {
371 case RSPQ_E_FAIL:
372 case RSPQ_E_STOP:
373 bfa_fsm_set_state(rspq, rspq_sm_stopped);
374 break;
376 case RSPQ_E_INIT_RESP:
377 bfa_fsm_set_state(rspq, rspq_sm_ready);
378 break;
380 default:
381 bfa_sm_fault(event);
/* Ready state has no entry action. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
390 static void
391 rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
393 switch (event) {
394 case RSPQ_E_STOP:
395 case RSPQ_E_FAIL:
396 bfa_fsm_set_state(rspq, rspq_sm_stopped);
397 break;
399 case RSPQ_E_RESP:
400 bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
401 break;
403 default:
404 bfa_sm_fault(event);
/* Entering dbell_wait: publish the consumer index, but only while the IOC
 * is operational -- skip the doorbell if the IOC is disabled.
 */
static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}
415 static void
416 rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
418 switch (event) {
419 case RSPQ_E_STOP:
420 case RSPQ_E_FAIL:
421 bfa_fsm_set_state(rspq, rspq_sm_stopped);
422 break;
424 case RSPQ_E_RESP:
425 rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
426 break;
428 case RSPQ_E_DB_READY:
429 if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
430 rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
431 bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
432 } else
433 bfa_fsm_set_state(rspq, rspq_sm_ready);
434 break;
436 default:
437 bfa_sm_fault(event);
/* Mailbox completion callback: the CI doorbell message has been consumed. */
static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;

	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}
/* Build and queue an H2I doorbell mailbox message carrying the rspq
 * consumer index.  If the mailbox is idle the message goes out immediately
 * and the ready callback is invoked synchronously here.
 */
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
/* Firmware advanced the rspq producer index: dispatch every response entry
 * between the consumer and producer indices to its registered per-class
 * handler, then signal RSPQ_E_RESP so the new consumer index is published.
 * Dispatch stops early on an unknown class or a class with no handler.
 */
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		/* Unknown class or no handler registered: stop dispatching. */
		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		/* A response may span several ring entries. */
		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}
/* One-time rspq setup: fixed ring depth, back-pointer to the owning msgq,
 * and start in the stopped state.
 */
static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}
/* Firmware acknowledged the INIT request; move both queue FSMs forward. */
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}
/* Waiting-counter resume callback: both queues are in init_wait, so build
 * the INIT request (DMA addresses and depths of both rings) and queue it
 * to the firmware mailbox.  No completion callback is needed; the reply
 * arrives as a BFI_MSGQ_I2H_INIT_RSP message.
 */
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
/* Mailbox ISR for BFI_MC_MSGQ: demultiplex incoming I2H messages to the
 * appropriate queue handler.  An unknown message id is a driver/firmware
 * protocol violation and crashes deliberately.
 */
static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		BUG_ON(1);
	}
}
/* IOC lifecycle notification: start both queue FSMs on enable, stop or
 * fail them on disable/failure.
 */
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		/* The waiting counter is raised once per queue before its
		 * START event; each queue drops it from init_wait_entry, and
		 * when both have, bfa_wc_wait's resume fires bfa_msgq_init.
		 */
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}
/* Total DMA memory required for both rings, each rounded up to the DMA
 * alignment boundary.
 * NOTE(review): the return-type line was lost in this view of the file;
 * size_t matches the roundup() sum -- confirm against bfa_msgq.h.
 */
size_t
bfa_msgq_meminfo(void)
{
	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
}
/* Carve the caller-provided DMA region (kva/pa) into the cmdq ring followed
 * by the rspq ring, each aligned to BFA_DMA_ALIGN_SZ.  The region must be
 * at least bfa_msgq_meminfo() bytes.
 */
void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa = pa;

	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}
/* Attach the msgq to its IOC: initialize both queues, register the mailbox
 * ISR for the MSGQ message class, and subscribe to IOC lifecycle events.
 */
void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_q_qe_init(&msgq->ioc_notify);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
/* Register the response handler (and its argument) for one message class;
 * bfa_msgq_rspq_pi_update dispatches incoming responses through this table.
 */
void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn	= cbfn;
	msgq->rspq.rsphdlr[mc].cbarg	= cbarg;
}
/* Post a command: copy it into the ring immediately if it fits (completing
 * the entry and signaling CMDQ_E_POST to ring the doorbell), otherwise
 * park it on the pending list until the firmware frees space
 * (see bfa_msgq_cmdq_ci_update).
 */
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq,  struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}
/* Copy buf_len bytes of response data from the rspq ring into buf, reading
 * one BFI_MSGQ_RSP_ENTRY_SIZE entry at a time starting at the current
 * consumer index and wrapping around rspq->depth.  A local index copy is
 * used, so the queue's consumer index itself is not advanced.
 */
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		/* dst advances a full entry even for a short final chunk. */
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}