// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

/* MSGQ module source file. */

#include "bfi.h"
#include "bfa_msgq.h"
#include "bfa_ioc.h"

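/*
 * Invoke a command entry's completion callback exactly once: latch the
 * callback and its argument, clear both on the entry so the callback
 * cannot fire again, then call the latched callback (if any) with the
 * given status.
 */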
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
{									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
}

static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);

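/*
 * Command queue (host-to-IOC) state machine. The cmdq stays stopped
 * until the IOC is enabled, waits for the firmware's INIT response,
 * and then bounces between "ready" and "doorbell wait" as posts update
 * the producer index.
 */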
enum cmdq_event {
	CMDQ_E_START = 1,
	CMDQ_E_STOP = 2,
	CMDQ_E_FAIL = 3,
	CMDQ_E_POST = 4,
	CMDQ_E_INIT_RESP = 5,
	CMDQ_E_DB_READY = 6,
};

bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
		   enum cmdq_event);

static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	while (!list_empty(&cmdq->pending_q)) {
		cmdq_ent = list_first_entry(&cmdq->pending_q,
					    struct bfa_msgq_cmd_entry, qe);
		list_del(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}

static void
cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_START:
		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
		break;

	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		/* No-op */
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}

static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_INIT_RESP:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}

static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}

static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_DB_READY:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}

static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				   bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}

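/*
 * Copy one command into the circular command queue, splitting it across
 * as many BFI_MSGQ_CMD_ENTRY_SIZE slots as it needs and advancing the
 * producer index (with wrap) after each slot.
 */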
static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
	size_t len = cmd->msg_size;
	int num_entries = 0;
	size_t to_copy;
	u8 *src, *dst;

	src = (u8 *)cmd->msg_hdr;
	dst = (u8 *)cmdq->addr.kva;
	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);

	while (len) {
		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
			  len : BFI_MSGQ_CMD_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		src += BFI_MSGQ_CMD_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
		dst = (u8 *)cmdq->addr.kva;
		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
		num_entries++;
	}
}

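/*
 * Firmware doorbell: the IOC advanced its consumer index. Record it,
 * then drain as many pending commands as now fit in the queue.
 */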
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd = list_first_entry(&cmdq->pending_q,
				       struct bfa_msgq_cmd_entry, qe);
		if (ntohs(cmd->msg_hdr->num_entries) <=
		    BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}

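/*
 * Command-queue copy protocol: on a BFI_MSGQ_I2H_CMDQ_COPY_REQ the
 * firmware asks for a region of the command queue to be sent back.
 * The host replies in BFI_CMD_COPY_SZ-sized chunks, one mailbox
 * message per chunk, until bytes_to_copy reaches zero.
 */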
static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		 cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				   bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}

static void
bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
{
	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
	INIT_LIST_HEAD(&cmdq->pending_q);
	cmdq->msgq = msgq;
	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
}

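/*
 * Response queue (IOC-to-host) state machine. It mirrors the cmdq FSM:
 * stopped until the IOC is enabled, waiting on the INIT response, then
 * alternating between "ready" and "doorbell wait" as responses arrive.
 */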
static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);

enum rspq_event {
	RSPQ_E_START = 1,
	RSPQ_E_STOP = 2,
	RSPQ_E_FAIL = 3,
	RSPQ_E_RESP = 4,
	RSPQ_E_INIT_RESP = 5,
	RSPQ_E_DB_READY = 6,
};

bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
		   enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
		   enum rspq_event);

static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}

static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}

static void
rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_FAIL:
	case RSPQ_E_STOP:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_INIT_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}

static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}

static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;

	case RSPQ_E_DB_READY:
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}

static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				   bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}

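/*
 * Firmware doorbell: the IOC advanced the response queue producer
 * index. Dispatch each response to the handler registered for its
 * message class, advance the consumer index, and acknowledge with
 * RSPQ_E_RESP so the doorbell is rung back.
 */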
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				  rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}

static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}

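/*
 * Queue initialization: once both queues have entered init_wait (the
 * init_wc wait-counter drops to zero), send BFI_MSGQ_H2I_INIT_REQ with
 * each queue's DMA address and depth; the firmware's INIT response
 * moves both state machines out of init_wait.
 */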
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		  struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}

static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}

static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		BUG_ON(1);
	}
}

static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}

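/* Total DMA memory needed for both queues, each rounded up for alignment. */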
u32
bfa_msgq_meminfo(void)
{
	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
}

void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa = pa;

	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}

void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}

void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn = cbfn;
	msgq->rspq.rsphdlr[mc].cbarg = cbarg;
}

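/*
 * Post a command: copy it straight into the command queue if there is
 * room, completing the caller's callback immediately; otherwise park it
 * on pending_q until a consumer-index update frees space.
 */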
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
	    BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}

void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
			  len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}