/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
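/* Completion callback installed for BLOCK/EBLOCK ramrods: it stores the
 * firmware return code in the qed_spq_comp_done cookie and raises ->done
 * so the thread polling in qed_spq_block() can observe the completion.
 */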
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data,
                                u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->done                 = 0x1;
        comp_done->fw_return_code       = fw_return_code;

        /* make update visible to waiting thread */
        smp_wmb();
}
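/* Busy-wait (with sleeps) for a BLOCK/EBLOCK ramrod to complete.  If the
 * polling budget is exhausted, request an MCP drain and poll once more;
 * returns 0 once the completion cookie is marked done, -EBUSY otherwise.
 */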
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;
        int rc;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count) {
                /* validate we receive completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != 0)
                DP_NOTICE(p_hwfn, "MCP drain failed\n");

        /* Retry after drain */
        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        while (sleep_count) {
                /* validate we receive completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }

        DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

        return -EBUSY;
}
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
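/* Prepare an SPQ entry for posting: blocking modes get the blocking
 * completion callback wired in, and the ramrod header is dumped for
 * debug; unknown completion modes are rejected.
 */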
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                   struct qed_spq_entry *p_ent)
{
        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}
/***************************************************************************
* HSI access
***************************************************************************/
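/* Program the core connection context of the SPQ CID: enable the relevant
 * XSTORM aggregation flags, select the loopback TC physical queue and
 * write the SPQ / ConsQ chain base addresses into the context.
 */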
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        struct qed_cxt_info cxt_info;
        struct core_conn_context *p_cxt;
        union qed_qm_pq_params pq_params;
        u16 pq;
        int rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}
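/* Copy a prepared entry onto the SPQ chain, stamp it with the current
 * producer index as its 'echo' (used later to match the completion), and
 * ring the XCM doorbell to hand it to firmware.
 */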
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq,
                           struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

        /* validate producer is up to date */
        rmb();

        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* do not reorder */
        barrier();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure the doorbell was rung */
        mmiowb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}
/***************************************************************************
* Asynchronous events
***************************************************************************/
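/* No asynchronous protocol events are handled here yet; anything that
 * arrives flagged as async is reported and treated as an error.
 */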
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        DP_NOTICE(p_hwfn,
                  "Unknown Async completion for protocol: %d\n",
                  p_eqe->protocol_id);
        return -EINVAL;
}
/***************************************************************************
* EQ API
***************************************************************************/
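/* Mirror the EQ index into the USTORM EQE consumer location in the
 * device's internal RAM, so firmware knows how far the driver has
 * processed the event queue.
 */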
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
                        u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}
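/* EQ completion handler, registered on the slowpath status block: walks the
 * event ring up to a snapshot of the firmware consumer, dispatching async
 * events and SPQ (ramrod) completions, then recycles the consumed entries
 * and publishes the new producer value.
 */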
int qed_eq_completion(struct qed_hwfn *p_hwfn,
                      void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would comply
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
                            u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
                return NULL;
        }

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn,
                            qed_eq_completion,
                            p_eq,
                            &p_eq->eq_sb_index,
                            &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        qed_eq_free(p_hwfn, p_eq);
        return NULL;
}
void qed_eq_setup(struct qed_hwfn *p_hwfn,
                  struct qed_eq *p_eq)
{
        qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
                 struct qed_eq *p_eq)
{
        if (!p_eq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_eq->chain);
        kfree(p_eq);
}
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(
        struct qed_hwfn *p_hwfn,
        struct eth_slow_path_rx_cqe *cqe,
        enum protocol_type protocol)
{
        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}
/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
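/* (Re)initialize the slowpath queue: reset the bookkeeping lists and
 * statistics, hand every pre-allocated entry (with its ramrod data DMA
 * address) to the free pool, acquire the CORE CID and program the HW
 * context, and reset the SPQ chain itself.
 */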
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        u32 i;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        for (i = 0; i < p_spq->chain.capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}
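/* Allocate the SPQ control structure, a single-page chain of
 * slow_path_elements, and one DMA-coherent buffer that provides a
 * qed_spq_entry (ramrod data included) per chain element.
 */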
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        struct qed_spq_entry *p_virt = NULL;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
                return -ENOMEM;
        }

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            0, /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    p_spq->chain.capacity *
                                    sizeof(struct qed_spq_entry),
                                    &p_phys,
                                    GFP_KERNEL);

        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (!p_spq)
                return;

        if (p_spq->p_virt)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_spq->chain.capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt,
                                  p_spq->p_phys);

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
}
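/* Hand out an SPQ entry: take one from the free pool if possible, otherwise
 * allocate a fresh one that is destined for the unlimited_pending queue.
 */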
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry,
                                         list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}
/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                          struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}
/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry *p_ent,
                  enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {

                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry,
                                                 list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}
/***************************************************************************
* Posting new Ramrods
***************************************************************************/
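/* Post entries from the given list to the hardware SPQ, but only while more
 * than 'keep_reserve' chain elements remain free - this keeps room reserved
 * for high-priority ramrods.  Posted entries move to completion_pending.
 */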
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head,
                             u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);
                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}
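/* Move entries queued on unlimited_pending into the regular pending queue
 * for as long as free-pool entries are available, then post the pending
 * queue while keeping the default high-priority reserve on the chain.
 */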
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry,
                                         list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
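/* Main entry point for posting a ramrod.  The entry is completed (callback
 * wired in), queued by priority and pushed to hardware; EBLOCK entries are
 * then waited upon synchronously and returned to the free pool here, since
 * the completion path cannot free them without racing with this waiter.
 */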
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent,
                 u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}
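/* Ramrod completion handler, invoked from EQ processing: match the EQE's
 * 'echo' against the completion_pending list, use the completion bitmap to
 * release chain elements in order even when completions arrive out of
 * order, run the entry's callback, and then try to post more pending work.
 */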
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq *p_spq;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_spq_entry *tmp;
        struct qed_spq_entry *found = NULL;
        int rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
                                 list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                bitmap_clear(p_spq->p_comp_bitmap,
                                             p_spq->comp_bitmap_idx,
                                             SPQ_RING_SIZE);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE completes\n");
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);

        if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
                /* EBLOCK is responsible for freeing its own entry */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
                return NULL;
        }

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80,
                            &p_consq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
                goto consq_allocate_fail;
        }

        return p_consq;

consq_allocate_fail:
        qed_consq_free(p_hwfn, p_consq);
        return NULL;
}
void qed_consq_setup(struct qed_hwfn *p_hwfn,
                     struct qed_consq *p_consq)
{
        qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
                    struct qed_consq *p_consq)
{
        if (!p_consq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_consq->chain);
        kfree(p_consq);
}