// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	cq->sq.head = PF_FW_ATQH;
	cq->sq.tail = PF_FW_ATQT;
	cq->sq.len = PF_FW_ATQLEN;
	cq->sq.bah = PF_FW_ATQBAH;
	cq->sq.bal = PF_FW_ATQBAL;
	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
	cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
	cq->sq.head_mask = PF_FW_ATQH_ATQH_M;

	cq->rq.head = PF_FW_ARQH;
	cq->rq.tail = PF_FW_ARQT;
	cq->rq.len = PF_FW_ARQLEN;
	cq->rq.bah = PF_FW_ARQBAH;
	cq->rq.bal = PF_FW_ARQBAL;
	cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
	cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
	cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}
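
/*
 * Added note (not from the original source): with the masks programmed in
 * ice_adminq_init_regs(), the "alive" test reads the ATQ length register
 * and checks that the length field still holds num_sq_entries and that the
 * enable bit is still set. For a hypothetical 64-entry queue:
 *
 *	reg = rd32(hw, cq->sq.len);
 *	alive = (reg & (len_mask | len_ena_mask)) == (64 | len_ena_mask);
 *
 * A firmware reset clears the register, so the comparison fails afterwards.
 */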

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}
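
/*
 * Added commentary: the dmam_/devm_-managed variants tie these allocations
 * to the underlying struct device, so an aborted init path that misses an
 * explicit free does not leak them; the explicit dmam_free_coherent() and
 * devm_kfree() calls in this file simply release the memory earlier than
 * device teardown would.
 */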

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;

	return 0;
}

/**
 * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
			   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
	cq->sq.desc_buf.va = NULL;
	cq->sq.desc_buf.pa = 0;
	cq->sq.desc_buf.size = 0;
}

/**
 * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
			   cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
	cq->rq.desc_buf.va = NULL;
	cq->rq.desc_buf.pa = 0;
	cq->rq.desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);

		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->opcode = 0;

		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}

	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}
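
/*
 * Illustrative summary (added for exposition): after ice_alloc_rq_bufs()
 * every ARQ descriptor is pre-armed to receive one event, i.e. for entry i:
 *
 *	flags   = ICE_AQ_FLAG_BUF (plus ICE_AQ_FLAG_LB for large buffers)
 *	datalen = cq->rq_buf_size
 *	addr    = DMA address of cq->rq.r.rq_bi[i]
 *
 * Firmware overwrites datalen with the actual event size; the entry is
 * re-armed with the same values in ice_clean_rq_elem().
 */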

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}

	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}
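
/*
 * Added note: all ATQ command buffers are DMA-mapped up front here, even
 * though a given command uses one only when it carries payload; as a
 * result, ice_sq_send_cmd() never has to allocate or map memory while
 * holding sq_lock.
 */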

/**
 * ice_free_rq_bufs - Free ARQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* free descriptors */
	for (i = 0; i < cq->num_rq_entries; i++) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}

	/* free the dma header */
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
}

/**
 * ice_free_sq_bufs - Free ATQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < cq->num_sq_entries; i++)
		if (cq->sq.r.sq_bi[i].pa) {
			dmam_free_coherent(ice_hw_to_dev(hw),
					   cq->sq.r.sq_bi[i].size,
					   cq->sq.r.sq_bi[i].va,
					   cq->sq.r.sq_bi[i].pa);
			cq->sq.r.sq_bi[i].va = NULL;
			cq->sq.r.sq_bi[i].pa = 0;
			cq->sq.r.sq_bi[i].size = 0;
		}

	/* free the buffer info list */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);

	/* free the dma header */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);

	/* set starting point */
	wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
	wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
	wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->sq.bal);
	if (reg != lower_32_bits(cq->sq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}
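
/*
 * Worked example (hypothetical values, added for clarity): with 64 SQ
 * entries the length register is programmed as
 *
 *	wr32(hw, cq->sq.len, 64 | PF_FW_ATQLEN_ATQENABLE_M);
 *
 * which is exactly the value ice_check_sq_alive() expects to read back
 * while the queue is healthy.
 */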

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);

	/* set starting point */
	wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
	wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
	wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->rq.bal);
	if (reg != lower_32_bits(cq->rq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}
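
/*
 * Added note: writing tail = num_rq_entries - 1 hands every pre-armed
 * receive descriptor except one to firmware; holding one slot back is the
 * usual ring convention that lets head == tail mean "empty" rather than
 * "full".
 */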

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_sq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_rq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ice_free_sq_bufs(hw, cq);
	ice_free_ctrlq_sq_ring(hw, cq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @fw_branch: The "branch" of FW, typically describes the device type
 * @fw_major: The major version of the FW API
 * @fw_minor: The minor version increment of the FW API
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
{
	if (fw_branch != EXP_FW_API_VER_BRANCH)
		return false;
	if (fw_major != EXP_FW_API_VER_MAJOR)
		return false;
	if (fw_minor != EXP_FW_API_VER_MINOR)
		return false;
	return true;
}
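
/*
 * Added note: this is a strict triple match; firmware reporting, say, API
 * version 1.3.0 loads only if EXP_FW_API_VER_BRANCH/MAJOR/MINOR are exactly
 * 1/3/0. Any mismatch surfaces as ICE_ERR_FW_API_VER from
 * ice_init_check_adminq().
 */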

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ice_free_rq_bufs(hw, cq);
	ice_free_ctrlq_rq_ring(hw, cq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
			      hw->api_min_ver)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	if (cq->rq.count) {
		ice_shutdown_rq(hw, cq);
		mutex_destroy(&cq->rq_lock);
	}

	if (cq->sq.count) {
		ice_shutdown_sq(hw, cq);
		mutex_destroy(&cq->sq_lock);
	}

	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		goto init_ctrlq_destroy_locks;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
init_ctrlq_destroy_locks:
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	return ice_init_check_adminq(hw);
}
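
/*
 * Usage sketch (illustrative; the sizing values below are assumptions, not
 * taken from this file): callers fill in the control queue dimensions
 * before bringing up the AdminQ, e.g. during probe:
 *
 *	hw->adminq.num_sq_entries = 64;
 *	hw->adminq.num_rq_entries = 64;
 *	hw->adminq.sq_buf_size = 4096;
 *	hw->adminq.rq_buf_size = 4096;
 *	if (ice_init_all_ctrlq(hw))
 *		goto err_init_ctrlq;
 */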

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	default:
		return;
	}

	if (cq->sq.count) {
		ice_shutdown_sq(hw, cq);
		mutex_destroy(&cq->sq_lock);
	}
	if (cq->rq.count) {
		ice_shutdown_rq(hw, cq);
		mutex_destroy(&cq->rq_lock);
	}
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}
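
/*
 * Worked example (hypothetical numbers): with sq->count == 64,
 * next_to_clean == 10 and a hardware head of 14, the loop above zeroes
 * entries 10..13 and leaves next_to_clean == 14; the returned
 * ICE_CTL_Q_DESC_UNUSED(sq) is the number of slots the caller may still
 * post before the ring is full.
 */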

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}
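
/*
 * Added note: completion is detected purely by the head pointer catching
 * up to next_to_use, so a command counts as "done" once firmware has
 * consumed its descriptor; the descriptor writeback copied back in
 * ice_sq_send_cmd() is what carries the actual return value.
 */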

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the q,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		memcpy(details, cd, sizeof(*details));
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		mdelay(1);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}
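
/*
 * Usage sketch (illustrative; ice_aqc_opc_get_ver is assumed to be defined
 * in the adminq command headers): a direct command is built with
 * ice_fill_dflt_direct_cmd_desc() below and posted through this routine:
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * On success, firmware's writeback has been copied back into 'desc'.
 */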

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
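
/*
 * Added note: ICE_AQ_FLAG_SI is the solicited-interrupt style flag carried
 * over from this descriptor format's i40e lineage; buffer-related flags
 * (ICE_AQ_FLAG_BUF, ICE_AQ_FLAG_LB) are set later by ice_sq_send_cmd()
 * based on buf/buf_size, so callers normally only pick the opcode here.
 */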

/**
 * ice_clean_rq_elem
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
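
/*
 * Usage sketch (illustrative; buffer sizing is an assumption): a service
 * task typically drains pending AdminQ events like so:
 *
 *	struct ice_rq_event_info event = { 0 };
 *	u16 pending;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return;
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		// dispatch on le16_to_cpu(event.desc.opcode)
 *	} while (pending);
 *	kfree(event.msg_buf);
 */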