// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/delay.h>
#include "i40e_alloc.h"
#include "i40e_register.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	int ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 (hw->aq.num_asq_entries *
					  sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					   sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}
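
/*
 * Note: the ASQ pairs one DMA descriptor ring (desc_buf, shared with
 * firmware) with a host-only array of i40e_asq_cmd_details (cmd_buf).
 * Only the descriptors need aligned, DMA-able memory; the details are
 * driver bookkeeping the hardware never sees, so plain virtual memory
 * suffices.
 */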

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	int ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 (hw->aq.num_arq_entries *
					  sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int ret_code;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
					  (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	struct i40e_dma_mem *bi;
	int ret_code;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
					  (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static int i40e_config_asq_regs(struct i40e_hw *hw)
{
	int ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, I40E_PF_ATQH, 0);
	wr32(hw, I40E_PF_ATQT, 0);

	/* set starting point */
	wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, I40E_PF_ATQBAL);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = -EIO;

	return ret_code;
}
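
/*
 * Reading back ATQBAL is a cheap sanity check: if the device is in
 * reset, absent, or otherwise not accepting config writes, the readback
 * will not match the programmed address and we fail with -EIO instead
 * of silently running with an unconfigured queue.
 */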

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static int i40e_config_arq_regs(struct i40e_hw *hw)
{
	int ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, I40E_PF_ARQH, 0);
	wr32(hw, I40E_PF_ARQT, 0);

	/* set starting point */
	wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, I40E_PF_ARQT, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, I40E_PF_ARQBAL);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = -EIO;

	return ret_code;
}
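
/*
 * Unlike the ASQ, the ARQ hands its buffers to firmware up front:
 * writing tail = num_arq_entries - 1 posts every pre-allocated buffer
 * except one, keeping one slot in reserve so head and tail can never
 * alias (the usual full-vs-empty ring convention).
 */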

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static int i40e_init_asq(struct i40e_hw *hw)
{
	int ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = -EIO;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
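
/*
 * Note the init order: ring memory first, then per-entry buffers, then
 * the hardware registers; hw->aq.asq.count is only set once all three
 * succeed, and it doubles as the "queue is initialized" flag checked at
 * the top of this function and by i40e_shutdown_asq(). i40e_init_arq()
 * below follows the same shape.
 */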

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static int i40e_init_arq(struct i40e_hw *hw)
{
	int ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = -EIO;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static int i40e_shutdown_asq(struct i40e_hw *hw)
{
	int ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = -EBUSY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, I40E_PF_ATQH, 0);
	wr32(hw, I40E_PF_ATQT, 0);
	wr32(hw, I40E_PF_ATQLEN, 0);
	wr32(hw, I40E_PF_ATQBAL, 0);
	wr32(hw, I40E_PF_ATQBAH, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static int i40e_shutdown_arq(struct i40e_hw *hw)
{
	int ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = -EBUSY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, I40E_PF_ARQH, 0);
	wr32(hw, I40E_PF_ARQT, 0);
	wr32(hw, I40E_PF_ARQLEN, 0);
	wr32(hw, I40E_PF_ARQBAL, 0);
	wr32(hw, I40E_PF_ARQBAH, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 *  i40e_set_hw_caps - set HW flags
 *  @hw: pointer to the hardware structure
 **/
static void i40e_set_hw_caps(struct i40e_hw *hw)
{
	bitmap_zero(hw->caps, I40E_HW_CAPS_NBITS);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
		if (i40e_is_aq_api_ver_ge(hw, 1,
					  I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
			set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
			/* The ability to RX (not drop) 802.1ad frames */
			set_bit(I40E_HW_CAP_802_1AD, hw->caps);
		}
		if (i40e_is_aq_api_ver_ge(hw, 1, 5)) {
			/* Supported in FW API version higher than 1.4 */
			set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
		}
		if (i40e_is_fw_ver_lt(hw, 4, 33)) {
			set_bit(I40E_HW_CAP_RESTART_AUTONEG, hw->caps);
			/* No DCB support for FW < v4.33 */
			set_bit(I40E_HW_CAP_NO_DCB_SUPPORT, hw->caps);
		}
		if (i40e_is_fw_ver_lt(hw, 4, 3)) {
			/* Disable FW LLDP if FW < v4.3 */
			set_bit(I40E_HW_CAP_STOP_FW_LLDP, hw->caps);
		}
		if (i40e_is_fw_ver_ge(hw, 4, 40)) {
			/* Use the FW Set LLDP MIB API if FW >= v4.40 */
			set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
		}
		if (i40e_is_fw_ver_ge(hw, 6, 0)) {
			/* Enable PTP L4 if FW > v6.0 */
			set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
		}
		break;
	case I40E_MAC_X722:
		set_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps);
		set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
		set_bit(I40E_HW_CAP_RSS_AQ, hw->caps);
		set_bit(I40E_HW_CAP_128_QP_RSS, hw->caps);
		set_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
		set_bit(I40E_HW_CAP_WB_ON_ITR, hw->caps);
		set_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, hw->caps);
		set_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, hw->caps);
		set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
		set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
		set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
		set_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, hw->caps);
		set_bit(I40E_HW_CAP_OUTER_UDP_CSUM, hw->caps);

		if (rd32(hw, I40E_GLQF_FDEVICTENA(1)) !=
		    I40E_FDEVICT_PCTYPE_DEFAULT) {
			hw_warn(hw, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
			clear_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
		}

		if (i40e_is_aq_api_ver_ge(hw, 1,
					  I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
			set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);

		if (i40e_is_aq_api_ver_ge(hw, 1,
					  I40E_MINOR_VER_GET_LINK_INFO_X722))
			set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);

		if (i40e_is_aq_api_ver_ge(hw, 1,
					  I40E_MINOR_VER_FW_REQUEST_FEC_X722))
			set_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps);

		fallthrough;
	default:
		break;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
		set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if (i40e_is_aq_api_ver_ge(hw, 1, 7))
		set_bit(I40E_HW_CAP_802_1AD, hw->caps);

	if (i40e_is_aq_api_ver_ge(hw, 1, 8))
		set_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps);

	if (i40e_is_aq_api_ver_ge(hw, 1, 9))
		set_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps);
}
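
/*
 * Consumers query these flags with the regular bitmap helpers, e.g.
 * (illustrative sketch, not a call site in this file):
 *
 *	if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps))
 *		... use the PHY access AQ commands ...
 */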

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
int i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	int retry = 0;
	int ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = -EIO;
		goto init_adminq_exit;
	}

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != -EIO)
			break;
		retry++;
		msleep(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != 0)
		goto init_adminq_free_arq;

	/* Some features were introduced in different FW API version
	 * for different MAC type.
	 */
	i40e_set_hw_caps(hw);

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR + 1, 0)) {
		ret_code = -EIO;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}
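
/*
 * The get_firmware_version call above doubles as a readiness probe:
 * right after reset the firmware may not answer yet, so an -EIO result
 * triggers i40e_resume_aq() and another attempt, up to ten tries,
 * before initialization is abandoned.
 */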

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
void i40e_shutdown_adminq(struct i40e_hw *hw)
{
	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, I40E_PF_ATQH) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
			   "ntc %d head %d.\n", ntc, rd32(hw, I40E_PF_ATQH));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
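
/*
 * Cleaning is driven purely by the head register: every descriptor
 * behind ATQH has been consumed by firmware, so its optional completion
 * callback runs on a stack copy of the descriptor and the ring slot is
 * zeroed for reuse before next_to_clean advances.
 */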

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, I40E_PF_ATQH) == hw->aq.asq.next_to_use;
}
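
/*
 * Firmware advances ATQH as it consumes descriptors, so head catching
 * up with next_to_use means every posted command has been processed.
 */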

/**
 *  i40e_asq_send_command_atomic_exec - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @is_atomic_context: is the function called in an atomic context?
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details,
				  bool is_atomic_context)
{
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	int status = 0;
	u32 val = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = -EIO;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	val = rd32(hw, I40E_PF_ATQH);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = -ENOSPC;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = -EINVAL;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = -EINVAL;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = -ENOSPC;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, I40E_PF_ATQT, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;

			if (is_atomic_context)
				udelay(50);
			else
				usleep_range(40, 60);

			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = -EBUSY;
		else
			status = -EIO;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, I40E_PF_ATQLEN) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = -EIO;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = -EIO;
		}
	}

asq_send_command_error:
	return status;
}

/**
 *  i40e_asq_send_command_atomic - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @is_atomic_context: is the function called in an atomic context?
 *
 *  Acquires the lock and calls the main send command execution
 *  routine.
 **/
int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
			     struct i40e_aq_desc *desc,
			     void *buff, /* can be NULL */
			     u16 buff_size,
			     struct i40e_asq_cmd_details *cmd_details,
			     bool is_atomic_context)
{
	int status;

	mutex_lock(&hw->aq.asq_mutex);
	status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
						   cmd_details,
						   is_atomic_context);

	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

int
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
		      void *buff, /* can be NULL */ u16 buff_size,
		      struct i40e_asq_cmd_details *cmd_details)
{
	return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
					    cmd_details, false);
}

/**
 *  i40e_asq_send_command_atomic_v2 - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @is_atomic_context: is the function called in an atomic context?
 *  @aq_status: pointer to Admin Queue status return value
 *
 *  Acquires the lock and calls the main send command execution
 *  routine. Returns the last Admin Queue status in aq_status
 *  to avoid race conditions in access to hw->aq.asq_last_status.
 **/
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16 buff_size,
				struct i40e_asq_cmd_details *cmd_details,
				bool is_atomic_context,
				enum i40e_admin_queue_err *aq_status)
{
	int status;

	mutex_lock(&hw->aq.asq_mutex);
	status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
						   buff_size,
						   cmd_details,
						   is_atomic_context);
	if (aq_status)
		*aq_status = hw->aq.asq_last_status;
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

int
i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
			 void *buff, /* can be NULL */ u16 buff_size,
			 struct i40e_asq_cmd_details *cmd_details,
			 enum i40e_admin_queue_err *aq_status)
{
	return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
					       cmd_details, true, aq_status);
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc: pointer to the temp descriptor (non DMA mem)
 *  @opcode: the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
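
/*
 * Usage sketch (illustrative only; the opcode choice and error handling
 * are hypothetical, not a call site in this file):
 *
 *	struct i40e_aq_desc desc;
 *	int status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc,
 *					  i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (status)
 *		return status;
 */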

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
int i40e_clean_arq_element(struct i40e_hw *hw,
			   struct i40e_arq_event_info *e,
			   u16 *pending)
{
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = -EIO;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, I40E_PF_ARQH) & I40E_PF_ARQH_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = -EALREADY;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = -EIO;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, I40E_PF_ARQT, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
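
/*
 * The *pending computation handles ring wrap: with count = 16,
 * ntc = 14 and ntu = 2, pending = 16 + (2 - 14) = 4, i.e. entries
 * 14, 15, 0 and 1 are still waiting to be cleaned.
 */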

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}