/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_prototype.h"

/**
 * i40e_init_nvm_ops - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Setup the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
 * Please notice that the NVM term is used here (& in all methods covered
 * in this file) as an equivalent of the FLASH part mapped into the SR.
 * We are accessing FLASH always thru the Shadow RAM.
 **/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	i40e_status ret_code = 0;
	u32 fla, gens;
	u8 sr_size;

	/* The SR size is stored regardless of the nvm programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership via the proper Admin Command.
 **/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
			     enum i40e_aq_resource_access_type access)
{
	i40e_status ret_code = 0;
	u64 gtime, timeout;
	u64 time_left = 0;

	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner timeouts */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			usleep_range(10000, 20000);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							    I40E_NVM_RESOURCE_ID,
							    access, 0, &time_left,
							    NULL);
			if (!ret_code) {
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_i40e_acquire_nvm_exit:
	return ret_code;
}

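/* Usage sketch (illustrative only): callers pair i40e_acquire_nvm() with
 * i40e_release_nvm() around AdminQ based Shadow RAM access, e.g.
 *
 *	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 *	if (!ret_code) {
 *		ret_code = i40e_read_nvm_word_aq(hw, offset, &word);
 *		i40e_release_nvm(hw);
 *	}
 *
 * which is the pattern i40e_read_nvm_word() below follows.
 */
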
/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	if (!hw->nvm.blank_nvm_mode)
		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
}

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 srctl, wait_cnt;

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = 0;
			break;
		}
		udelay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				      >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}

/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer to store the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 **/
static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				    u32 offset, u16 words, void *data,
				    bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector), in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset,  /*bytes*/
					    2 * words,   /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ.
 **/
static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = le16_to_cpu(*(__le16 *)data);

	return ret_code;
}

/**
 * i40e_read_nvm_word - Reads Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, using either the AdminQ or the
 * GLNVM_SRCTL register depending on the HW capabilities.
 **/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
			       u16 *data)
{
	enum i40e_status_code ret_code = 0;

	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (!ret_code) {
			ret_code = i40e_read_nvm_word_aq(hw, offset, data);
			i40e_release_nvm(hw);
		}
	} else {
		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
	}
	return ret_code;
}

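/* Usage sketch (illustrative only): i40e_read_nvm_word() is the generic
 * single-word entry point, e.g. reading the stored software checksum word:
 *
 *	u16 checksum_sr = 0;
 *
 *	ret_code = i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD,
 *				      &checksum_sr);
 *
 * On the AQ path this takes and releases NVM ownership internally; on the
 * SRCTL path no ownership is required.
 */
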
/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the
 * i40e_read_nvm_word_srctl() method.
 **/
static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	i40e_status ret_code = 0;
	u16 index, word;

	/* Loop thru the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The buffer read is preceded by the NVM ownership take
 * and followed by the release.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

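/* Worked example of the chunking above (illustrative only): with 4KB sectors
 * (0x800 words), a read of 0x40 words starting at offset 0x7f0 is split into
 * a first command of 0x10 words up to the sector boundary and a second, last
 * command of 0x30 words starting at offset 0x800, since one AQ read may not
 * cross a sector boundary.
 */
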
/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using either the AQ or the
 * SRCTL method. On the AQ path the buffer read is preceded by the NVM
 * ownership take and followed by the release.
 **/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
				 u16 *words, u16 *data)
{
	enum i40e_status_code ret_code = 0;

	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (!ret_code) {
			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
							   data);
			i40e_release_nvm(hw);
		}
	} else {
		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
	}
	return ret_code;
}

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				     u32 offset, u16 words, void *data,
				     bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,   /*bytes*/
					      data, last_command, &cmd_details);

	return ret_code;
}

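/* Note on units: the Shadow RAM helpers in this file are word addressed,
 * while i40e_aq_read_nvm()/i40e_aq_update_nvm() take byte offsets and byte
 * lengths, hence the "2 * offset" and "2 * words" conversions above.
 */
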
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
				      &pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

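/* The stored checksum is defined so that the sum of every covered Shadow RAM
 * word plus the checksum word itself equals I40E_SR_SW_CHECKSUM_BASE modulo
 * 16 bits, which is why the calculation above returns
 * I40E_SR_SW_CHECKSUM_BASE - checksum_local.
 */
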
/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 **/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	i40e_status ret_code;
	u16 checksum;
	__le16 le_sum;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	if (!ret_code) {
		le_sum = cpu_to_le16(checksum);
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &le_sum, true);
	}

	return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need checksum, the value can be NULL.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
				       u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	if (ret_code)
		goto i40e_validate_nvm_checksum_exit;

	/* Do not use i40e_read_nvm_word() because we do not want to take
	 * the synchronization semaphores twice here.
	 */
	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
	return ret_code;
}

static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);

static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}

static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
};

/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->aq.nvm_release_on_done);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		bytes[0] = hw->nvmupd_state;
		return 0;
	}

	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

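/* State machine overview, as implemented below: INIT handles one-shot (SA)
 * and first (SNT) transactions; a successful READ_SNT moves to READING, and
 * successful erase/write/checksum commits move to a WAIT state until the
 * firmware completion arrives on the ARQ. READING returns to INIT on
 * READ_LCB; WRITING heads back towards INIT on WRITE_LCB or CSUM_LCB.
 */
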
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->aq.nvm_release_on_done = true;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->aq.nvm_release_on_done = true;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
					   i40e_aq_rc_to_posix(status,
						    hw->aq.asq_last_status) :
					   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->aq.nvm_release_on_done = true;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_READ_LCB:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status)
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		break;

	case I40E_NVMUPD_WRITE_LCB:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->aq.nvm_release_on_done = true;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->aq.nvm_release_on_done = true;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}

/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			if (module == 0xf)
				upd_cmd = I40E_NVMUPD_STATUS;
			else if (module == 0)
				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		case I40E_NVM_EXEC:
			if (module == 0)
				upd_cmd = I40E_NVMUPD_EXEC_AQ;
			break;
		}
		break;
	}

	return upd_cmd;
}

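/* Example of the config decoding above (illustrative only): the transaction
 * type lives in the I40E_NVM_TRANS_MASK bits and the module pointer in the
 * I40E_NVM_MOD_PNT_MASK bits. A request with command == I40E_NVM_READ,
 * transaction == I40E_NVM_SA and module == 0 validates to
 * I40E_NVMUPD_READ_SA, while the same read with transaction == I40E_NVM_EXEC
 * and module == 0xf is the special status query (I40E_NVMUPD_STATUS).
 */
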
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

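/* Buffer layout used by i40e_nvmupd_exec_aq() and i40e_nvmupd_get_aq_result():
 * the caller's byte buffer starts with a full struct i40e_aq_desc, followed by
 * any indirect data. The descriptor is sent as the AQ command, the trailing
 * bytes become the command buffer, and the write-back descriptor plus any
 * response data are kept in hw->nvm_wb_desc and hw->nvm_buff for a later
 * GET_AQ_RESULT request.
 */
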
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}

/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last,
				    &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}