// SPDX-License-Identifier: GPL-2.0
/*
 * NVM helpers
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"
#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_1M
#define NVM_DATA_DWORDS		16

/* Intel specific NVM offsets */
#define INTEL_NVM_DEVID		0x05
#define INTEL_NVM_VERSION	0x08
#define INTEL_NVM_CSS		0x10
#define INTEL_NVM_FLASH_SIZE	0x45

/* ASMedia specific NVM offsets */
#define ASMEDIA_NVM_DATE	0x1c
#define ASMEDIA_NVM_VERSION	0x28
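
/* Each tb_nvm instance gets a unique id from this IDA (released again in tb_nvm_free()) */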
static DEFINE_IDA(nvm_ida);
/**
 * struct tb_nvm_vendor_ops - Vendor specific NVM operations
 * @read_version: Reads out NVM version from the flash
 * @validate: Validates the NVM image before update (optional)
 * @write_headers: Writes headers before the rest of the image (optional)
 */
struct tb_nvm_vendor_ops {
	int (*read_version)(struct tb_nvm *nvm);
	int (*validate)(struct tb_nvm *nvm);
	int (*write_headers)(struct tb_nvm *nvm);
};
/**
 * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
 * @vendor: Vendor ID
 * @vops: Vendor specific NVM operations
 *
 * Maps vendor ID to NVM vendor operations. If there is no mapping then
 * NVM firmware upgrade is disabled for the device.
 */
struct tb_nvm_vendor {
	u16 vendor;
	const struct tb_nvm_vendor_ops *vops;
};
static int intel_switch_nvm_version(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	u32 val, nvm_size, hdr_size;
	int ret;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (sw->safe_mode)
		return 0;

	ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
	if (ret)
		return ret;
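
	/*
	 * The low three bits of INTEL_NVM_FLASH_SIZE give the total
	 * flash size as a power-of-two number of Mb; half of what is
	 * left after the per-generation header belongs to the active
	 * image.
	 */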
	hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - hdr_size) / 2;

	ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;
	nvm->active_size = nvm_size;

	return 0;
}
static int intel_switch_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	unsigned int image_size, hdr_size;
	u16 ds_size, device_id;
	u8 *buf = nvm->buf_data_start;

	image_size = nvm->buf_data_size;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (sw->safe_mode)
		return 0;

	/*
	 * Make sure the device ID in the image matches the one
	 * we read from the switch config space.
	 */
	device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device_id != sw->config.device_id)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}
static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);

	if (sw->generation < 3) {
		int ret;

		/* Write CSS headers first */
		ret = dma_port_flash_write(sw->dma_port,
			DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
			DMA_PORT_CSS_MAX_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}
static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
	.read_version = intel_switch_nvm_version,
	.validate = intel_switch_nvm_validate,
	.write_headers = intel_switch_nvm_write_headers,
};
static int asmedia_switch_nvm_version(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	u32 val;
	int ret;

	ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;
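
	/*
	 * The 24-bit version and date values are stored in the flash
	 * with the opposite byte order; swap the three low bytes so
	 * that they read naturally.
	 */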
	nvm->major = (val << 16) & 0xff0000;
	nvm->major |= val & 0x00ff00;
	nvm->major |= (val >> 16) & 0x0000ff;

	ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->minor = (val << 16) & 0xff0000;
	nvm->minor |= val & 0x00ff00;
	nvm->minor |= (val >> 16) & 0x0000ff;

	/* ASMedia NVM size is fixed to 512k */
	nvm->active_size = SZ_512K;

	return 0;
}
static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
	.read_version = asmedia_switch_nvm_version,
};
/* Router vendor NVM support table */
static const struct tb_nvm_vendor switch_nvm_vendors[] = {
	{ 0x174c, &asmedia_switch_nvm_ops },
	{ PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
	{ 0x8087, &intel_switch_nvm_ops },
};
static int intel_retimer_nvm_version(struct tb_nvm *nvm)
{
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	u32 val, nvm_size;
	int ret;

	ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;

	ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
	if (ret)
		return ret;
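
	/* Same flash size encoding as on Intel routers, with a fixed 16k header */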
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - SZ_16K) / 2;
	nvm->active_size = nvm_size;

	return 0;
}
static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	unsigned int image_size, hdr_size;
	u8 *buf = nvm->buf_data_start;
	u16 ds_size, device;

	image_size = nvm->buf_data_size;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}
static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
	.read_version = intel_retimer_nvm_version,
	.validate = intel_retimer_nvm_validate,
};
/* Retimer vendor NVM support table */
static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
	{ 0x8087, &intel_retimer_nvm_ops },
};
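
/*
 * Rough calling sequence for these helpers as seen from a router or
 * retimer driver (illustrative sketch only; nvm_read_fn/nvm_write_fn
 * stand for the caller's NVMem callbacks and error handling is
 * abbreviated):
 *
 *	nvm = tb_nvm_alloc(&sw->dev);
 *	if (IS_ERR(nvm))
 *		return PTR_ERR(nvm);
 *	ret = tb_nvm_read_version(nvm);
 *	if (!ret)
 *		ret = tb_nvm_add_active(nvm, nvm_read_fn);
 *	if (!ret)
 *		ret = tb_nvm_add_non_active(nvm, nvm_write_fn);
 *	...
 *	tb_nvm_free(nvm);
 */
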
/**
 * tb_nvm_alloc() - Allocate new NVM structure
 * @dev: Device owning the NVM
 *
 * Allocates new NVM structure with unique @id and returns it. In case
 * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
 * NVM format of the @dev is not known by the kernel.
 */
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
	const struct tb_nvm_vendor_ops *vops = NULL;
	struct tb_nvm *nvm;
	int ret, i;

	if (tb_is_switch(dev)) {
		const struct tb_switch *sw = tb_to_switch(dev);

		for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
			const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];

			if (v->vendor == sw->config.vendor_id) {
				vops = v->vops;
				break;
			}
		}

		if (!vops) {
			tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
				  sw->config.vendor_id);
			return ERR_PTR(-EOPNOTSUPP);
		}
	} else if (tb_is_retimer(dev)) {
		const struct tb_retimer *rt = tb_to_retimer(dev);

		for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
			const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];

			if (v->vendor == rt->vendor) {
				vops = v->vops;
				break;
			}
		}

		if (!vops) {
			dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
				rt->vendor);
			return ERR_PTR(-EOPNOTSUPP);
		}
	} else {
		return ERR_PTR(-EOPNOTSUPP);
	}

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return ERR_PTR(-ENOMEM);

	ret = ida_alloc(&nvm_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(nvm);
		return ERR_PTR(ret);
	}

	nvm->id = ret;
	nvm->dev = dev;
	nvm->vops = vops;

	return nvm;
}
/**
 * tb_nvm_read_version() - Read and populate NVM version
 * @nvm: NVM structure
 *
 * Uses vendor specific means to read out and fill in the existing
 * active NVM version. Returns %0 in case of success and negative errno
 * otherwise.
 */
int tb_nvm_read_version(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

	if (vops && vops->read_version)
		return vops->read_version(nvm);

	return -EOPNOTSUPP;
}
/**
 * tb_nvm_validate() - Validate new NVM image
 * @nvm: NVM structure
 *
 * Runs vendor specific validation over the new NVM image and if all
 * checks pass returns %0. As side effect updates @nvm->buf_data_start
 * and @nvm->buf_data_size fields to match the actual data to be written
 * to the flash.
 *
 * If the validation does not pass then returns negative errno.
 */
int tb_nvm_validate(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;
	unsigned int image_size;
	u8 *buf = nvm->buf;

	if (!buf)
		return -EINVAL;
	if (!vops)
		return -EOPNOTSUPP;

	/* Just do basic image size checks */
	image_size = nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * Set the default data start in the buffer. The validate method
	 * below can change this if needed.
	 */
	nvm->buf_data_start = buf;

	return vops->validate ? vops->validate(nvm) : 0;
}
/**
 * tb_nvm_write_headers() - Write headers before the rest of the image
 * @nvm: NVM structure
 *
 * If the vendor NVM format requires writing headers before the rest of
 * the image, this function does that. Can be called even if the device
 * does not need this.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_nvm_write_headers(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

	return vops->write_headers ? vops->write_headers(nvm) : 0;
}
/**
 * tb_nvm_add_active() - Adds active NVMem device to NVM
 * @nvm: NVM structure
 * @reg_read: Pointer to the function to read the NVM (passed directly to the
 *	      NVMem device)
 *
 * Registers new active NVMem device for @nvm. The @reg_read is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_read is @nvm structure.
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));
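
	/* The NVM is accessed in 32-bit words, hence the 4 byte word size and stride */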
	config.name = "nvm_active";
	config.reg_read = reg_read;
	config.read_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = nvm->active_size;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->active = nvmem;
	return 0;
}
/**
 * tb_nvm_write_buf() - Write data to @nvm buffer
 * @nvm: NVM structure
 * @offset: Offset where to write the data
 * @val: Data buffer to write
 * @bytes: Number of bytes to write
 *
 * Helper function to cache the new NVM image before it is actually
 * written to the flash. Copies @bytes from @val to @nvm->buf starting
 * from @offset.
 */
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
		     size_t bytes)
{
	if (!nvm->buf) {
		nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!nvm->buf)
			return -ENOMEM;
	}
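
	/*
	 * Note that @nvm->buf_data_size only records the end of the most
	 * recent write; the new image is expected to be written
	 * sequentially starting from offset 0.
	 */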
	nvm->flushed = false;
	nvm->buf_data_size = offset + bytes;
	memcpy(nvm->buf + offset, val, bytes);
	return 0;
}
/**
 * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
 * @nvm: NVM structure
 * @reg_write: Pointer to the function to write the NVM (passed directly
 *	       to the NVMem device)
 *
 * Registers new non-active NVMem device for @nvm. The @reg_write is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_write is @nvm structure.
 * The size of the NVMem device is set to %NVM_MAX_SIZE.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));

	config.name = "nvm_non_active";
	config.reg_write = reg_write;
	config.root_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = NVM_MAX_SIZE;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->non_active = nvmem;
	return 0;
}
/**
 * tb_nvm_free() - Release NVM and its resources
 * @nvm: NVM structure to release
 *
 * Releases NVM and the NVMem devices if they were registered.
 */
void tb_nvm_free(struct tb_nvm *nvm)
{
	if (nvm) {
		nvmem_unregister(nvm->non_active);
		nvmem_unregister(nvm->active);
		vfree(nvm->buf);
		ida_free(&nvm_ida, nvm->id);
	}
	kfree(nvm);
}
/**
 * tb_nvm_read_data() - Read data from NVM
 * @address: Start address on the flash
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if block read fails
 * @read_block: Function that reads block from the flash
 * @read_block_data: Data passed to @read_block
 *
 * This is a generic function that reads data from NVM or NVM like
 * device.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
		     unsigned int retries, read_block_fn read_block,
		     void *read_block_data)
{
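	/*
	 * Transfer in chunks of at most NVM_DATA_DWORDS dwords through
	 * a bounce buffer. An unaligned start address is handled by
	 * reading the containing dwords and copying out from @offset
	 * within the chunk.
	 */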
	do {
		unsigned int dwaddress, dwords, offset;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4);

		dwaddress = address / 4;
		dwords = ALIGN(nbytes, 4) / 4;

		ret = read_block(read_block_data, dwaddress, data, dwords);
		if (ret) {
			if (ret != -ENODEV && retries--)
				continue;
			return ret;
		}

		nbytes -= offset;
		memcpy(buf, data + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
/**
 * tb_nvm_write_data() - Write data to NVM
 * @address: Start address on the flash
 * @buf: Buffer where the data is copied from
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if the block write fails
 * @write_block: Function that writes block to the flash
 * @write_block_data: Data passed to @write_block
 *
 * This is a generic function that writes data to NVM or NVM like device.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
		      unsigned int retries, write_block_fn write_block,
		      void *write_block_data)
{
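	/*
	 * Write in chunks of at most NVM_DATA_DWORDS dwords, retrying a
	 * chunk when the block write times out.
	 */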
	do {
		unsigned int offset, dwaddress;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4);

		memcpy(data + offset, buf, nbytes);

		dwaddress = address / 4;
		ret = write_block(write_block_data, dwaddress, data, nbytes / 4);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
void tb_nvm_exit(void)
{
	ida_destroy(&nvm_ida);
}