/* SPDX-License-Identifier: GPL-2.0-only */

#include <amdblocks/lpc.h>
#include <amdblocks/spi.h>
#include <assert.h>
#include <boot_device.h>
#include <cbfs.h>
#include <commonlib/bsd/helpers.h>
#include <commonlib/region.h>
#include <console/console.h>
#include <delay.h>
#include <device/pci_ops.h>
#include <soc/pci_devs.h>
#include <spi_flash.h>
#include <string.h>
#include <thread.h>
#include <types.h>

/* The ROM is memory mapped just below 4GiB. Form a pointer for the base. */
#define rom_base ((void *)(uintptr_t)(0x100000000ULL - CONFIG_ROM_SIZE))
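/*
 * Bookkeeping for an in-flight SPI DMA read. destination, source and remaining
 * are advanced as each hardware transaction completes; transfer_size is the
 * size of the transaction currently programmed into the DMA engine.
 */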
struct spi_dma_transaction {
        uint8_t *destination;
        size_t source;
        size_t size;
        size_t remaining;
        size_t transfer_size;
};
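/*
 * Read by copying directly out of the memory-mapped ROM window. This is the
 * fallback path when the DMA engine cannot be used, and it is also used to
 * finish off any unaligned tail of a DMA transfer.
 */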
static ssize_t spi_dma_readat_mmap(const struct region_device *rd, void *b, size_t offset,
                                   size_t size)
{
        const struct mem_region_device *mdev;

        mdev = container_of(rd, __typeof__(*mdev), rdev);

        memcpy(b, &mdev->base[offset], size);

        return size;
}
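/* The DMA engine reports its busy and error state in the LPC ROM DMA host control register. */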
static bool spi_dma_is_busy(void)
{
        return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
               & LPC_ROM_DMA_CTRL_START;
}
static bool spi_dma_has_error(void)
{
        return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
               & LPC_ROM_DMA_CTRL_ERROR;
}
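/*
 * The DMA engine can only be used when the destination, source and size all
 * meet LPC_ROM_DMA_MIN_ALIGNMENT; anything smaller or unaligned is read
 * through the memory-mapped window instead.
 */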
static bool can_use_dma(void *destination, size_t source, size_t size)
{
        /*
         * Print a notice if reading more than 1024 bytes using mmap. This makes
         * it easier to debug why the SPI DMA wasn't used.
         */
        const size_t warning_size = 1024;

        if (size < LPC_ROM_DMA_MIN_ALIGNMENT)
                return false;

        if (!IS_ALIGNED((uintptr_t)destination, LPC_ROM_DMA_MIN_ALIGNMENT)) {
                if (size > warning_size)
                        printk(BIOS_DEBUG, "Target %p is unaligned\n", destination);
                return false;
        }

        if (!IS_ALIGNED(source, LPC_ROM_DMA_MIN_ALIGNMENT)) {
                if (size > warning_size)
                        printk(BIOS_DEBUG, "Source %#zx is unaligned\n", source);
                return false;
        }

        return true;
}
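/*
 * Program the source, destination and transfer count into the LPC bridge, then
 * set the start bit. A single transaction is capped at
 * LPC_ROM_DMA_CTRL_MAX_BYTES, so larger reads are split into multiple
 * transactions by continue_spi_dma_transaction().
 */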
static void start_spi_dma_transaction(struct spi_dma_transaction *transaction)
{
        uint32_t ctrl;

        printk(BIOS_SPEW, "%s: dest: %p, source: %#zx, remaining: %zu\n", __func__,
               transaction->destination, transaction->source, transaction->remaining);

        /*
         * We should have complete control over the DMA controller, so there shouldn't
         * be any outstanding transactions.
         */
        assert(!spi_dma_is_busy());
        assert(IS_ALIGNED((uintptr_t)transaction->destination, LPC_ROM_DMA_MIN_ALIGNMENT));
        assert(IS_ALIGNED(transaction->source, LPC_ROM_DMA_MIN_ALIGNMENT));
        assert(transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT);

        pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR, transaction->source);
        pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_DST_ADDR,
                           (uintptr_t)transaction->destination);

        ctrl = pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL);
        ctrl &= ~LPC_ROM_DMA_CTRL_DW_COUNT_MASK;

        transaction->transfer_size =
                MIN(LPC_ROM_DMA_CTRL_MAX_BYTES,
                    ALIGN_DOWN(transaction->remaining, LPC_ROM_DMA_MIN_ALIGNMENT));

        ctrl |= LPC_ROM_DMA_CTRL_DW_COUNT(transaction->transfer_size);
        ctrl |= LPC_ROM_DMA_CTRL_ERROR; /* Clear error */
        ctrl |= LPC_ROM_DMA_CTRL_START;

        /*
         * Ensure we have exclusive access to the SPI controller before starting the LPC SPI DMA
         * transaction.
         */
        thread_mutex_lock(&spi_hw_mutex);

        pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL, ctrl);
}
/* Returns true if transaction is still in progress. */
static bool continue_spi_dma_transaction(const struct region_device *rd,
                                          struct spi_dma_transaction *transaction)
{
        /* Verify we are looking at the correct transaction */
        assert(pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR) == transaction->source);

        if (spi_dma_is_busy())
                return true;

        /*
         * Unlock the SPI mutex between DMA transactions to allow other users of the SPI
         * controller to interleave their transactions.
         */
        thread_mutex_unlock(&spi_hw_mutex);

        if (spi_dma_has_error()) {
                printk(BIOS_ERR, "SPI DMA failure: dest: %p, source: %#zx, size: %zu\n",
                       transaction->destination, transaction->source,
                       transaction->transfer_size);
                return false;
        }

        transaction->destination += transaction->transfer_size;
        transaction->source += transaction->transfer_size;
        transaction->remaining -= transaction->transfer_size;

        if (transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT) {
                start_spi_dma_transaction(transaction);
                return true;
        }

        if (transaction->remaining > 0) {
                /* Use mmap to finish off the transfer */
                spi_dma_readat_mmap(rd, transaction->destination, transaction->source,
                                    transaction->remaining);

                transaction->destination += transaction->remaining;
                transaction->source += transaction->remaining;
                transaction->remaining -= transaction->remaining;
        }

        return false;
}
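/*
 * Serializes use of the single DMA engine. The SPI controller itself is
 * arbitrated separately via spi_hw_mutex, which is held only while a hardware
 * transaction is actually running.
 */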
static struct thread_mutex spi_dma_hw_mutex;
static ssize_t spi_dma_readat_dma(const struct region_device *rd, void *destination,
                                  size_t source, size_t size)
{
        struct spi_dma_transaction transaction = {
                .destination = destination,
                .source = source,
                .size = size,
                .remaining = size,
        };

        printk(BIOS_SPEW, "%s: start: dest: %p, source: %#zx, size: %zu\n", __func__,
               destination, source, size);

        thread_mutex_lock(&spi_dma_hw_mutex);

        start_spi_dma_transaction(&transaction);

        do {
                udelay(2);
        } while (continue_spi_dma_transaction(rd, &transaction));

        thread_mutex_unlock(&spi_dma_hw_mutex);

        printk(BIOS_SPEW, "%s: end: dest: %p, source: %#zx, remaining: %zu\n",
               __func__, destination, source, transaction.remaining);

        /* Allow queued up transaction to continue */
        thread_yield();

        if (transaction.remaining)
                return -1;

        return transaction.size;
}
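/* Top-level readat: use the DMA engine when the request allows it, otherwise fall back to mmap. */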
static ssize_t spi_dma_readat(const struct region_device *rd, void *b, size_t offset,
                              size_t size)
{
        if (can_use_dma(b, offset, size))
                return spi_dma_readat_dma(rd, b, offset, size);

        return spi_dma_readat_mmap(rd, b, offset, size);
}
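/*
 * When the CBFS cache is enabled, back mappings with a buffer from cbfs_cache
 * and fill it through spi_dma_readat() so the mapping can benefit from the DMA
 * engine. Without the cache, hand out a pointer into the memory-mapped ROM.
 */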
static void *spi_dma_mmap(const struct region_device *rd, size_t offset, size_t size)
{
        const struct mem_region_device *mdev;
        void *mapping;

        mdev = container_of(rd, __typeof__(*mdev), rdev);

        if (!CONFIG_CBFS_CACHE_SIZE)
                return &mdev->base[offset];

        mapping = mem_pool_alloc(&cbfs_cache, size);
        if (!mapping) {
                printk(BIOS_INFO, "%s: Could not allocate %zu bytes from memory pool\n",
                       __func__, size);
                /* Fall-back to memory map */
                return &mdev->base[offset];
        }

        if (spi_dma_readat(rd, mapping, offset, size) != size) {
                printk(BIOS_ERR, "%s: Error reading into mmap buffer\n", __func__);
                mem_pool_free(&cbfs_cache, mapping);
                /* Fall-back to memory mapped read - not expected to fail, at least for now */
                spi_dma_readat_mmap(rd, mapping, offset, size);
        }

        return mapping;
}
static int spi_dma_munmap(const struct region_device *rd __always_unused, void *mapping)
{
        if (CONFIG_CBFS_CACHE_SIZE)
                mem_pool_free(&cbfs_cache, mapping);

        return 0;
}
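/* Expose the DMA-capable boot device through the generic region_device API. */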
const struct region_device_ops spi_dma_rdev_ro_ops = {
        .mmap = spi_dma_mmap,
        .munmap = spi_dma_munmap,
        .readat = spi_dma_readat,
};
static const struct mem_region_device boot_dev = {
        .base = rom_base,
        .rdev = REGION_DEV_INIT(&spi_dma_rdev_ro_ops, 0, CONFIG_ROM_SIZE),
};
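/*
 * Illustrative usage: consumers such as CBFS go through the region_device API,
 * e.g.
 *
 *   const struct region_device *rdev = boot_device_ro();
 *   rdev_readat(rdev, buffer, offset, size);
 *
 * Reads that satisfy the DMA alignment and size requirements are serviced by
 * the LPC SPI DMA engine; everything else uses the memory-mapped window.
 */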
const struct region_device *boot_device_ro(void)
{
        return &boot_dev.rdev;
}
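/* Report the single MMAP window: the whole ROM mapped just below 4GiB. */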
uint32_t spi_flash_get_mmap_windows(struct flash_mmap_window *table)
{
        table->flash_base = 0;
        table->host_base = (uint32_t)(uintptr_t)rom_base;
        table->size = CONFIG_ROM_SIZE;

        return 1;
}
/*
 * Without this magic bit, the SPI DMA controller will write 0s into the destination if an MMAP
 * read happens while a DMA transaction is in progress, i.e., PSP is reading from SPI. The bit
 * that fixes this was added to Cezanne, Renoir and later SoCs. So the SPI DMA controller is not
 * reliable on any prior generations.
 */
static void spi_dma_fix(void)
{
        /* Internal only registers */
        uint8_t val = spi_read8(SPI_MISC_CNTRL);
        val |= 1 << 6;
        spi_write8(SPI_MISC_CNTRL, val);
}
void boot_device_init(void)