src/soc/qualcomm/common/qspi.c

/* SPDX-License-Identifier: GPL-2.0-only */

#include <spi-generic.h>
#include <spi_flash.h>
#include <arch/cache.h>
#include <device/mmio.h>
#include <soc/addressmap.h>
#include <soc/qspi_common.h>
#include <soc/clock.h>
#include <symbols.h>
#include <assert.h>
#include <gpio.h>
#include <string.h>

#define CACHE_LINE_SIZE	64

static int curr_desc_idx = -1;

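/*
 * One entry in the DMA command descriptor chain. data_address,
 * next_descriptor, the mode/fragment bits and length describe one transfer
 * segment to the controller; the bounce_* fields are software-only
 * bookkeeping used by flush_chain() to copy read data out of the bounce
 * buffers after the chain completes.
 */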
struct cmd_desc {
	uint32_t data_address;
	uint32_t next_descriptor;
	uint32_t direction:1;
	uint32_t multi_io_mode:3;
	uint32_t reserved1:4;
	uint32_t fragment:1;
	uint32_t reserved2:7;
	uint32_t length:16;
	//------------------------//
	uint32_t bounce_src;
	uint32_t bounce_dst;
	uint32_t bounce_length;
	uint64_t padding[5];
};

enum qspi_mode {
	SDR_1BIT = 1,
	SDR_2BIT = 2,
	SDR_4BIT = 3,
	DDR_1BIT = 5,
	DDR_2BIT = 6,
	DDR_4BIT = 7,
};

enum cs_state {
	CS_DEASSERT,
	CS_ASSERT
};

struct xfer_cfg {
	enum qspi_mode mode;
};

enum bus_xfer_direction {
	MASTER_READ = 0,
	MASTER_WRITE = 1,
};

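/*
 * Fixed pool of three descriptors with matching cache-line-sized bounce
 * buffers, placed in the coherent DMA region so the controller and the CPU
 * agree on their contents.
 */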
struct {
	struct cmd_desc descriptors[3];
	uint8_t buffers[3][CACHE_LINE_SIZE];
} *dma = (void *)_dma_coherent;

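/*
 * Hand a descriptor chain to the controller and busy-wait until the
 * DMA_CHAIN_DONE status bit is set. Interrupts are not used; any stale
 * status bits are cleared before the chain is started.
 */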
static void dma_transfer_chain(struct cmd_desc *chain)
{
	uint32_t mstr_int_status;

	write32(&qcom_qspi->mstr_int_sts, 0xFFFFFFFF);
	write32(&qcom_qspi->next_dma_desc_addr, (uint32_t)(uintptr_t)chain);

	while (1) {
		mstr_int_status = read32(&qcom_qspi->mstr_int_sts);
		if (mstr_int_status & DMA_CHAIN_DONE)
			break;
	}
}

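/*
 * Run the queued descriptor chain, then walk it: for read segments either
 * invalidate the cache lines that were DMAed to directly, or copy the data
 * from the bounce buffer back into the caller's buffer. Finally reset the
 * allocator so the next transfer starts a fresh chain.
 */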
static void flush_chain(void)
{
	struct cmd_desc *desc = &dma->descriptors[0];
	uint8_t *src;
	uint8_t *dst;

	dma_transfer_chain(desc);

	while (desc) {
		if (desc->direction == MASTER_READ) {
			if (desc->bounce_length == 0)
				dcache_invalidate_by_mva(
					(void *)(uintptr_t)desc->data_address,
					desc->length);
			else {
				src = (void *)(uintptr_t)desc->bounce_src;
				dst = (void *)(uintptr_t)desc->bounce_dst;
				memcpy(dst, src, desc->bounce_length);
			}
		}
		desc = (void *)(uintptr_t)desc->next_descriptor;
	}
	curr_desc_idx = -1;
}

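/*
 * Take the next descriptor/bounce-buffer pair from the fixed pool,
 * initialize it to a safe default (read, single-bit mode, fragment set) and
 * link it to the end of the current chain.
 */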
static struct cmd_desc *allocate_descriptor(void)
{
	struct cmd_desc *current;
	struct cmd_desc *next;
	uint8_t index;

	current = (curr_desc_idx == -1) ?
		NULL : &dma->descriptors[curr_desc_idx];

	index = ++curr_desc_idx;
	next = &dma->descriptors[index];

	next->data_address = (uint32_t)(uintptr_t)dma->buffers[index];

	next->next_descriptor = 0;
	next->direction = MASTER_READ;
	next->multi_io_mode = 0;
	next->reserved1 = 0;
	/*
	 * The QSPI controller doesn't support transfers that start with a
	 * read segment, so to allow read transfers that are not preceded by
	 * a write, set the transfer fragment bit.
	 */
	next->fragment = 1;
	next->reserved2 = 0;
	next->length = 0;
	next->bounce_src = 0;
	next->bounce_dst = 0;
	next->bounce_length = 0;

	if (current)
		current->next_descriptor = (uint32_t)(uintptr_t)next;

	return next;
}

static void cs_change(enum cs_state state)
{
	/* CS is active low: drive the pin high to deassert. */
	gpio_set(QSPI_CS, state == CS_DEASSERT);
}

static void configure_gpios(void)
{
	gpio_output(QSPI_CS, 1);

	gpio_configure(QSPI_DATA_0, GPIO_FUNC_QSPI_DATA_0,
		       GPIO_NO_PULL, GPIO_8MA, GPIO_OUTPUT);

	gpio_configure(QSPI_DATA_1, GPIO_FUNC_QSPI_DATA_1,
		       GPIO_NO_PULL, GPIO_8MA, GPIO_OUTPUT);

	gpio_configure(QSPI_CLK, GPIO_FUNC_QSPI_CLK,
		       GPIO_NO_PULL, GPIO_8MA, GPIO_OUTPUT);
}

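/*
 * Queue a transfer segment that goes through one of the cache-line-sized
 * bounce buffers. Write data is copied into the bounce buffer now; for
 * reads, the descriptor records where the data has to be copied back to
 * once flush_chain() has run.
 */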
static void queue_bounce_data(uint8_t *data, uint32_t data_bytes,
			      enum qspi_mode data_mode, bool write)
{
	struct cmd_desc *desc;
	uint8_t *ptr;

	desc = allocate_descriptor();
	desc->direction = write;
	desc->multi_io_mode = data_mode;
	ptr = (void *)(uintptr_t)desc->data_address;

	if (write) {
		memcpy(ptr, data, data_bytes);
	} else {
		desc->bounce_src = (uint32_t)(uintptr_t)ptr;
		desc->bounce_dst = (uint32_t)(uintptr_t)data;
		desc->bounce_length = data_bytes;
	}

	desc->length = data_bytes;
}

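/*
 * Queue a transfer segment that DMAs directly to/from the caller's buffer.
 * The buffer is expected to be cache-line aligned (queue_data() only passes
 * the aligned middle portion here); it is cleaned for writes and
 * invalidated for reads so CPU caches and the DMA engine agree.
 */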
static void queue_direct_data(uint8_t *data, uint32_t data_bytes,
			      enum qspi_mode data_mode, bool write)
{
	struct cmd_desc *desc;

	desc = allocate_descriptor();
	desc->direction = write;
	desc->multi_io_mode = data_mode;
	desc->data_address = (uint32_t)(uintptr_t)data;
	desc->length = data_bytes;

	if (write)
		dcache_clean_by_mva(data, data_bytes);
	else
		dcache_invalidate_by_mva(data, data_bytes);
}

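/*
 * Split one transfer into up to three segments based on cache-line
 * alignment: an unaligned prolog and epilog that use bounce buffers, and an
 * aligned middle portion that is DMAed directly to/from the caller's
 * buffer.
 */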
static void queue_data(uint8_t *data, uint32_t data_bytes,
		       enum qspi_mode data_mode, bool write)
{
	uint8_t *aligned_ptr;
	uint8_t *epilog_ptr;
	uint32_t prolog_bytes, aligned_bytes, epilog_bytes;

	if (data_bytes == 0)
		return;

	aligned_ptr =
		(uint8_t *)ALIGN_UP((uintptr_t)data, CACHE_LINE_SIZE);

	prolog_bytes = MIN(data_bytes, aligned_ptr - data);
	aligned_bytes = ALIGN_DOWN(data_bytes - prolog_bytes, CACHE_LINE_SIZE);
	epilog_bytes = data_bytes - prolog_bytes - aligned_bytes;

	epilog_ptr = data + prolog_bytes + aligned_bytes;

	if (prolog_bytes)
		queue_bounce_data(data, prolog_bytes, data_mode, write);
	if (aligned_bytes)
		queue_direct_data(aligned_ptr, aligned_bytes, data_mode, write);
	if (epilog_bytes)
		queue_bounce_data(epilog_ptr, epilog_bytes, data_mode, write);
}

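/*
 * One-time controller setup: build mstr_cfg from the delay fields and the
 * SBL_EN/PIN_HOLDN/FB_CLK_EN/DMA_ENABLE/FULL_CYCLE_MODE flags, program a
 * fixed AHB master configuration, mask interrupts, clear stale status and
 * reset the read FIFO.
 */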
static void reg_init(void)
{
	uint32_t spi_mode;
	uint32_t tx_data_oe_delay, tx_data_delay;
	uint32_t mstr_config;

	spi_mode = 0;

	tx_data_oe_delay = 0;
	tx_data_delay = 0;

	mstr_config = (tx_data_oe_delay << TX_DATA_OE_DELAY_SHIFT) |
		(tx_data_delay << TX_DATA_DELAY_SHIFT) | (SBL_EN) |
		(spi_mode << SPI_MODE_SHIFT) |
		(PIN_HOLDN) |
		(FB_CLK_EN) |
		(DMA_ENABLE) |
		(FULL_CYCLE_MODE);

	write32(&qcom_qspi->mstr_cfg, mstr_config);
	write32(&qcom_qspi->ahb_mstr_cfg, 0xA42);
	write32(&qcom_qspi->mstr_int_en, 0x0);
	write32(&qcom_qspi->mstr_int_sts, 0xFFFFFFFF);
	write32(&qcom_qspi->rd_fifo_cfg, 0x0);
	write32(&qcom_qspi->rd_fifo_rst, RESET_FIFO);
}

void quadspi_init(uint32_t hz)
{
	assert(dcache_line_bytes() == CACHE_LINE_SIZE);
	clock_configure_qspi(hz * 4);
	configure_gpios();
	reg_init();
}

int qspi_claim_bus(const struct spi_slave *slave)
{
	cs_change(CS_ASSERT);
	return 0;
}

void qspi_release_bus(const struct spi_slave *slave)
{
	cs_change(CS_DEASSERT);
}

static int xfer(enum qspi_mode mode, const void *dout, size_t out_bytes,
		void *din, size_t in_bytes)
{
	/* Reject null buffers and bidirectional transfers in a single call. */
	if ((out_bytes && !dout) || (in_bytes && !din) ||
	    (in_bytes && out_bytes)) {
		return -1;
	}

	queue_data((uint8_t *)(out_bytes ? dout : din),
		   in_bytes | out_bytes, mode, !!out_bytes);

	flush_chain();

	return 0;
}

int qspi_xfer(const struct spi_slave *slave, const void *dout,
	      size_t out_bytes, void *din, size_t in_bytes)
{
	return xfer(SDR_1BIT, dout, out_bytes, din, in_bytes);
}

int qspi_xfer_dual(const struct spi_slave *slave, const void *dout,
		   size_t out_bytes, void *din, size_t in_bytes)
{
	return xfer(SDR_2BIT, dout, out_bytes, din, in_bytes);
}
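
/*
 * Illustrative call sequence (a sketch, not part of this driver; the real
 * callers are the generic SPI/SPI-flash layers and SoC board code). The
 * frequency, slave pointer and buffers below are hypothetical:
 *
 *	quadspi_init(25000000);
 *	qspi_claim_bus(slave);
 *	qspi_xfer(slave, opcode_buf, opcode_len, NULL, 0);	// command out
 *	qspi_xfer(slave, NULL, 0, resp_buf, resp_len);		// response in
 *	qspi_release_bus(slave);
 */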