2 * QEMU model of the Ibex SPI Controller
3 * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
5 * Copyright (C) 2022 Western Digital
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/registerfields.h"
#include "hw/ssi/ibex_spi_host.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "trace.h"
37 REG32(INTR_STATE
, 0x00)
38 FIELD(INTR_STATE
, ERROR
, 0, 1)
39 FIELD(INTR_STATE
, SPI_EVENT
, 1, 1)
40 REG32(INTR_ENABLE
, 0x04)
41 FIELD(INTR_ENABLE
, ERROR
, 0, 1)
42 FIELD(INTR_ENABLE
, SPI_EVENT
, 1, 1)
43 REG32(INTR_TEST
, 0x08)
44 FIELD(INTR_TEST
, ERROR
, 0, 1)
45 FIELD(INTR_TEST
, SPI_EVENT
, 1, 1)
46 REG32(ALERT_TEST
, 0x0c)
47 FIELD(ALERT_TEST
, FETAL_TEST
, 0, 1)
49 FIELD(CONTROL
, RX_WATERMARK
, 0, 8)
50 FIELD(CONTROL
, TX_WATERMARK
, 1, 8)
51 FIELD(CONTROL
, OUTPUT_EN
, 29, 1)
52 FIELD(CONTROL
, SW_RST
, 30, 1)
53 FIELD(CONTROL
, SPIEN
, 31, 1)
55 FIELD(STATUS
, TXQD
, 0, 8)
56 FIELD(STATUS
, RXQD
, 18, 8)
57 FIELD(STATUS
, CMDQD
, 16, 3)
58 FIELD(STATUS
, RXWM
, 20, 1)
59 FIELD(STATUS
, BYTEORDER
, 22, 1)
60 FIELD(STATUS
, RXSTALL
, 23, 1)
61 FIELD(STATUS
, RXEMPTY
, 24, 1)
62 FIELD(STATUS
, RXFULL
, 25, 1)
63 FIELD(STATUS
, TXWM
, 26, 1)
64 FIELD(STATUS
, TXSTALL
, 27, 1)
65 FIELD(STATUS
, TXEMPTY
, 28, 1)
66 FIELD(STATUS
, TXFULL
, 29, 1)
67 FIELD(STATUS
, ACTIVE
, 30, 1)
68 FIELD(STATUS
, READY
, 31, 1)
69 REG32(CONFIGOPTS
, 0x18)
70 FIELD(CONFIGOPTS
, CLKDIV_0
, 0, 16)
71 FIELD(CONFIGOPTS
, CSNIDLE_0
, 16, 4)
72 FIELD(CONFIGOPTS
, CSNTRAIL_0
, 20, 4)
73 FIELD(CONFIGOPTS
, CSNLEAD_0
, 24, 4)
74 FIELD(CONFIGOPTS
, FULLCYC_0
, 29, 1)
75 FIELD(CONFIGOPTS
, CPHA_0
, 30, 1)
76 FIELD(CONFIGOPTS
, CPOL_0
, 31, 1)
78 FIELD(CSID
, CSID
, 0, 32)
80 FIELD(COMMAND
, LEN
, 0, 8)
81 FIELD(COMMAND
, CSAAT
, 9, 1)
82 FIELD(COMMAND
, SPEED
, 10, 2)
83 FIELD(COMMAND
, DIRECTION
, 12, 2)
84 REG32(ERROR_ENABLE
, 0x2c)
85 FIELD(ERROR_ENABLE
, CMDBUSY
, 0, 1)
86 FIELD(ERROR_ENABLE
, OVERFLOW
, 1, 1)
87 FIELD(ERROR_ENABLE
, UNDERFLOW
, 2, 1)
88 FIELD(ERROR_ENABLE
, CMDINVAL
, 3, 1)
89 FIELD(ERROR_ENABLE
, CSIDINVAL
, 4, 1)
90 REG32(ERROR_STATUS
, 0x30)
91 FIELD(ERROR_STATUS
, CMDBUSY
, 0, 1)
92 FIELD(ERROR_STATUS
, OVERFLOW
, 1, 1)
93 FIELD(ERROR_STATUS
, UNDERFLOW
, 2, 1)
94 FIELD(ERROR_STATUS
, CMDINVAL
, 3, 1)
95 FIELD(ERROR_STATUS
, CSIDINVAL
, 4, 1)
96 FIELD(ERROR_STATUS
, ACCESSINVAL
, 5, 1)
97 REG32(EVENT_ENABLE
, 0x34)
98 FIELD(EVENT_ENABLE
, RXFULL
, 0, 1)
99 FIELD(EVENT_ENABLE
, TXEMPTY
, 1, 1)
100 FIELD(EVENT_ENABLE
, RXWM
, 2, 1)
101 FIELD(EVENT_ENABLE
, TXWM
, 3, 1)
102 FIELD(EVENT_ENABLE
, READY
, 4, 1)
103 FIELD(EVENT_ENABLE
, IDLE
, 5, 1)
/* Number of 32-bit words needed to hold @dividend bytes (ceiling of /4). */
static inline uint8_t div4_round_up(uint8_t dividend)
{
    return dividend / 4 + (dividend % 4 != 0);
}
110 static void ibex_spi_rxfifo_reset(IbexSPIHostState
*s
)
112 uint32_t data
= s
->regs
[IBEX_SPI_HOST_STATUS
];
113 /* Empty the RX FIFO and assert RXEMPTY */
114 fifo8_reset(&s
->rx_fifo
);
115 data
= FIELD_DP32(data
, STATUS
, RXFULL
, 0);
116 data
= FIELD_DP32(data
, STATUS
, RXEMPTY
, 1);
117 s
->regs
[IBEX_SPI_HOST_STATUS
] = data
;
120 static void ibex_spi_txfifo_reset(IbexSPIHostState
*s
)
122 uint32_t data
= s
->regs
[IBEX_SPI_HOST_STATUS
];
123 /* Empty the TX FIFO and assert TXEMPTY */
124 fifo8_reset(&s
->tx_fifo
);
125 data
= FIELD_DP32(data
, STATUS
, TXFULL
, 0);
126 data
= FIELD_DP32(data
, STATUS
, TXEMPTY
, 1);
127 s
->regs
[IBEX_SPI_HOST_STATUS
] = data
;
130 static void ibex_spi_host_reset(DeviceState
*dev
)
132 IbexSPIHostState
*s
= IBEX_SPI_HOST(dev
);
133 trace_ibex_spi_host_reset("Resetting Ibex SPI");
135 /* SPI Host Register Reset */
136 s
->regs
[IBEX_SPI_HOST_INTR_STATE
] = 0x00;
137 s
->regs
[IBEX_SPI_HOST_INTR_ENABLE
] = 0x00;
138 s
->regs
[IBEX_SPI_HOST_INTR_TEST
] = 0x00;
139 s
->regs
[IBEX_SPI_HOST_ALERT_TEST
] = 0x00;
140 s
->regs
[IBEX_SPI_HOST_CONTROL
] = 0x7f;
141 s
->regs
[IBEX_SPI_HOST_STATUS
] = 0x00;
142 s
->regs
[IBEX_SPI_HOST_CONFIGOPTS
] = 0x00;
143 s
->regs
[IBEX_SPI_HOST_CSID
] = 0x00;
144 s
->regs
[IBEX_SPI_HOST_COMMAND
] = 0x00;
145 /* RX/TX Modelled by FIFO */
146 s
->regs
[IBEX_SPI_HOST_RXDATA
] = 0x00;
147 s
->regs
[IBEX_SPI_HOST_TXDATA
] = 0x00;
149 s
->regs
[IBEX_SPI_HOST_ERROR_ENABLE
] = 0x1F;
150 s
->regs
[IBEX_SPI_HOST_ERROR_STATUS
] = 0x00;
151 s
->regs
[IBEX_SPI_HOST_EVENT_ENABLE
] = 0x00;
153 ibex_spi_rxfifo_reset(s
);
154 ibex_spi_txfifo_reset(s
);
156 s
->init_status
= true;
161 * Check if we need to trigger an interrupt.
162 * The two interrupts lines (host_err and event) can
163 * be enabled separately in 'IBEX_SPI_HOST_INTR_ENABLE'.
165 * Interrupts are triggered based on the ones
166 * enabled in the `IBEX_SPI_HOST_EVENT_ENABLE` and `IBEX_SPI_HOST_ERROR_ENABLE`.
168 static void ibex_spi_host_irq(IbexSPIHostState
*s
)
170 uint32_t intr_test_reg
= s
->regs
[IBEX_SPI_HOST_INTR_TEST
];
171 uint32_t intr_en_reg
= s
->regs
[IBEX_SPI_HOST_INTR_ENABLE
];
172 uint32_t intr_state_reg
= s
->regs
[IBEX_SPI_HOST_INTR_STATE
];
174 uint32_t err_en_reg
= s
->regs
[IBEX_SPI_HOST_ERROR_ENABLE
];
175 uint32_t event_en_reg
= s
->regs
[IBEX_SPI_HOST_EVENT_ENABLE
];
176 uint32_t err_status_reg
= s
->regs
[IBEX_SPI_HOST_ERROR_STATUS
];
177 uint32_t status_reg
= s
->regs
[IBEX_SPI_HOST_STATUS
];
180 bool error_en
= FIELD_EX32(intr_en_reg
, INTR_ENABLE
, ERROR
);
181 bool event_en
= FIELD_EX32(intr_en_reg
, INTR_ENABLE
, SPI_EVENT
);
182 bool err_pending
= FIELD_EX32(intr_state_reg
, INTR_STATE
, ERROR
);
183 bool status_pending
= FIELD_EX32(intr_state_reg
, INTR_STATE
, SPI_EVENT
);
185 int err_irq
= 0, event_irq
= 0;
187 /* Error IRQ enabled and Error IRQ Cleared */
188 if (error_en
&& !err_pending
) {
189 /* Event enabled, Interrupt Test Error */
190 if (FIELD_EX32(intr_test_reg
, INTR_TEST
, ERROR
)) {
192 } else if (FIELD_EX32(err_en_reg
, ERROR_ENABLE
, CMDBUSY
) &&
193 FIELD_EX32(err_status_reg
, ERROR_STATUS
, CMDBUSY
)) {
194 /* Wrote to COMMAND when not READY */
196 } else if (FIELD_EX32(err_en_reg
, ERROR_ENABLE
, CMDINVAL
) &&
197 FIELD_EX32(err_status_reg
, ERROR_STATUS
, CMDINVAL
)) {
198 /* Invalid command segment */
200 } else if (FIELD_EX32(err_en_reg
, ERROR_ENABLE
, CSIDINVAL
) &&
201 FIELD_EX32(err_status_reg
, ERROR_STATUS
, CSIDINVAL
)) {
202 /* Invalid value for CSID */
206 s
->regs
[IBEX_SPI_HOST_INTR_STATE
] |= R_INTR_STATE_ERROR_MASK
;
208 qemu_set_irq(s
->host_err
, err_irq
);
211 /* Event IRQ Enabled and Event IRQ Cleared */
212 if (event_en
&& !status_pending
) {
213 if (FIELD_EX32(intr_test_reg
, INTR_STATE
, SPI_EVENT
)) {
214 /* Event enabled, Interrupt Test Event */
216 } else if (FIELD_EX32(event_en_reg
, EVENT_ENABLE
, READY
) &&
217 FIELD_EX32(status_reg
, STATUS
, READY
)) {
218 /* SPI Host ready for next command */
220 } else if (FIELD_EX32(event_en_reg
, EVENT_ENABLE
, TXEMPTY
) &&
221 FIELD_EX32(status_reg
, STATUS
, TXEMPTY
)) {
222 /* SPI TXEMPTY, TXFIFO drained */
224 } else if (FIELD_EX32(event_en_reg
, EVENT_ENABLE
, RXFULL
) &&
225 FIELD_EX32(status_reg
, STATUS
, RXFULL
)) {
226 /* SPI RXFULL, RXFIFO full */
230 s
->regs
[IBEX_SPI_HOST_INTR_STATE
] |= R_INTR_STATE_SPI_EVENT_MASK
;
232 qemu_set_irq(s
->event
, event_irq
);
236 static void ibex_spi_host_transfer(IbexSPIHostState
*s
)
238 uint32_t rx
, tx
, data
;
239 /* Get num of one byte transfers */
240 uint8_t segment_len
= FIELD_EX32(s
->regs
[IBEX_SPI_HOST_COMMAND
],
243 while (segment_len
> 0) {
244 if (fifo8_is_empty(&s
->tx_fifo
)) {
246 s
->regs
[IBEX_SPI_HOST_STATUS
] |= R_STATUS_TXSTALL_MASK
;
248 } else if (fifo8_is_full(&s
->rx_fifo
)) {
250 s
->regs
[IBEX_SPI_HOST_STATUS
] |= R_STATUS_RXSTALL_MASK
;
253 tx
= fifo8_pop(&s
->tx_fifo
);
256 rx
= ssi_transfer(s
->ssi
, tx
);
258 trace_ibex_spi_host_transfer(tx
, rx
);
260 if (!fifo8_is_full(&s
->rx_fifo
)) {
261 fifo8_push(&s
->rx_fifo
, rx
);
264 s
->regs
[IBEX_SPI_HOST_STATUS
] |= R_STATUS_RXFULL_MASK
;
269 data
= s
->regs
[IBEX_SPI_HOST_STATUS
];
271 data
= FIELD_DP32(data
, STATUS
, READY
, 1);
273 data
= FIELD_DP32(data
, STATUS
, RXQD
, div4_round_up(segment_len
));
275 data
= FIELD_DP32(data
, STATUS
, TXQD
, fifo8_num_used(&s
->tx_fifo
) / 4);
277 data
= FIELD_DP32(data
, STATUS
, TXFULL
, 0);
279 data
= FIELD_DP32(data
, STATUS
, RXEMPTY
, 0);
280 /* Update register status */
281 s
->regs
[IBEX_SPI_HOST_STATUS
] = data
;
282 /* Drop remaining bytes that exceed segment_len */
283 ibex_spi_txfifo_reset(s
);
285 ibex_spi_host_irq(s
);
288 static uint64_t ibex_spi_host_read(void *opaque
, hwaddr addr
,
291 IbexSPIHostState
*s
= opaque
;
295 trace_ibex_spi_host_read(addr
, size
);
297 /* Match reg index */
300 /* Skipping any W/O registers */
301 case IBEX_SPI_HOST_INTR_STATE
...IBEX_SPI_HOST_INTR_ENABLE
:
302 case IBEX_SPI_HOST_CONTROL
...IBEX_SPI_HOST_STATUS
:
305 case IBEX_SPI_HOST_CSID
:
308 case IBEX_SPI_HOST_CONFIGOPTS
:
309 rc
= s
->config_opts
[s
->regs
[IBEX_SPI_HOST_CSID
]];
311 case IBEX_SPI_HOST_TXDATA
:
314 case IBEX_SPI_HOST_RXDATA
:
316 s
->regs
[IBEX_SPI_HOST_STATUS
] &= ~R_STATUS_RXFULL_MASK
;
318 for (int i
= 0; i
< 4; ++i
) {
319 if (fifo8_is_empty(&s
->rx_fifo
)) {
320 /* Assert RXEMPTY, no IRQ */
321 s
->regs
[IBEX_SPI_HOST_STATUS
] |= R_STATUS_RXEMPTY_MASK
;
322 s
->regs
[IBEX_SPI_HOST_ERROR_STATUS
] |=
323 R_ERROR_STATUS_UNDERFLOW_MASK
;
326 rx_byte
= fifo8_pop(&s
->rx_fifo
);
327 rc
|= rx_byte
<< (i
* 8);
330 case IBEX_SPI_HOST_ERROR_ENABLE
...IBEX_SPI_HOST_EVENT_ENABLE
:
334 qemu_log_mask(LOG_GUEST_ERROR
, "Bad offset 0x%" HWADDR_PRIx
"\n",
341 static void ibex_spi_host_write(void *opaque
, hwaddr addr
,
342 uint64_t val64
, unsigned int size
)
344 IbexSPIHostState
*s
= opaque
;
345 uint32_t val32
= val64
;
346 uint32_t shift_mask
= 0xff, status
= 0, data
= 0;
349 trace_ibex_spi_host_write(addr
, size
, val64
);
351 /* Match reg index */
355 /* Skipping any R/O registers */
356 case IBEX_SPI_HOST_INTR_STATE
:
357 /* rw1c status register */
358 if (FIELD_EX32(val32
, INTR_STATE
, ERROR
)) {
359 data
= FIELD_DP32(data
, INTR_STATE
, ERROR
, 0);
361 if (FIELD_EX32(val32
, INTR_STATE
, SPI_EVENT
)) {
362 data
= FIELD_DP32(data
, INTR_STATE
, SPI_EVENT
, 0);
364 s
->regs
[addr
] = data
;
366 case IBEX_SPI_HOST_INTR_ENABLE
:
367 s
->regs
[addr
] = val32
;
369 case IBEX_SPI_HOST_INTR_TEST
:
370 s
->regs
[addr
] = val32
;
371 ibex_spi_host_irq(s
);
373 case IBEX_SPI_HOST_ALERT_TEST
:
374 s
->regs
[addr
] = val32
;
375 qemu_log_mask(LOG_UNIMP
,
376 "%s: SPI_ALERT_TEST is not supported\n", __func__
);
378 case IBEX_SPI_HOST_CONTROL
:
379 s
->regs
[addr
] = val32
;
381 if (val32
& R_CONTROL_SW_RST_MASK
) {
382 ibex_spi_host_reset((DeviceState
*)s
);
383 /* Clear active if any */
384 s
->regs
[IBEX_SPI_HOST_STATUS
] &= ~R_STATUS_ACTIVE_MASK
;
387 if (val32
& R_CONTROL_OUTPUT_EN_MASK
) {
388 qemu_log_mask(LOG_UNIMP
,
389 "%s: CONTROL_OUTPUT_EN is not supported\n", __func__
);
392 case IBEX_SPI_HOST_CONFIGOPTS
:
393 /* Update the respective config-opts register based on CSIDth index */
394 s
->config_opts
[s
->regs
[IBEX_SPI_HOST_CSID
]] = val32
;
395 qemu_log_mask(LOG_UNIMP
,
396 "%s: CONFIGOPTS Hardware settings not supported\n",
399 case IBEX_SPI_HOST_CSID
:
400 if (val32
>= s
->num_cs
) {
401 /* CSID exceeds max num_cs */
402 s
->regs
[IBEX_SPI_HOST_ERROR_STATUS
] |=
403 R_ERROR_STATUS_CSIDINVAL_MASK
;
404 ibex_spi_host_irq(s
);
407 s
->regs
[addr
] = val32
;
409 case IBEX_SPI_HOST_COMMAND
:
410 s
->regs
[addr
] = val32
;
412 /* STALL, IP not enabled */
413 if (!(FIELD_EX32(s
->regs
[IBEX_SPI_HOST_CONTROL
],
418 /* SPI not ready, IRQ Error */
419 if (!(FIELD_EX32(s
->regs
[IBEX_SPI_HOST_STATUS
],
421 s
->regs
[IBEX_SPI_HOST_ERROR_STATUS
] |= R_ERROR_STATUS_CMDBUSY_MASK
;
422 ibex_spi_host_irq(s
);
426 /* Assert Not Ready */
427 s
->regs
[IBEX_SPI_HOST_STATUS
] &= ~R_STATUS_READY_MASK
;
429 if (FIELD_EX32(val32
, COMMAND
, DIRECTION
) != BIDIRECTIONAL_TRANSFER
) {
430 qemu_log_mask(LOG_UNIMP
,
431 "%s: Rx Only/Tx Only are not supported\n", __func__
);
434 if (val32
& R_COMMAND_CSAAT_MASK
) {
435 qemu_log_mask(LOG_UNIMP
,
436 "%s: CSAAT is not supported\n", __func__
);
438 if (val32
& R_COMMAND_SPEED_MASK
) {
439 qemu_log_mask(LOG_UNIMP
,
440 "%s: SPEED is not supported\n", __func__
);
443 /* Set Transfer Callback */
444 timer_mod(s
->fifo_trigger_handle
,
445 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) +
446 (TX_INTERRUPT_TRIGGER_DELAY_NS
));
449 case IBEX_SPI_HOST_TXDATA
:
451 * This is a hardware `feature` where
452 * the first word written to TXDATA after init is omitted entirely
454 if (s
->init_status
) {
455 s
->init_status
= false;
459 for (int i
= 0; i
< 4; ++i
) {
460 /* Attempting to write when TXFULL */
461 if (fifo8_is_full(&s
->tx_fifo
)) {
462 /* Assert RXEMPTY, no IRQ */
463 s
->regs
[IBEX_SPI_HOST_STATUS
] |= R_STATUS_TXFULL_MASK
;
464 s
->regs
[IBEX_SPI_HOST_ERROR_STATUS
] |=
465 R_ERROR_STATUS_OVERFLOW_MASK
;
466 ibex_spi_host_irq(s
);
469 /* Byte ordering is set by the IP */
470 status
= s
->regs
[IBEX_SPI_HOST_STATUS
];
471 if (FIELD_EX32(status
, STATUS
, BYTEORDER
) == 0) {
472 /* LE: LSB transmitted first (default for ibex processor) */
473 shift_mask
= 0xff << (i
* 8);
475 /* BE: MSB transmitted first */
476 qemu_log_mask(LOG_UNIMP
,
477 "%s: Big endian is not supported\n", __func__
);
480 fifo8_push(&s
->tx_fifo
, (val32
& shift_mask
) >> (i
* 8));
482 status
= s
->regs
[IBEX_SPI_HOST_STATUS
];
484 status
= FIELD_DP32(status
, STATUS
, TXEMPTY
, 0);
486 txqd_len
= FIELD_EX32(status
, STATUS
, TXQD
);
487 /* Partial bytes (size < 4) are padded, in words. */
489 status
= FIELD_DP32(status
, STATUS
, TXQD
, txqd_len
);
491 status
= FIELD_DP32(status
, STATUS
, READY
, 1);
492 /* Update register status */
493 s
->regs
[IBEX_SPI_HOST_STATUS
] = status
;
495 case IBEX_SPI_HOST_ERROR_ENABLE
:
496 s
->regs
[addr
] = val32
;
498 if (val32
& R_ERROR_ENABLE_CMDINVAL_MASK
) {
499 qemu_log_mask(LOG_UNIMP
,
500 "%s: Segment Length is not supported\n", __func__
);
503 case IBEX_SPI_HOST_ERROR_STATUS
:
505 * Indicates any errors that have occurred.
506 * When an error occurs, the corresponding bit must be cleared
507 * here before issuing any further commands
509 status
= s
->regs
[addr
];
510 /* rw1c status register */
511 if (FIELD_EX32(val32
, ERROR_STATUS
, CMDBUSY
)) {
512 status
= FIELD_DP32(status
, ERROR_STATUS
, CMDBUSY
, 0);
514 if (FIELD_EX32(val32
, ERROR_STATUS
, OVERFLOW
)) {
515 status
= FIELD_DP32(status
, ERROR_STATUS
, OVERFLOW
, 0);
517 if (FIELD_EX32(val32
, ERROR_STATUS
, UNDERFLOW
)) {
518 status
= FIELD_DP32(status
, ERROR_STATUS
, UNDERFLOW
, 0);
520 if (FIELD_EX32(val32
, ERROR_STATUS
, CMDINVAL
)) {
521 status
= FIELD_DP32(status
, ERROR_STATUS
, CMDINVAL
, 0);
523 if (FIELD_EX32(val32
, ERROR_STATUS
, CSIDINVAL
)) {
524 status
= FIELD_DP32(status
, ERROR_STATUS
, CSIDINVAL
, 0);
526 if (FIELD_EX32(val32
, ERROR_STATUS
, ACCESSINVAL
)) {
527 status
= FIELD_DP32(status
, ERROR_STATUS
, ACCESSINVAL
, 0);
529 s
->regs
[addr
] = status
;
531 case IBEX_SPI_HOST_EVENT_ENABLE
:
532 /* Controls which classes of SPI events raise an interrupt. */
533 s
->regs
[addr
] = val32
;
535 if (val32
& R_EVENT_ENABLE_RXWM_MASK
) {
536 qemu_log_mask(LOG_UNIMP
,
537 "%s: RXWM is not supported\n", __func__
);
539 if (val32
& R_EVENT_ENABLE_TXWM_MASK
) {
540 qemu_log_mask(LOG_UNIMP
,
541 "%s: TXWM is not supported\n", __func__
);
544 if (val32
& R_EVENT_ENABLE_IDLE_MASK
) {
545 qemu_log_mask(LOG_UNIMP
,
546 "%s: IDLE is not supported\n", __func__
);
550 qemu_log_mask(LOG_GUEST_ERROR
, "Bad offset 0x%" HWADDR_PRIx
"\n",
555 static const MemoryRegionOps ibex_spi_ops
= {
556 .read
= ibex_spi_host_read
,
557 .write
= ibex_spi_host_write
,
558 /* Ibex default LE */
559 .endianness
= DEVICE_LITTLE_ENDIAN
,
562 static Property ibex_spi_properties
[] = {
563 DEFINE_PROP_UINT32("num_cs", IbexSPIHostState
, num_cs
, 1),
564 DEFINE_PROP_END_OF_LIST(),
567 static const VMStateDescription vmstate_ibex
= {
568 .name
= TYPE_IBEX_SPI_HOST
,
570 .minimum_version_id
= 1,
571 .fields
= (VMStateField
[]) {
572 VMSTATE_UINT32_ARRAY(regs
, IbexSPIHostState
, IBEX_SPI_HOST_MAX_REGS
),
573 VMSTATE_VARRAY_UINT32(config_opts
, IbexSPIHostState
,
574 num_cs
, 0, vmstate_info_uint32
, uint32_t),
575 VMSTATE_FIFO8(rx_fifo
, IbexSPIHostState
),
576 VMSTATE_FIFO8(tx_fifo
, IbexSPIHostState
),
577 VMSTATE_TIMER_PTR(fifo_trigger_handle
, IbexSPIHostState
),
578 VMSTATE_BOOL(init_status
, IbexSPIHostState
),
579 VMSTATE_END_OF_LIST()
583 static void fifo_trigger_update(void *opaque
)
585 IbexSPIHostState
*s
= opaque
;
586 ibex_spi_host_transfer(s
);
589 static void ibex_spi_host_realize(DeviceState
*dev
, Error
**errp
)
591 IbexSPIHostState
*s
= IBEX_SPI_HOST(dev
);
594 s
->ssi
= ssi_create_bus(dev
, "ssi");
595 s
->cs_lines
= g_new0(qemu_irq
, s
->num_cs
);
597 for (i
= 0; i
< s
->num_cs
; ++i
) {
598 sysbus_init_irq(SYS_BUS_DEVICE(dev
), &s
->cs_lines
[i
]);
601 /* Setup CONFIGOPTS Multi-register */
602 s
->config_opts
= g_new0(uint32_t, s
->num_cs
);
604 /* Setup FIFO Interrupt Timer */
605 s
->fifo_trigger_handle
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
606 fifo_trigger_update
, s
);
608 /* FIFO sizes as per OT Spec */
609 fifo8_create(&s
->tx_fifo
, IBEX_SPI_HOST_TXFIFO_LEN
);
610 fifo8_create(&s
->rx_fifo
, IBEX_SPI_HOST_RXFIFO_LEN
);
613 static void ibex_spi_host_init(Object
*obj
)
615 IbexSPIHostState
*s
= IBEX_SPI_HOST(obj
);
617 sysbus_init_irq(SYS_BUS_DEVICE(obj
), &s
->host_err
);
618 sysbus_init_irq(SYS_BUS_DEVICE(obj
), &s
->event
);
620 memory_region_init_io(&s
->mmio
, obj
, &ibex_spi_ops
, s
,
621 TYPE_IBEX_SPI_HOST
, 0x1000);
622 sysbus_init_mmio(SYS_BUS_DEVICE(obj
), &s
->mmio
);
625 static void ibex_spi_host_class_init(ObjectClass
*klass
, void *data
)
627 DeviceClass
*dc
= DEVICE_CLASS(klass
);
628 dc
->realize
= ibex_spi_host_realize
;
629 dc
->reset
= ibex_spi_host_reset
;
630 dc
->vmsd
= &vmstate_ibex
;
631 device_class_set_props(dc
, ibex_spi_properties
);
634 static const TypeInfo ibex_spi_host_info
= {
635 .name
= TYPE_IBEX_SPI_HOST
,
636 .parent
= TYPE_SYS_BUS_DEVICE
,
637 .instance_size
= sizeof(IbexSPIHostState
),
638 .instance_init
= ibex_spi_host_init
,
639 .class_init
= ibex_spi_host_class_init
,
642 static void ibex_spi_host_register_types(void)
644 type_register_static(&ibex_spi_host_info
);
647 type_init(ibex_spi_host_register_types
)