/*
 * This file is part of Cleanflight and Betaflight.
 *
 * Cleanflight and Betaflight are free software. You can redistribute
 * this software and/or modify this software under the terms of the
 * GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option)
 * any later version.
 *
 * Cleanflight and Betaflight are distributed in the hope that they
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software.
 *
 * If not, see <http://www.gnu.org/licenses/>.
 */
#include "common/utils.h"
#include "common/maths.h"

#include "drivers/bus.h"
#include "drivers/bus_spi.h"
#include "drivers/bus_spi_impl.h"
#include "drivers/dma.h"
#include "drivers/io.h"
#include "drivers/rcc.h"
// Use DMA, if available, when at least this many bytes are to be transferred
#define SPI_DMA_THRESHOLD 8
#ifndef SPI2_SCK_PIN
#define SPI2_NSS_PIN    PB12
#define SPI2_SCK_PIN    PB13
#define SPI2_MISO_PIN   PB14
#define SPI2_MOSI_PIN   PB15
#endif

#ifndef SPI3_SCK_PIN
#define SPI3_NSS_PIN    PA15
#define SPI3_SCK_PIN    PB3
#define SPI3_MISO_PIN   PB4
#define SPI3_MOSI_PIN   PB5
#endif

#ifndef SPI4_SCK_PIN
#define SPI4_NSS_PIN    PA15
#define SPI4_SCK_PIN    PB3
#define SPI4_MISO_PIN   PB4
#define SPI4_MOSI_PIN   PB5
#endif

#ifndef SPI1_NSS_PIN
#define SPI1_NSS_PIN NONE
#endif
#ifndef SPI2_NSS_PIN
#define SPI2_NSS_PIN NONE
#endif
#ifndef SPI3_NSS_PIN
#define SPI3_NSS_PIN NONE
#endif
#ifndef SPI4_NSS_PIN
#define SPI4_NSS_PIN NONE
#endif

#define SPI_DEFAULT_TIMEOUT 10
#if defined(STM32H7)
#define IS_DTCM(p) (((uint32_t)p & 0xfffe0000) == 0x20000000)
#elif defined(STM32F7)
#define IS_DTCM(p) (((uint32_t)p & 0xffff0000) == 0x20000000)
#elif defined(STM32G4)
#define IS_CCM(p) ((((uint32_t)p & 0xffff8000) == 0x10000000) || (((uint32_t)p & 0xffff8000) == 0x20018000))
#endif
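
/* The masks above are simple address-window tests: on the H7, ((uint32_t)p & 0xfffe0000) == 0x20000000
 * matches 0x20000000..0x2001ffff (a 128K window at the DTCM base), the F7 variant matches a 64K window,
 * and IS_CCM() on the G4 matches the 32K CCM SRAM at 0x10000000 or its alias ending at 0x2001ffff.
 * As the checks further down note, DTCM on the H7 and CCM on the G4 are not reachable by DMA1/DMA2,
 * while DTCM on the F7 is DMA-accessible but uncached and so needs no cache maintenance.
 */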
static LL_SPI_InitTypeDef defaultInit = {
    .TransferDirection = LL_SPI_FULL_DUPLEX,
    .Mode = LL_SPI_MODE_MASTER,
    .DataWidth = LL_SPI_DATAWIDTH_8BIT,
    .NSS = LL_SPI_NSS_SOFT,
    .BaudRate = LL_SPI_BAUDRATEPRESCALER_DIV8,
    .BitOrder = LL_SPI_MSB_FIRST,
    .CRCCalculation = LL_SPI_CRCCALCULATION_DISABLE,
    .ClockPolarity = LL_SPI_POLARITY_HIGH,
    .ClockPhase = LL_SPI_PHASE_2EDGE,
};
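
// Default bus configuration: master mode, 8 bit frames, MSB first, software NSS, CPOL = 1 and
// CPHA = second edge (SPI mode 3) at a PCLK/8 prescale. spiSequenceStart() below retunes the
// prescaler and the polarity/phase pair per device before each sequence of transfers.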
static uint32_t spiDivisorToBRbits(SPI_TypeDef *instance, uint16_t divisor)
{
#if !defined(STM32H7)
    // SPI2 and SPI3 are on APB1/AHB1, whose PCLK is half that of APB2/AHB2
    if (instance == SPI2 || instance == SPI3) {
        divisor /= 2; // Safe for divisor == 0 or 1
    }
#else
    UNUSED(instance);
#endif

    divisor = constrain(divisor, 2, 256);

#if defined(STM32H7)
    const uint32_t baudRatePrescaler[8] = {
        LL_SPI_BAUDRATEPRESCALER_DIV2,
        LL_SPI_BAUDRATEPRESCALER_DIV4,
        LL_SPI_BAUDRATEPRESCALER_DIV8,
        LL_SPI_BAUDRATEPRESCALER_DIV16,
        LL_SPI_BAUDRATEPRESCALER_DIV32,
        LL_SPI_BAUDRATEPRESCALER_DIV64,
        LL_SPI_BAUDRATEPRESCALER_DIV128,
        LL_SPI_BAUDRATEPRESCALER_DIV256,
    };
    int prescalerIndex = ffs(divisor) - 2; // prescaler begins at "/2"

    return baudRatePrescaler[prescalerIndex];
#else
    return (ffs(divisor) - 2) << SPI_CR1_BR_Pos;
#endif
}
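
/* Worked example: with divisor == 8, ffs(8) == 4, so the prescaler index is 4 - 2 == 2, selecting
 * LL_SPI_BAUDRATEPRESCALER_DIV8 on the H7 or writing 0b010 to the CR1 BR field (PCLK/8) on other
 * MCUs. The divisor is assumed to be a power of two in the range 2..256; on non-H7 parts it is
 * halved first for SPI2/SPI3 so that the slower APB1 peripheral clock still yields the requested
 * SCK rate.
 */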
void spiInitDevice(SPIDevice device)
{
    spiDevice_t *spi = &spiDevice[device];

    if (!spi->dev) {
        return;
    }

    // Enable the SPI clock
    RCC_ClockCmd(spi->rcc, ENABLE);
    RCC_ResetCmd(spi->rcc, ENABLE);

    IOInit(IOGetByTag(spi->sck),  OWNER_SPI_SCK,  RESOURCE_INDEX(device));
    IOInit(IOGetByTag(spi->miso), OWNER_SPI_MISO, RESOURCE_INDEX(device));
    IOInit(IOGetByTag(spi->mosi), OWNER_SPI_MOSI, RESOURCE_INDEX(device));

    IOConfigGPIOAF(IOGetByTag(spi->miso), SPI_IO_AF_MISO_CFG, spi->misoAF);
    IOConfigGPIOAF(IOGetByTag(spi->mosi), SPI_IO_AF_CFG, spi->mosiAF);
    IOConfigGPIOAF(IOGetByTag(spi->sck), SPI_IO_AF_SCK_CFG_HIGH, spi->sckAF);

    LL_SPI_Disable(spi->dev);
    LL_SPI_DeInit(spi->dev);

#if defined(STM32H7)
    // Prevent glitching when SPI is disabled
    LL_SPI_EnableGPIOControl(spi->dev);

    LL_SPI_SetFIFOThreshold(spi->dev, LL_SPI_FIFO_TH_01DATA);
    LL_SPI_Init(spi->dev, &defaultInit);
#else
    LL_SPI_SetRxFIFOThreshold(spi->dev, SPI_RXFIFO_THRESHOLD_QF);

    LL_SPI_Init(spi->dev, &defaultInit);
    LL_SPI_Enable(spi->dev);
#endif
}
void spiInternalResetDescriptors(busDevice_t *bus)
{
    LL_DMA_InitTypeDef *initTx = bus->initTx;

    LL_DMA_StructInit(initTx);
#if defined(STM32G4) || defined(STM32H7)
    initTx->PeriphRequest = bus->dmaTx->channel;
#else
    initTx->Channel = bus->dmaTx->channel;
#endif
    initTx->Mode = LL_DMA_MODE_NORMAL;
    initTx->Direction = LL_DMA_DIRECTION_MEMORY_TO_PERIPH;
#if defined(STM32H7)
    initTx->PeriphOrM2MSrcAddress = (uint32_t)&bus->busType_u.spi.instance->TXDR;
#else
    initTx->PeriphOrM2MSrcAddress = (uint32_t)&bus->busType_u.spi.instance->DR;
#endif
    initTx->Priority = LL_DMA_PRIORITY_LOW;
    initTx->PeriphOrM2MSrcIncMode = LL_DMA_PERIPH_NOINCREMENT;
    initTx->PeriphOrM2MSrcDataSize = LL_DMA_PDATAALIGN_BYTE;
    initTx->MemoryOrM2MDstDataSize = LL_DMA_MDATAALIGN_BYTE;

    if (bus->dmaRx) {
        LL_DMA_InitTypeDef *initRx = bus->initRx;

        LL_DMA_StructInit(initRx);
#if defined(STM32G4) || defined(STM32H7)
        initRx->PeriphRequest = bus->dmaRx->channel;
#else
        initRx->Channel = bus->dmaRx->channel;
#endif
        initRx->Mode = LL_DMA_MODE_NORMAL;
        initRx->Direction = LL_DMA_DIRECTION_PERIPH_TO_MEMORY;
#if defined(STM32H7)
        initRx->PeriphOrM2MSrcAddress = (uint32_t)&bus->busType_u.spi.instance->RXDR;
#else
        initRx->PeriphOrM2MSrcAddress = (uint32_t)&bus->busType_u.spi.instance->DR;
#endif
        initRx->Priority = LL_DMA_PRIORITY_LOW;
        initRx->PeriphOrM2MSrcIncMode = LL_DMA_PERIPH_NOINCREMENT;
        initRx->PeriphOrM2MSrcDataSize = LL_DMA_PDATAALIGN_BYTE;
    }
}
void spiInternalResetStream(dmaChannelDescriptor_t *descriptor)
{
    // Disable the stream
#if defined(STM32G4)
    LL_DMA_DisableChannel(descriptor->dma, descriptor->stream);
    while (LL_DMA_IsEnabledChannel(descriptor->dma, descriptor->stream));
#else
    LL_DMA_DisableStream(descriptor->dma, descriptor->stream);
    while (LL_DMA_IsEnabledStream(descriptor->dma, descriptor->stream));
#endif

    // Clear any pending interrupt flags
    DMA_CLEAR_FLAG(descriptor, DMA_IT_HTIF | DMA_IT_TEIF | DMA_IT_TCIF);
}
FAST_CODE static bool spiInternalReadWriteBufPolled(SPI_TypeDef *instance, const uint8_t *txData, uint8_t *rxData, int len)
{
#if defined(STM32H7)
    LL_SPI_SetTransferSize(instance, len);
    LL_SPI_Enable(instance);
    LL_SPI_StartMasterTransfer(instance);
    while (len) {
        while (!LL_SPI_IsActiveFlag_TXP(instance));
        uint8_t b = txData ? *(txData++) : 0xFF;
        LL_SPI_TransmitData8(instance, b);

        while (!LL_SPI_IsActiveFlag_RXP(instance));
        b = LL_SPI_ReceiveData8(instance);
        if (rxData) {
            *(rxData++) = b;
        }
        --len;
    }
    while (!LL_SPI_IsActiveFlag_EOT(instance));
    LL_SPI_ClearFlag_TXTF(instance);
    LL_SPI_Disable(instance);
#else
    // set 16-bit transfer
    CLEAR_BIT(instance->CR2, SPI_RXFIFO_THRESHOLD);
    while (len > 1) {
        while (!LL_SPI_IsActiveFlag_TXE(instance));
        uint16_t w;
        if (txData) {
            w = *((uint16_t *)txData);
            txData += 2;
        } else {
            w = 0xFFFF;
        }
        LL_SPI_TransmitData16(instance, w);

        while (!LL_SPI_IsActiveFlag_RXNE(instance));
        w = LL_SPI_ReceiveData16(instance);
        if (rxData) {
            *((uint16_t *)rxData) = w;
            rxData += 2;
        }
        len -= 2;
    }

    // set 8-bit transfer
    SET_BIT(instance->CR2, SPI_RXFIFO_THRESHOLD);
    if (len) {
        while (!LL_SPI_IsActiveFlag_TXE(instance));
        uint8_t b = txData ? *(txData++) : 0xFF;
        LL_SPI_TransmitData8(instance, b);

        while (!LL_SPI_IsActiveFlag_RXNE(instance));
        b = LL_SPI_ReceiveData8(instance);
        if (rxData) {
            *(rxData++) = b;
        }
    }
#endif

    return true;
}
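
// On the non-H7 path above, pairs of bytes are moved through the FIFO as 16 bit accesses (with the
// RX FIFO threshold switched to the half-full level first), so each TXE/RXNE wait shifts two bytes;
// any odd trailing byte is transferred with an 8 bit access. The data size register is not changed,
// so the frames on the wire remain 8 bits wide; this only reduces the polled loop overhead.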
void spiInternalInitStream(const extDevice_t *dev, bool preInit)
{
    STATIC_DMA_DATA_AUTO uint8_t dummyTxByte = 0xff;
    STATIC_DMA_DATA_AUTO uint8_t dummyRxByte;
    busDevice_t *bus = dev->bus;

    busSegment_t *segment = (busSegment_t *)bus->curSegment;

    if (preInit) {
        // Prepare the init structure for the next segment to reduce inter-segment interval
        segment++;
        if (segment->len == 0) {
            // There's no following segment
            return;
        }
    }

    int len = segment->len;

    uint8_t *txData = segment->u.buffers.txData;
    LL_DMA_InitTypeDef *initTx = bus->initTx;

    if (txData) {
#ifdef __DCACHE_PRESENT
#ifdef STM32H7
        if ((txData < &_dmaram_start__) || (txData >= &_dmaram_end__)) {
#else
        // No need to flush DTCM memory
        if (!IS_DTCM(txData)) {
#endif
            // Flush the D cache to ensure the data to be written is in main memory
            SCB_CleanDCache_by_Addr(
                (uint32_t *)((uint32_t)txData & ~CACHE_LINE_MASK),
                (((uint32_t)txData & CACHE_LINE_MASK) + len - 1 + CACHE_LINE_SIZE) & ~CACHE_LINE_MASK);
        }
#endif // __DCACHE_PRESENT
        initTx->MemoryOrM2MDstAddress = (uint32_t)txData;
        initTx->MemoryOrM2MDstIncMode = LL_DMA_MEMORY_INCREMENT;
    } else {
        initTx->MemoryOrM2MDstAddress = (uint32_t)&dummyTxByte;
        initTx->MemoryOrM2MDstIncMode = LL_DMA_MEMORY_NOINCREMENT;
    }
    initTx->NbData = len;

#if !defined(STM32G4) && !defined(STM32H7)
    if (dev->bus->dmaRx) {
#endif
        uint8_t *rxData = segment->u.buffers.rxData;
        LL_DMA_InitTypeDef *initRx = bus->initRx;

        if (rxData) {
            /* Flush the D cache for the start and end of the receive buffer as
             * the cache will be invalidated after the transfer and any valid data
             * just before/after must be in memory at that point
             */
#ifdef __DCACHE_PRESENT
            // No need to flush/invalidate DTCM memory
#ifdef STM32H7
            if ((rxData < &_dmaram_start__) || (rxData >= &_dmaram_end__)) {
#else
            // No need to flush DTCM memory
            if (!IS_DTCM(rxData)) {
#endif
                SCB_CleanInvalidateDCache_by_Addr(
                    (uint32_t *)((uint32_t)rxData & ~CACHE_LINE_MASK),
                    (((uint32_t)rxData & CACHE_LINE_MASK) + len - 1 + CACHE_LINE_SIZE) & ~CACHE_LINE_MASK);
            }
#endif // __DCACHE_PRESENT
            initRx->MemoryOrM2MDstAddress = (uint32_t)rxData;
            initRx->MemoryOrM2MDstIncMode = LL_DMA_MEMORY_INCREMENT;
        } else {
            initRx->MemoryOrM2MDstAddress = (uint32_t)&dummyRxByte;
            initRx->MemoryOrM2MDstIncMode = LL_DMA_MEMORY_NOINCREMENT;
        }
        initRx->NbData = len;
#if !defined(STM32G4) && !defined(STM32H7)
    }
#endif
}
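
/* Cache maintenance illustration (assuming a 32 byte cache line, i.e. CACHE_LINE_SIZE == 32 and
 * CACHE_LINE_MASK == 31): for txData == 0x24000105 and len == 7 the expressions above clean from
 * 0x24000100 (the address rounded down to a line boundary) for ((0x05 + 7 - 1 + 32) & ~31) == 32
 * bytes, i.e. exactly the one line holding 0x24000105..0x2400010b. A buffer that straddles a line
 * boundary is rounded up to cover every line it touches.
 */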
void spiInternalStartDMA(const extDevice_t *dev)
{
    busDevice_t *bus = dev->bus;

    dmaChannelDescriptor_t *dmaTx = bus->dmaTx;
    dmaChannelDescriptor_t *dmaRx = bus->dmaRx;

#if !defined(STM32G4) && !defined(STM32H7)
    if (dmaRx) {
#endif
        // Use the correct callback argument
        dmaRx->userParam = (uint32_t)dev;

        // Clear transfer flags
        DMA_CLEAR_FLAG(dmaTx, DMA_IT_HTIF | DMA_IT_TEIF | DMA_IT_TCIF);
        DMA_CLEAR_FLAG(dmaRx, DMA_IT_HTIF | DMA_IT_TEIF | DMA_IT_TCIF);

#if defined(STM32G4)
        // Disable channels to enable update
        LL_DMA_DisableChannel(dmaTx->dma, dmaTx->stream);
        LL_DMA_DisableChannel(dmaRx->dma, dmaRx->stream);

        /* Use the Rx interrupt as this occurs once the SPI operation is complete whereas the Tx interrupt
         * occurs earlier when the Tx FIFO is empty, but the SPI operation is still in progress
         */
        LL_DMA_EnableIT_TC(dmaRx->dma, dmaRx->stream);

        // Update channels
        LL_DMA_Init(dmaTx->dma, dmaTx->stream, bus->initTx);
        LL_DMA_Init(dmaRx->dma, dmaRx->stream, bus->initRx);

        LL_SPI_EnableDMAReq_RX(dev->bus->busType_u.spi.instance);

        // Enable channels
        LL_DMA_EnableChannel(dmaTx->dma, dmaTx->stream);
        LL_DMA_EnableChannel(dmaRx->dma, dmaRx->stream);

        LL_SPI_EnableDMAReq_TX(dev->bus->busType_u.spi.instance);
#else
        DMA_Stream_TypeDef *streamRegsTx = (DMA_Stream_TypeDef *)dmaTx->ref;
        DMA_Stream_TypeDef *streamRegsRx = (DMA_Stream_TypeDef *)dmaRx->ref;

        // Disable streams to enable update
        LL_DMA_WriteReg(streamRegsTx, CR, 0U);
        LL_DMA_WriteReg(streamRegsRx, CR, 0U);

        /* Use the Rx interrupt as this occurs once the SPI operation is complete whereas the Tx interrupt
         * occurs earlier when the Tx FIFO is empty, but the SPI operation is still in progress
         */
        LL_EX_DMA_EnableIT_TC(streamRegsRx);

        // Update streams
        LL_DMA_Init(dmaTx->dma, dmaTx->stream, bus->initTx);
        LL_DMA_Init(dmaRx->dma, dmaRx->stream, bus->initRx);

        /* Note from AN4031
         *
         * If the user enables the used peripheral before the corresponding DMA stream, a FEIF
         * (FIFO Error Interrupt Flag) may be set due to the fact the DMA is not ready to provide
         * the first required data to the peripheral (in case of memory-to-peripheral transfer).
         */

        // Enable the SPI DMA Tx & Rx requests
#if defined(STM32H7)
        LL_SPI_SetTransferSize(dev->bus->busType_u.spi.instance, dev->bus->curSegment->len);
        LL_DMA_EnableStream(dmaTx->dma, dmaTx->stream);
        LL_DMA_EnableStream(dmaRx->dma, dmaRx->stream);
        SET_BIT(dev->bus->busType_u.spi.instance->CFG1, SPI_CFG1_RXDMAEN | SPI_CFG1_TXDMAEN);
        LL_SPI_Enable(dev->bus->busType_u.spi.instance);
        LL_SPI_StartMasterTransfer(dev->bus->busType_u.spi.instance);
#else
        LL_DMA_EnableStream(dmaTx->dma, dmaTx->stream);
        LL_DMA_EnableStream(dmaRx->dma, dmaRx->stream);

        SET_BIT(dev->bus->busType_u.spi.instance->CR2, SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN);
#endif
#endif
#if !defined(STM32G4) && !defined(STM32H7)
    } else {
        DMA_Stream_TypeDef *streamRegsTx = (DMA_Stream_TypeDef *)dmaTx->ref;

        // Use the correct callback argument
        dmaTx->userParam = (uint32_t)dev;

        // Clear transfer flags
        DMA_CLEAR_FLAG(dmaTx, DMA_IT_HTIF | DMA_IT_TEIF | DMA_IT_TCIF);

        // Disable stream to enable update
        LL_DMA_WriteReg(streamRegsTx, CR, 0U);

        LL_EX_DMA_EnableIT_TC(streamRegsTx);

        // Update stream
        LL_DMA_Init(dmaTx->dma, dmaTx->stream, bus->initTx);

        /* Note from AN4031
         *
         * If the user enables the used peripheral before the corresponding DMA stream, a FEIF
         * (FIFO Error Interrupt Flag) may be set due to the fact the DMA is not ready to provide
         * the first required data to the peripheral (in case of memory-to-peripheral transfer).
         */

        // Enable the SPI DMA Tx request
        LL_DMA_EnableStream(dmaTx->dma, dmaTx->stream);

        SET_BIT(dev->bus->busType_u.spi.instance->CR2, SPI_CR2_TXDMAEN);
    }
#endif
}
void spiInternalStopDMA (const extDevice_t *dev)
{
    busDevice_t *bus = dev->bus;

    dmaChannelDescriptor_t *dmaTx = bus->dmaTx;
    dmaChannelDescriptor_t *dmaRx = bus->dmaRx;
    SPI_TypeDef *instance = bus->busType_u.spi.instance;

#if !defined(STM32G4) && !defined(STM32H7)
    if (dmaRx) {
#endif
        // Disable the DMA engine and SPI interface
#if defined(STM32G4)
        LL_DMA_DisableChannel(dmaTx->dma, dmaTx->stream);
        LL_DMA_DisableChannel(dmaRx->dma, dmaRx->stream);
#else
        LL_DMA_DisableStream(dmaRx->dma, dmaRx->stream);
        LL_DMA_DisableStream(dmaTx->dma, dmaTx->stream);
#endif

        // Clear transfer flags
        DMA_CLEAR_FLAG(dmaRx, DMA_IT_HTIF | DMA_IT_TEIF | DMA_IT_TCIF);

        LL_SPI_DisableDMAReq_TX(instance);
        LL_SPI_DisableDMAReq_RX(instance);

#if defined(STM32H7)
        LL_SPI_ClearFlag_TXTF(dev->bus->busType_u.spi.instance);
        LL_SPI_Disable(dev->bus->busType_u.spi.instance);
#endif
#if !defined(STM32G4) && !defined(STM32H7)
    } else {
        SPI_TypeDef *instance = bus->busType_u.spi.instance;

        // Ensure the current transmission is complete
        while (LL_SPI_IsActiveFlag_BSY(instance));

        // Drain the RX buffer
        while (LL_SPI_IsActiveFlag_RXNE(instance)) {
            instance->DR;
        }

        // Disable the DMA engine and SPI interface
        LL_DMA_DisableStream(dmaTx->dma, dmaTx->stream);

        DMA_CLEAR_FLAG(dmaTx, DMA_IT_HTIF | DMA_IT_TEIF | DMA_IT_TCIF);

        LL_SPI_DisableDMAReq_TX(instance);
    }
#endif
}
// DMA transfer setup and start
FAST_CODE void spiSequenceStart(const extDevice_t *dev)
{
    busDevice_t *bus = dev->bus;
    SPI_TypeDef *instance = bus->busType_u.spi.instance;
    spiDevice_t *spi = &spiDevice[spiDeviceByInstance(instance)];
    bool dmaSafe = dev->useDMA;
    uint32_t xferLen = 0;
    uint32_t segmentCount = 0;

    bus->initSegment = true;

#if !defined(STM32H7)
    LL_SPI_Disable(instance);
#endif

    // Switch the bus speed if necessary
    if (dev->busType_u.spi.speed != bus->busType_u.spi.speed) {
        LL_SPI_SetBaudRatePrescaler(instance, spiDivisorToBRbits(instance, dev->busType_u.spi.speed));
        bus->busType_u.spi.speed = dev->busType_u.spi.speed;
    }

    // Switch SPI clock polarity/phase if necessary
    if (dev->busType_u.spi.leadingEdge != bus->busType_u.spi.leadingEdge) {
        if (dev->busType_u.spi.leadingEdge) {
            IOConfigGPIOAF(IOGetByTag(spi->sck), SPI_IO_AF_SCK_CFG_LOW, spi->sckAF);
            LL_SPI_SetClockPhase(instance, LL_SPI_PHASE_1EDGE);
            LL_SPI_SetClockPolarity(instance, LL_SPI_POLARITY_LOW);
        } else {
            IOConfigGPIOAF(IOGetByTag(spi->sck), SPI_IO_AF_SCK_CFG_HIGH, spi->sckAF);
            LL_SPI_SetClockPhase(instance, LL_SPI_PHASE_2EDGE);
            LL_SPI_SetClockPolarity(instance, LL_SPI_POLARITY_HIGH);
        }

        bus->busType_u.spi.leadingEdge = dev->busType_u.spi.leadingEdge;
    }

#if !defined(STM32H7)
    LL_SPI_Enable(instance);
#endif

    /* Where data is being read into a buffer which is cached, where the start or end of that
     * buffer is not cache aligned, there is a risk of corruption of other data in that cache line.
     * After the read is complete, the cache lines covering the structure will be invalidated to ensure
     * that the processor sees the read data, not what was in cache previously. Unfortunately if
     * there is any other data in the area covered by those cache lines, at the start or end of the
     * buffer, it too will be invalidated, so had the processor written to those locations during the DMA
     * operation those written values will be lost.
     */
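
    /* Example of the hazard described above (assuming 32 byte cache lines): if an 8 byte rxData
     * buffer starts at 0x20010010, the invalidate after the DMA read covers the whole line
     * 0x20010000..0x2001001f. Any variable in 0x20010000..0x2001000f that the CPU wrote during the
     * transfer loses that write when the line is invalidated. Hence the checks below only allow DMA
     * when the receive buffer is cache-line aligned and a whole number of cache lines long, or lives
     * in memory that needs no maintenance.
     */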
    // Check that any reads are cache aligned and of multiple cache lines in length
    for (busSegment_t *checkSegment = (busSegment_t *)bus->curSegment; checkSegment->len; checkSegment++) {
        // Check there is no receive data as only transmit DMA is available
        if ((checkSegment->u.buffers.rxData) && (bus->dmaRx == (dmaChannelDescriptor_t *)NULL)) {
            dmaSafe = false;
            break;
        }
#ifdef STM32H7
        // Check if RX data can be DMAed
        if ((checkSegment->u.buffers.rxData) &&
            // DTCM can't be accessed by DMA1/2 on the H7
            (IS_DTCM(checkSegment->u.buffers.rxData) ||
            // Memory declared as DMA_RAM will have an address between &_dmaram_start__ and &_dmaram_end__
            (((checkSegment->u.buffers.rxData < &_dmaram_start__) || (checkSegment->u.buffers.rxData >= &_dmaram_end__)) &&
            (((uint32_t)checkSegment->u.buffers.rxData & (CACHE_LINE_SIZE - 1)) || (checkSegment->len & (CACHE_LINE_SIZE - 1)))))) {
            dmaSafe = false;
            break;
        }
        // Check if TX data can be DMAed
        else if ((checkSegment->u.buffers.txData) && IS_DTCM(checkSegment->u.buffers.txData)) {
            dmaSafe = false;
            break;
        }
#elif defined(STM32F7)
        if ((checkSegment->u.buffers.rxData) &&
            // DTCM is accessible and uncached on the F7
            (!IS_DTCM(checkSegment->u.buffers.rxData) &&
            (((uint32_t)checkSegment->u.buffers.rxData & (CACHE_LINE_SIZE - 1)) || (checkSegment->len & (CACHE_LINE_SIZE - 1))))) {
            dmaSafe = false;
            break;
        }
#elif defined(STM32G4)
        // Check if RX data can be DMAed
        if ((checkSegment->u.buffers.rxData) &&
            // CCM can't be accessed by DMA1/2 on the G4
            IS_CCM(checkSegment->u.buffers.rxData)) {
            dmaSafe = false;
            break;
        }
        if ((checkSegment->u.buffers.txData) &&
            // CCM can't be accessed by DMA1/2 on the G4
            IS_CCM(checkSegment->u.buffers.txData)) {
            dmaSafe = false;
            break;
        }
#endif
        // Note that these counts are only valid if dmaSafe is true
        segmentCount++;
        xferLen += checkSegment->len;
    }

    // Use DMA if possible
    // If there is more than one segment, or a single segment with negateCS cleared in the list terminator, then force DMA irrespective of length
    if (bus->useDMA && dmaSafe && ((segmentCount > 1) ||
                                   (xferLen >= SPI_DMA_THRESHOLD) ||
                                   !bus->curSegment[segmentCount].negateCS)) {
        // Initialise the init structures for the first transfer
        spiInternalInitStream(dev, false);

        // Assert Chip Select
        IOLo(dev->busType_u.spi.csnPin);

        // Start the transfers
        spiInternalStartDMA(dev);
    } else {
        busSegment_t *lastSegment = NULL;
        bool segmentComplete;

        // Manually work through the segment list performing a transfer for each
        while (bus->curSegment->len) {
            if (!lastSegment || lastSegment->negateCS) {
                // Assert Chip Select if necessary - it's costly so only do so if necessary
                IOLo(dev->busType_u.spi.csnPin);
            }

            spiInternalReadWriteBufPolled(
                    bus->busType_u.spi.instance,
                    bus->curSegment->u.buffers.txData,
                    bus->curSegment->u.buffers.rxData,
                    bus->curSegment->len);

            if (bus->curSegment->negateCS) {
                // Negate Chip Select
                IOHi(dev->busType_u.spi.csnPin);
            }

            segmentComplete = true;
            if (bus->curSegment->callback) {
                switch(bus->curSegment->callback(dev->callbackArg)) {
                case BUS_BUSY:
                    // Repeat the last DMA segment
                    segmentComplete = false;
                    break;

                case BUS_ABORT:
                    bus->curSegment = (busSegment_t *)BUS_SPI_FREE;
                    segmentComplete = false;
                    return;

                case BUS_READY:
                default:
                    // Advance to the next DMA segment
                    break;
                }
            }
            if (segmentComplete) {
                lastSegment = (busSegment_t *)bus->curSegment;
                bus->curSegment++;
            }
        }

        // If a following transaction has been linked, start it
        if (bus->curSegment->u.link.dev) {
            busSegment_t *endSegment = (busSegment_t *)bus->curSegment;
            const extDevice_t *nextDev = endSegment->u.link.dev;
            busSegment_t *nextSegments = (busSegment_t *)endSegment->u.link.segments;
            bus->curSegment = nextSegments;
            endSegment->u.link.dev = NULL;
            endSegment->u.link.segments = NULL;
            spiSequenceStart(nextDev);
        } else {
            // The end of the segment list has been reached, so mark transactions as complete
            bus->curSegment = (busSegment_t *)BUS_SPI_FREE;
        }
    }
}