/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2024 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
/* This must come before any other includes.  */
#include "defs.h"

#include <stdlib.h>

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"
/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */
struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] =
{
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]
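
/* For example, CONFIG sits 8 bytes past NEXT_DESC_PTR both in the struct
   and in the hardware map, so mmr_offset(config) is 0x8 and mmr_name(0x8)
   picks "CONFIG" out of the table above.  */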
static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}

static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}
static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}
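
/* The peer handle is cached after the first lookup; writes to
   PERIPHERAL_MAP clear hw_peer below so it gets resolved again.  */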
static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }
  /* Address has to be a multiple of the transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;
  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
	      dma->config, dma->x_modify);
  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
	hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
	hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
	hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
	hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }
  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
	&dma->sal,
	&dma->sah,
	&dma->config,
	&dma->x_count,
	(void *) &dma->x_modify,
	&dma->y_count,
	(void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
	{
	case DMAFLOW_LARGE:
	  dma->ndph = _flows[1];
	  --ndsize;
	  ++flows;
	  ATTRIBUTE_FALLTHROUGH;
	case DMAFLOW_SMALL:
	  dma->ndpl = _flows[0];
	  --ndsize;
	  ++flows;
	  break;
	}

      for (idx = 0; idx < ndsize; ++idx)
	*stores[idx] = flows[idx];
    }
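
  /* For example, a DMAFLOW_SMALL descriptor with an NDSIZE of 5 carries the
     16-bit words NDPL, SAL, SAH, CONFIG, X_COUNT; a DMAFLOW_LARGE one leads
     with NDPL/NDPH instead, and the remaining words fill stores[] in order.  */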
  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}
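
/* Note the GNU "?:" extension above: an X_COUNT or Y_COUNT of zero loads
   the corresponding current count with 0xffff rather than 0.  */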
static bool
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);
  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return true;
    }
  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return false;
    default:
      bfin_dma_process_desc (me, dma);
      return true;
    }
}
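
/* bfin_dma_finish_x returns true when more work is queued (another 2D row
   or a follow-on descriptor) and false once the channel has stopped; the
   event callback uses this to decide whether to keep pumping.  */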
static void bfin_dma_hw_event_callback (struct hw *, void *);

static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
					  bfin_dma_hw_event_callback, dma);
}
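
/* A delay of 0 simply cancels any pending event; the CONFIG write handler
   uses that to stop the channel when DMAEN is cleared.  */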
/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;
  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);
  ret = 0;
  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = min (sizeof (buf), dma->curr_x_count * dma->ele_size);
  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  abort for now.  */
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
	goto reschedule;
    }
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);
  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}
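
/* Note the pacing above: if bytes actually moved, we come back on the next
   tick, but a stalled peer is re-polled only after 5000 ticks so the sim
   does not busy-wait on it.  */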
static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
			  address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, true))
    return 0;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();
  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
	{
	  if (nr_bytes == 4)
	    *value32p = value;
	  else
	    *value16p = value;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
	*value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
	{
	  *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
	  /* Clear peripheral peer so it gets looked up again.  */
	  dma->hw_peer = NULL;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
	*value32p = value;
      else
	*value16p = value;

      if (bfin_dma_enabled (dma))
	{
	  dma->irq_status |= DMA_RUN;
	  bfin_dma_process_desc (me, dma);
	  /* The writer is the master.  */
	  if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
	    bfin_dma_reschedule (me, 1);
	}
      else
	{
	  dma->irq_status &= ~DMA_RUN;
	  bfin_dma_reschedule (me, 0);
	}
      break;
    case mmr_offset(irq_status):
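      /* DMA_DONE and DMA_ERR are write-1-to-clear; writes to the other
         status bits are ignored.  */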
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
	*value16p = value;
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      return 0;
    }

  return nr_bytes;
}
static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
			 address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, false))
    return 0;

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}
static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
			  unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
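
/* This is the slave-side read path: a master peer drains this channel via
   hw_dma_read_buffer, and we only advance our own bookkeeping here.  */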
static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
			   int space, unsigned_word addr,
			   unsigned nr_bytes,
			   int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
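
/* Slave-side counterpart for writes: a master peer pushes data in via
   hw_dma_write_buffer, with bookkeeping mirroring the read path above.  */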
static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, }, /* DMA Interrupt */
  { NULL, 0, 0, 0, },
};
static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
				     &reg.address,
				     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
		     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}
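
/* The saved dma->base is what the MMR I/O handlers above use to turn an
   absolute address back into a register offset.  */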
static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}
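
/* bfin_dma_finish is the hw-tree constructor: the descriptor table below
   registers it under the name "bfin_dma" so the simulator can instantiate
   one instance per DMA channel.  */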
const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};