/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2019 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"

#include <stdlib.h>

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"

/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */
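
/* Concretely: for a memory-to-memory (MDMA) pair, the destination channel
   (the one with WNR set in CONFIG) is the one that gets scheduled as the
   master; it pulls data out of the source channel via hw_dma_read_buffer and
   then writes it to memory, while the source channel only ever services
   dma_read_buffer requests as a slave.  */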

struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] =
{
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]
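
/* The MMR block is sixteen 32-bit slots, so dividing a register's byte offset
   by 4 indexes straight into mmr_names above.  For example, CONFIG lives two
   words past NEXT_DESC_PTR, so mmr_offset(config) is 0x8 and mmr_name (0x8)
   yields "CONFIG".  */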

static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}

static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}

static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}

static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }
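
  /* The element size feeds the sanity checks below: e.g. a WDSIZE_32 channel
     moves 4-byte elements, so START_ADDR must be 4-byte aligned and X_MODIFY
     is expected to stride by exactly +/-4.  */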

  /* Address has to be multiple of transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
              dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
        hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
        hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
        hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
        hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
        &dma->sal,
        &dma->sah,
        &dma->config,
        &dma->x_count,
        (void *) &dma->x_modify,
        &dma->y_count,
        (void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
        {
        case DMAFLOW_LARGE:
          dma->ndph = _flows[1];
          --ndsize;
          ++flows;
        case DMAFLOW_SMALL:
          dma->ndpl = _flows[0];
          --ndsize;
          ++flows;
          break;
        }

      for (idx = 0; idx < ndsize; ++idx)
        *stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}
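
/* Advance the channel state at the end of an X run.  In 2D mode the row
   counter is decremented and the address rewound by the final X_MODIFY and
   advanced by Y_MODIFY; e.g. with X_MODIFY == Y_MODIFY == 4 on contiguous
   32-bit data the adjustment cancels out and CURR_ADDR simply keeps growing,
   while a larger Y_MODIFY skips the gap between rows.  Returns nonzero when
   more work remains (another row or another descriptor).  */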
static bu32
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}
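
/* The transfer engine below is event driven: bfin_dma_reschedule queues
   bfin_dma_hw_event_callback, and each callback moves at most one buffer's
   worth of data before requeueing itself -- after 1 tick when progress was
   made, or after a long 5000 tick back-off when the peer has stalled.  */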

static void bfin_dma_hw_event_callback (struct hw *, void *);

static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
                                          bfin_dma_hw_event_callback, dma);
}

/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);

  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = min (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  abort for now.  */
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
        goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}
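
/* MMR write handler.  Most registers are only writable while the channel is
   idle; IRQ_STATUS is write-1-to-clear for DMA_DONE/DMA_ERR, and writing
   CONFIG with DMAEN set immediately processes the first descriptor and, if
   this channel is the master, queues the first transfer event.  */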
static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
                          address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, true))
    return 0;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
        {
          if (nr_bytes == 4)
            *value32p = value;
          else
            *value16p = value;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
        *value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
        {
          *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
          /* Clear peripheral peer so it gets looked up again.  */
          dma->hw_peer = NULL;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
        *value32p = value;
      else
        *value16p = value;

      if (bfin_dma_enabled (dma))
        {
          dma->irq_status |= DMA_RUN;
          bfin_dma_process_desc (me, dma);
          /* The writer is the master.  */
          if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
            bfin_dma_reschedule (me, 1);
        }
      else
        {
          dma->irq_status &= ~DMA_RUN;
          bfin_dma_reschedule (me, 0);
        }
      break;
    case mmr_offset(irq_status):
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
        *value16p = value;
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      return 0;
    }

  return nr_bytes;
}
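
/* Note that NEXT_DESC_PTR and START_ADDR are backed by unions in the struct
   above, so a 16-bit guest access to either half (the NDPL/NDPH or SAL/SAH
   pairs) and a full 32-bit access land on the same storage; the same layout
   is what lets descriptor fetches store into dma->ndpl/ndph directly.  */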

static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
                         address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, false))
    return 0;

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}

static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
                          unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
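
/* bfin_dma_dma_write_buffer below is the slave-side mirror of the read path:
   a master peer pushes data into this channel, which forwards it to memory at
   CURR_ADDR and advances the same CURR_ADDR / CURR_X_COUNT bookkeeping.  */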
static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
                           int space, unsigned_word addr,
                           unsigned nr_bytes,
                           int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, }, /* DMA Interrupt */
  { NULL, 0, 0, 0, },
};

static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}
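
/* After attachment, dma->base holds the channel's MMR base address, and the
   I/O handlers above recover a register offset with "addr % dma->base"; e.g.
   for an assumed base of 0xffc00c00, an access to 0xffc00c08 resolves to
   mmr_off 0x8, the CONFIG slot.  */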

static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}

const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};