/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2015 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "config.h"

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"
/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */
struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.
     BFIN_MMR_16() pads each 16-bit register out to its 4-byte slot,
     and the _pad fields cover the reserved "<INV>" slots.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())
static const char * const mmr_names[] =
{
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]
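
/* Worked example of the offset math above: CONFIG sits two 4-byte MMR
   slots past NEXT_DESC_PTR in both the struct and the hardware map, so
   mmr_offset(config) evaluates to 0x08 and mmr_name(0x08) yields
   "CONFIG".  The "<INV>" entries line up with the _pad fields.  */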
static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}
static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}
static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  /* Cache the lookup; writes to PERIPHERAL_MAP invalidate it below.  */
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}
static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }
  /* Address has to be multiple of transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
              dma->config, dma->x_modify);
  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
        hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
        hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
        hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
        hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }
  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
        &dma->sal,
        &dma->sah,
        &dma->config,
        &dma->x_count,
        (void *) &dma->x_modify,
        &dma->y_count,
        (void *) &dma->y_modify,
      };

      /* DMAFLOW_LARGE deliberately falls through: it consumes NDPH,
         then the shared NDPL word.  */
      switch (dma->config & DMAFLOW)
        {
        case DMAFLOW_LARGE:
          dma->ndph = _flows[1];
          --ndsize;
          ++flows;
        case DMAFLOW_SMALL:
          dma->ndpl = _flows[0];
          --ndsize;
          ++flows;
          break;
        }

      for (idx = 0; idx < ndsize; ++idx)
        *stores[idx] = flows[idx];
    }
  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  /* GNU "x ? : y" means "x ? x : y" -- a zero count reloads as 0xffff.  */
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}
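
/* Descriptor-fetch sketch (an illustration of the code above, not extra
   behavior): with DMAFLOW_SMALL and NDSIZE of 5, the sim_read fetches five
   16-bit words from NEXT_DESC_PTR.  flows[0] reloads NDPL, and the
   remaining four land in SAL, SAH, CONFIG, and X_COUNT via the stores[]
   table -- i.e. a "small" descriptor carries only a 16-bit next pointer,
   while DMAFLOW_LARGE spends two words (NDPH then NDPL) on a full 32-bit
   one.  */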
static bool
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return true;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return false;
    default:
      bfin_dma_process_desc (me, dma);
      return true;
    }
}
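
/* 2D addressing sketch (illustrative numbers only): with X_COUNT=4,
   X_MODIFY=2, Y_MODIFY=6, a row touches offsets 0, 2, 4, 6.  The pump loop
   applies X_MODIFY after every element, including the last, so at the row
   boundary the code above backs that final X_MODIFY out and applies
   Y_MODIFY instead: curr_addr becomes 8 - 2 + 6 = 12, i.e. 6 bytes past
   the last element actually transferred.  */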
static void bfin_dma_hw_event_callback (struct hw *, void *);
static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  /* A delay of 0 means cancel any pending pump event.  */
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
                                          bfin_dma_hw_event_callback, dma);
}
/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);

  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = MIN (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  abort for now.  */
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
        goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}
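
/* Scheduling note on the `ret ? 1 : 5000' above: the pump polls quickly
   (1 tick) while data is moving and backs way off (5000 ticks) once the
   peer stalls, so a slow consumer does not spin the event queue.  */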
static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
                          address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
        {
          if (nr_bytes == 4)
            *value32p = value;
          else
            *value16p = value;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
        *value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
        {
          *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
          /* Clear peripheral peer so it gets looked up again.  */
          dma->hw_peer = NULL;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
        *value32p = value;
      else
        *value16p = value;

      if (bfin_dma_enabled (dma))
        {
          dma->irq_status |= DMA_RUN;
          bfin_dma_process_desc (me, dma);
          /* The writer is the master.  */
          if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
            bfin_dma_reschedule (me, 1);
        }
      else
        {
          dma->irq_status &= ~DMA_RUN;
          bfin_dma_reschedule (me, 0);
        }
      break;
    case mmr_offset(irq_status):
      /* Write-1-to-clear: only DMA_DONE and DMA_ERR are clearable.  */
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
        *value16p = value;
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      break;
    }

  return nr_bytes;
}
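
/* Typical programming sequence implied by the CONFIG case above (an
   illustration, not additional code): software writes START_ADDR, X_COUNT,
   and X_MODIFY first, then writes CONFIG with DMAEN set last -- the DMAEN
   write is what triggers bfin_dma_process_desc() and, when this side is
   the master, schedules the first pump event.  */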
static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
                         address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}
static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
                          unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
                           int space, unsigned_word addr,
                           unsigned nr_bytes,
                           int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, }, /* DMA Interrupt */
  { NULL, 0, 0, 0, },
};
static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}
static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}
const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};
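
/* Usage sketch (hypothetical tree path and MMR base, for illustration
   only): the simulator instantiates this model through the hw tree,
   e.g. something along the lines of

     sim_hw_parse (sd, "/core/bfin_dmac@0/bfin_dma@0xffc00c00/reg 0xffc00c00 0x40");

   after which guest MMR accesses in that 0x40-byte range hit
   bfin_dma_io_{read,write}_buffer and peripherals reach the channel
   through the hw_dma_{read,write}_buffer hooks.  */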