// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Audio DMA Controller (ADMAC) on t8103 (M1) and other Apple chips
 *
 * Copyright (C) The Asahi Linux Contributors
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

#define NCHANNELS_MAX	64
#define IRQ_NOUTPUTS	4

/*
 * For allocation purposes we split the cache
 * memory into blocks of fixed size (given in bytes).
 */
#define SRAM_BLOCK	2048

#define RING_WRITE_SLOT		GENMASK(1, 0)
#define RING_READ_SLOT		GENMASK(5, 4)
#define RING_FULL		BIT(9)
#define RING_EMPTY		BIT(8)
#define RING_ERR		BIT(10)

#define STATUS_DESC_DONE	BIT(0)
#define STATUS_ERR		BIT(6)

#define FLAG_DESC_NOTIFY	BIT(16)

#define REG_TX_START		0x0000
#define REG_TX_STOP		0x0004
#define REG_RX_START		0x0008
#define REG_RX_STOP		0x000c
#define REG_IMPRINT		0x0090
#define REG_TX_SRAM_SIZE	0x0094
#define REG_RX_SRAM_SIZE	0x0098

#define REG_CHAN_CTL(ch)	(0x8000 + (ch) * 0x200)
#define REG_CHAN_CTL_RST_RINGS	BIT(0)

#define REG_DESC_RING(ch)	(0x8070 + (ch) * 0x200)
#define REG_REPORT_RING(ch)	(0x8074 + (ch) * 0x200)

#define REG_RESIDUE(ch)		(0x8064 + (ch) * 0x200)

#define REG_BUS_WIDTH(ch)	(0x8040 + (ch) * 0x200)

#define BUS_WIDTH_WORD_SIZE	GENMASK(3, 0)
#define BUS_WIDTH_FRAME_SIZE	GENMASK(7, 4)
#define BUS_WIDTH_8BIT		0x00
#define BUS_WIDTH_16BIT		0x01
#define BUS_WIDTH_32BIT		0x02
#define BUS_WIDTH_FRAME_2_WORDS	0x10
#define BUS_WIDTH_FRAME_4_WORDS	0x20

#define REG_CHAN_SRAM_CARVEOUT(ch)	(0x8050 + (ch) * 0x200)
#define CHAN_SRAM_CARVEOUT_SIZE		GENMASK(31, 16)
#define CHAN_SRAM_CARVEOUT_BASE		GENMASK(15, 0)

#define REG_CHAN_FIFOCTL(ch)	(0x8054 + (ch) * 0x200)
#define CHAN_FIFOCTL_LIMIT	GENMASK(31, 16)
#define CHAN_FIFOCTL_THRESHOLD	GENMASK(15, 0)

#define REG_DESC_WRITE(ch)	(0x10000 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
#define REG_REPORT_READ(ch)	(0x10100 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)

#define REG_TX_INTSTATE(idx)		(0x0030 + (idx) * 4)
#define REG_RX_INTSTATE(idx)		(0x0040 + (idx) * 4)
#define REG_GLOBAL_INTSTATE(idx)	(0x0050 + (idx) * 4)
#define REG_CHAN_INTSTATUS(ch, idx)	(0x8010 + (ch) * 0x200 + (idx) * 4)
#define REG_CHAN_INTMASK(ch, idx)	(0x8020 + (ch) * 0x200 + (idx) * 4)

struct admac_chan {
	unsigned int no;
	struct admac_data *host;
	struct dma_chan chan;
	struct tasklet_struct tasklet;

	u32 carveout;

	spinlock_t lock;
	struct admac_tx *current_tx;
	int nperiod_acks;

	/*
	 * We maintain a 'submitted' and 'issued' list mainly for interface
	 * correctness. Typical use of the driver (per channel) will be
	 * prepping, submitting and issuing a single cyclic transaction which
	 * will stay current until terminate_all is called.
	 */
	struct list_head submitted;
	struct list_head issued;

	struct list_head to_free;
};

struct admac_sram {
	u32 size;
	/*
	 * SRAM_CARVEOUT has 16-bit fields, so the SRAM cannot be larger than
	 * 64K and a 32-bit bitfield over 2K blocks covers it.
	 */
	u32 allocated;
};

struct admac_data {
	struct dma_device dma;
	struct device *dev;
	void __iomem *base;
	struct reset_control *rstc;

	struct mutex cache_alloc_lock;
	struct admac_sram txcache, rxcache;

	int irq;
	int irq_index;
	int nchannels;
	struct admac_chan channels[] __counted_by(nchannels);
};

struct admac_tx {
	struct dma_async_tx_descriptor tx;
	dma_addr_t buf_addr;
	dma_addr_t buf_end;
	size_t buf_len;
	size_t period_len;

	size_t submitted_pos;
	size_t reclaimed_pos;

	struct list_head node;
};

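/*
 * Allocate one SRAM_BLOCK-sized carveout from the TX or RX cache SRAM and
 * return its CHAN_SRAM_CARVEOUT register encoding through *out.
 */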
static int admac_alloc_sram_carveout(struct admac_data *ad,
				     enum dma_transfer_direction dir,
				     u32 *out)
{
	struct admac_sram *sram;
	int i, ret = 0, nblocks;

	if (dir == DMA_MEM_TO_DEV)
		sram = &ad->txcache;
	else
		sram = &ad->rxcache;

	mutex_lock(&ad->cache_alloc_lock);

	nblocks = sram->size / SRAM_BLOCK;
	for (i = 0; i < nblocks; i++)
		if (!(sram->allocated & BIT(i)))
			break;

	if (i < nblocks) {
		*out = FIELD_PREP(CHAN_SRAM_CARVEOUT_BASE, i * SRAM_BLOCK) |
			FIELD_PREP(CHAN_SRAM_CARVEOUT_SIZE, SRAM_BLOCK);
		sram->allocated |= BIT(i);
	} else {
		ret = -EBUSY;
	}

	mutex_unlock(&ad->cache_alloc_lock);

	return ret;
}

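/* Return a carveout previously handed out by admac_alloc_sram_carveout(). */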
static void admac_free_sram_carveout(struct admac_data *ad,
				     enum dma_transfer_direction dir,
				     u32 carveout)
{
	struct admac_sram *sram;
	u32 base = FIELD_GET(CHAN_SRAM_CARVEOUT_BASE, carveout);
	int i;

	if (dir == DMA_MEM_TO_DEV)
		sram = &ad->txcache;
	else
		sram = &ad->rxcache;

	if (WARN_ON(base >= sram->size))
		return;

	mutex_lock(&ad->cache_alloc_lock);
	i = base / SRAM_BLOCK;
	sram->allocated &= ~BIT(i);
	mutex_unlock(&ad->cache_alloc_lock);
}

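/* Read-modify-write an MMIO register (relaxed accessors, no implied barriers). */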
static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
{
	void __iomem *addr = ad->base + reg;
	u32 curr = readl_relaxed(addr);

	writel_relaxed((curr & ~mask) | (val & mask), addr);
}

static struct admac_chan *to_admac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct admac_chan, chan);
}

static struct admac_tx *to_admac_tx(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct admac_tx, tx);
}

static enum dma_transfer_direction admac_chan_direction(int channo)
{
	/* Channel directions are hardwired */
	return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
}

static dma_cookie_t admac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct admac_tx *adtx = to_admac_tx(tx);
	struct admac_chan *adchan = to_admac_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&adchan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&adtx->node, &adchan->submitted);
	spin_unlock_irqrestore(&adchan->lock, flags);

	return cookie;
}

static int admac_desc_free(struct dma_async_tx_descriptor *tx)
{
	kfree(to_admac_tx(tx));

	return 0;
}

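/*
 * Only cyclic transactions are supported, and the requested direction must
 * match the channel's hardwired direction.
 */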
static struct dma_async_tx_descriptor *admac_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct admac_chan *adchan = container_of(chan, struct admac_chan, chan);
	struct admac_tx *adtx;

	if (direction != admac_chan_direction(adchan->no))
		return NULL;

	adtx = kzalloc(sizeof(*adtx), GFP_NOWAIT);
	if (!adtx)
		return NULL;

	adtx->buf_addr = buf_addr;
	adtx->buf_len = buf_len;
	adtx->buf_end = buf_addr + buf_len;
	adtx->period_len = period_len;

	adtx->submitted_pos = 0;
	adtx->reclaimed_pos = 0;

	dma_async_tx_descriptor_init(&adtx->tx, chan);
	adtx->tx.tx_submit = admac_tx_submit;
	adtx->tx.desc_free = admac_desc_free;

	return &adtx->tx;
}

/*
 * Write one hardware descriptor for a dmaengine cyclic transaction.
 */
static void admac_cyclic_write_one_desc(struct admac_data *ad, int channo,
					struct admac_tx *tx)
{
	dma_addr_t addr;

	addr = tx->buf_addr + (tx->submitted_pos % tx->buf_len);

	/* If this happens, we have buggy code */
	WARN_ON_ONCE(addr + tx->period_len > tx->buf_end);

	dev_dbg(ad->dev, "ch%d descriptor: addr=0x%pad len=0x%zx flags=0x%lx\n",
		channo, &addr, tx->period_len, FLAG_DESC_NOTIFY);

	writel_relaxed(lower_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(upper_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(tx->period_len,      ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(FLAG_DESC_NOTIFY,    ad->base + REG_DESC_WRITE(channo));

	tx->submitted_pos += tx->period_len;
	tx->submitted_pos %= 2 * tx->buf_len;
}

/*
 * Write as many hardware descriptors for a dmaengine cyclic
 * transaction as there is space for.
 */
static void admac_cyclic_write_desc(struct admac_data *ad, int channo,
				    struct admac_tx *tx)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_FULL)
			break;
		admac_cyclic_write_one_desc(ad, channo, tx);
	}
}

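/*
 * Derive the number of occupied ring slots from the read/write slot pointers
 * and, when they coincide, from the FULL/EMPTY flags.
 */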
static int admac_ring_noccupied_slots(int ringval)
{
	int wrslot = FIELD_GET(RING_WRITE_SLOT, ringval);
	int rdslot = FIELD_GET(RING_READ_SLOT, ringval);

	if (wrslot != rdslot) {
		return (wrslot + 4 - rdslot) % 4;
	} else {
		WARN_ON((ringval & (RING_FULL | RING_EMPTY)) == 0);

		if (ringval & RING_FULL)
			return 4;
		else
			return 0;
	}
}

/*
 * Read from hardware the residue of a cyclic dmaengine transaction.
 */
static u32 admac_cyclic_read_residue(struct admac_data *ad, int channo,
				     struct admac_tx *adtx)
{
	u32 ring1, ring2;
	u32 residue1, residue2;
	int nreports;
	size_t pos;

	ring1 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
	residue1 = readl_relaxed(ad->base + REG_RESIDUE(channo));
	ring2 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
	residue2 = readl_relaxed(ad->base + REG_RESIDUE(channo));

	if (residue2 > residue1) {
		/*
		 * Controller must have loaded next descriptor between
		 * the two residue reads
		 */
		nreports = admac_ring_noccupied_slots(ring1) + 1;
	} else {
		/* No descriptor load between the two reads, ring2 is safe to use */
		nreports = admac_ring_noccupied_slots(ring2);
	}

	pos = adtx->reclaimed_pos + adtx->period_len * (nreports + 1) - residue2;

	return adtx->buf_len - pos % adtx->buf_len;
}

static enum dma_status admac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_data *ad = adchan->host;
	struct admac_tx *adtx;
	enum dma_status ret;
	size_t residue;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&adchan->lock, flags);
	adtx = adchan->current_tx;

	if (adtx && adtx->tx.cookie == cookie) {
		ret = DMA_IN_PROGRESS;
		residue = admac_cyclic_read_residue(ad, adchan->no, adtx);
	} else {
		ret = DMA_IN_PROGRESS;
		residue = 0;
		list_for_each_entry(adtx, &adchan->issued, node) {
			if (adtx->tx.cookie == cookie) {
				residue = adtx->buf_len;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&adchan->lock, flags);

	dma_set_residue(txstate, residue);
	return ret;
}

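/*
 * Clear pending channel interrupt status, enable the DESC_DONE and ERR
 * interrupts, then kick the channel's TX or RX side.
 */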
static void admac_start_chan(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	u32 startbit = 1 << (adchan->no / 2);

	writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
		       ad->base + REG_CHAN_INTSTATUS(adchan->no, ad->irq_index));
	writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
		       ad->base + REG_CHAN_INTMASK(adchan->no, ad->irq_index));

	switch (admac_chan_direction(adchan->no)) {
	case DMA_MEM_TO_DEV:
		writel_relaxed(startbit, ad->base + REG_TX_START);
		break;
	case DMA_DEV_TO_MEM:
		writel_relaxed(startbit, ad->base + REG_RX_START);
		break;
	default:
		break;
	}
	dev_dbg(adchan->host->dev, "ch%d start\n", adchan->no);
}

static void admac_stop_chan(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	u32 stopbit = 1 << (adchan->no / 2);

	switch (admac_chan_direction(adchan->no)) {
	case DMA_MEM_TO_DEV:
		writel_relaxed(stopbit, ad->base + REG_TX_STOP);
		break;
	case DMA_DEV_TO_MEM:
		writel_relaxed(stopbit, ad->base + REG_RX_STOP);
		break;
	default:
		break;
	}
	dev_dbg(adchan->host->dev, "ch%d stop\n", adchan->no);
}

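/* Pulse the ring-reset bit to clear the channel's descriptor and report rings. */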
static void admac_reset_rings(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;

	writel_relaxed(REG_CHAN_CTL_RST_RINGS,
		       ad->base + REG_CHAN_CTL(adchan->no));
	writel_relaxed(0, ad->base + REG_CHAN_CTL(adchan->no));
}

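/*
 * Program the first descriptor of the current transaction, start the channel,
 * then top up the descriptor ring with as many further periods as fit.
 */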
static void admac_start_current_tx(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	int ch = adchan->no;

	admac_reset_rings(adchan);
	writel_relaxed(0, ad->base + REG_CHAN_CTL(ch));

	admac_cyclic_write_one_desc(ad, ch, adchan->current_tx);
	admac_start_chan(adchan);
	admac_cyclic_write_desc(ad, ch, adchan->current_tx);
}

static void admac_issue_pending(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_tx *tx;
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	list_splice_tail_init(&adchan->submitted, &adchan->issued);
	if (!list_empty(&adchan->issued) && !adchan->current_tx) {
		tx = list_first_entry(&adchan->issued, struct admac_tx, node);
		list_del(&tx->node);

		adchan->current_tx = tx;
		adchan->nperiod_acks = 0;
		admac_start_current_tx(adchan);
	}
	spin_unlock_irqrestore(&adchan->lock, flags);
}

static int admac_pause(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	admac_stop_chan(adchan);

	return 0;
}

static int admac_resume(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	admac_start_chan(adchan);

	return 0;
}

static int admac_terminate_all(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	admac_stop_chan(adchan);
	admac_reset_rings(adchan);

	if (adchan->current_tx) {
		list_add_tail(&adchan->current_tx->node, &adchan->to_free);
		adchan->current_tx = NULL;
	}
	/*
	 * Descriptors can only be freed after the tasklet
	 * has been killed (in admac_synchronize).
	 */
	list_splice_tail_init(&adchan->submitted, &adchan->to_free);
	list_splice_tail_init(&adchan->issued, &adchan->to_free);
	spin_unlock_irqrestore(&adchan->lock, flags);

	return 0;
}

static void admac_synchronize(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_tx *adtx, *_adtx;
	LIST_HEAD(head);
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	list_splice_tail_init(&adchan->to_free, &head);
	spin_unlock_irqrestore(&adchan->lock, flags);

	tasklet_kill(&adchan->tasklet);

	list_for_each_entry_safe(adtx, _adtx, &head, node) {
		list_del(&adtx->node);
		admac_desc_free(&adtx->tx);
	}
}

static int admac_alloc_chan_resources(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_data *ad = adchan->host;
	int ret;

	dma_cookie_init(&adchan->chan);
	ret = admac_alloc_sram_carveout(ad, admac_chan_direction(adchan->no),
					&adchan->carveout);
	if (ret < 0)
		return ret;

	writel_relaxed(adchan->carveout,
		       ad->base + REG_CHAN_SRAM_CARVEOUT(adchan->no));

	return 0;
}

static void admac_free_chan_resources(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	admac_terminate_all(chan);
	admac_synchronize(chan);
	admac_free_sram_carveout(adchan->host, admac_chan_direction(adchan->no),
				 adchan->carveout);
}

static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct admac_data *ad = (struct admac_data *) ofdma->of_dma_data;
	unsigned int index;

	if (dma_spec->args_count != 1)
		return NULL;

	index = dma_spec->args[0];

	if (index >= ad->nchannels) {
		dev_err(ad->dev, "channel index %u out of bounds\n", index);
		return NULL;
	}

	return dma_get_slave_channel(&ad->channels[index].chan);
}

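/*
 * Pop all pending completion reports off the channel's report ring and return
 * how many were read. The third report word is not identified, hence 'unk1'.
 */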
static int admac_drain_reports(struct admac_data *ad, int channo)
{
	int count;

	for (count = 0; count < 4; count++) {
		u32 countval_hi, countval_lo, unk1, flags;

		if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_EMPTY)
			break;

		countval_lo = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		countval_hi = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		unk1 = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		flags = readl_relaxed(ad->base + REG_REPORT_READ(channo));

		dev_dbg(ad->dev, "ch%d report: countval=0x%llx unk1=0x%x flags=0x%x\n",
			channo, ((u64) countval_hi) << 32 | countval_lo, unk1, flags);
	}

	return count;
}

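/*
 * Handle and clear ring error flags; if no known cause is found, mask the
 * error interrupt to avoid an IRQ storm.
 */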
static void admac_handle_status_err(struct admac_data *ad, int channo)
{
	bool handled = false;

	if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_ERR) {
		writel_relaxed(RING_ERR, ad->base + REG_DESC_RING(channo));
		dev_err_ratelimited(ad->dev, "ch%d descriptor ring error\n", channo);
		handled = true;
	}

	if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_ERR) {
		writel_relaxed(RING_ERR, ad->base + REG_REPORT_RING(channo));
		dev_err_ratelimited(ad->dev, "ch%d report ring error\n", channo);
		handled = true;
	}

	if (unlikely(!handled)) {
		dev_err(ad->dev, "ch%d unknown error, masking errors as cause of IRQs\n", channo);
		admac_modify(ad, REG_CHAN_INTMASK(channo, ad->irq_index),
			     STATUS_ERR, 0);
	}
}

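/*
 * Acknowledge completed descriptors: drain the report ring, advance the
 * reclaimed position, refill the descriptor ring and schedule the callback
 * tasklet.
 */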
static void admac_handle_status_desc_done(struct admac_data *ad, int channo)
{
	struct admac_chan *adchan = &ad->channels[channo];
	unsigned long flags;
	int nreports;

	writel_relaxed(STATUS_DESC_DONE,
		       ad->base + REG_CHAN_INTSTATUS(channo, ad->irq_index));

	spin_lock_irqsave(&adchan->lock, flags);
	nreports = admac_drain_reports(ad, channo);

	if (adchan->current_tx) {
		struct admac_tx *tx = adchan->current_tx;

		adchan->nperiod_acks += nreports;
		tx->reclaimed_pos += nreports * tx->period_len;
		tx->reclaimed_pos %= 2 * tx->buf_len;

		admac_cyclic_write_desc(ad, channo, tx);
		tasklet_schedule(&adchan->tasklet);
	}
	spin_unlock_irqrestore(&adchan->lock, flags);
}

static void admac_handle_chan_int(struct admac_data *ad, int no)
{
	u32 cause = readl_relaxed(ad->base + REG_CHAN_INTSTATUS(no, ad->irq_index));

	if (cause & STATUS_ERR)
		admac_handle_status_err(ad, no);

	if (cause & STATUS_DESC_DONE)
		admac_handle_status_desc_done(ad, no);
}

static irqreturn_t admac_interrupt(int irq, void *devid)
{
	struct admac_data *ad = devid;
	u32 rx_intstate, tx_intstate, global_intstate;
	int i;

	rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
	tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
	global_intstate = readl_relaxed(ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));

	if (!tx_intstate && !rx_intstate && !global_intstate)
		return IRQ_NONE;

	for (i = 0; i < ad->nchannels; i += 2) {
		if (tx_intstate & 1)
			admac_handle_chan_int(ad, i);
		tx_intstate >>= 1;
	}

	for (i = 1; i < ad->nchannels; i += 2) {
		if (rx_intstate & 1)
			admac_handle_chan_int(ad, i);
		rx_intstate >>= 1;
	}

	if (global_intstate) {
		dev_warn(ad->dev, "clearing unknown global interrupt flag: %x\n",
			 global_intstate);
		writel_relaxed(~(u32) 0, ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
	}

	return IRQ_HANDLED;
}

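/* Invoke the client's callback once per period completed since the last run. */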
static void admac_chan_tasklet(struct tasklet_struct *t)
{
	struct admac_chan *adchan = from_tasklet(adchan, t, tasklet);
	struct admac_tx *adtx;
	struct dmaengine_desc_callback cb;
	struct dmaengine_result tx_result;
	int nacks;

	spin_lock_irq(&adchan->lock);
	adtx = adchan->current_tx;
	nacks = adchan->nperiod_acks;
	adchan->nperiod_acks = 0;
	spin_unlock_irq(&adchan->lock);

	if (!adtx || !nacks)
		return;

	tx_result.result = DMA_TRANS_NOERROR;
	tx_result.residue = 0;

	dmaengine_desc_get_callback(&adtx->tx, &cb);
	while (nacks--)
		dmaengine_desc_callback_invoke(&cb, &tx_result);
}

static int admac_device_config(struct dma_chan *chan,
			       struct dma_slave_config *config)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_data *ad = adchan->host;
	bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
	int wordsize = 0;
	u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
			~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);

	switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		wordsize = 1;
		bus_width |= BUS_WIDTH_8BIT;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		wordsize = 2;
		bus_width |= BUS_WIDTH_16BIT;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		wordsize = 4;
		bus_width |= BUS_WIDTH_32BIT;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * We take port_window_size to be the number of words in a frame.
	 *
	 * The controller has some means of out-of-band signalling, to the peripheral,
	 * of a word's position in a frame. That's where the importance of this control
	 * comes from.
	 */
	switch (is_tx ? config->dst_port_window_size : config->src_port_window_size) {
	case 0 ... 1:
		break;
	case 2:
		bus_width |= BUS_WIDTH_FRAME_2_WORDS;
		break;
	case 4:
		bus_width |= BUS_WIDTH_FRAME_4_WORDS;
		break;
	default:
		return -EINVAL;
	}

	writel_relaxed(bus_width, ad->base + REG_BUS_WIDTH(adchan->no));

	/*
	 * By FIFOCTL_LIMIT we seem to set the maximal number of bytes allowed to be
	 * held in controller's per-channel FIFO. Transfers seem to be triggered
	 * around the time FIFO occupancy touches FIFOCTL_THRESHOLD.
	 *
	 * The numbers we set are more or less arbitrary.
	 */
	writel_relaxed(FIELD_PREP(CHAN_FIFOCTL_LIMIT, 0x30 * wordsize)
		       | FIELD_PREP(CHAN_FIFOCTL_THRESHOLD, 0x18 * wordsize),
		       ad->base + REG_CHAN_FIFOCTL(adchan->no));

	return 0;
}

static int admac_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct admac_data *ad;
	struct dma_device *dma;
	int nchannels;
	int err, irq, i;

	err = of_property_read_u32(np, "dma-channels", &nchannels);
	if (err || nchannels > NCHANNELS_MAX) {
		dev_err(&pdev->dev, "missing or invalid dma-channels property\n");
		return -EINVAL;
	}

	ad = devm_kzalloc(&pdev->dev, struct_size(ad, channels, nchannels), GFP_KERNEL);
	if (!ad)
		return -ENOMEM;

	platform_set_drvdata(pdev, ad);
	ad->dev = &pdev->dev;
	ad->nchannels = nchannels;
	mutex_init(&ad->cache_alloc_lock);

	/*
	 * The controller has 4 IRQ outputs. Try them all until
	 * we find one we can use.
	 */
	for (i = 0; i < IRQ_NOUTPUTS; i++) {
		irq = platform_get_irq_optional(pdev, i);
		if (irq >= 0) {
			ad->irq_index = i;
			break;
		}
	}

	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");
	ad->irq = irq;

	ad->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ad->base))
		return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
				     "unable to obtain MMIO resource\n");

	ad->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
	if (IS_ERR(ad->rstc))
		return PTR_ERR(ad->rstc);

	dma = &ad->dma;

	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);

	dma->dev = &pdev->dev;
	dma->device_alloc_chan_resources = admac_alloc_chan_resources;
	dma->device_free_chan_resources = admac_free_chan_resources;
	dma->device_tx_status = admac_tx_status;
	dma->device_issue_pending = admac_issue_pending;
	dma->device_terminate_all = admac_terminate_all;
	dma->device_synchronize = admac_synchronize;
	dma->device_prep_dma_cyclic = admac_prep_dma_cyclic;
	dma->device_config = admac_device_config;
	dma->device_pause = admac_pause;
	dma->device_resume = admac_resume;

	dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);

	INIT_LIST_HEAD(&dma->channels);
	for (i = 0; i < nchannels; i++) {
		struct admac_chan *adchan = &ad->channels[i];

		adchan->host = ad;
		adchan->no = i;
		adchan->chan.device = &ad->dma;
		spin_lock_init(&adchan->lock);
		INIT_LIST_HEAD(&adchan->submitted);
		INIT_LIST_HEAD(&adchan->issued);
		INIT_LIST_HEAD(&adchan->to_free);
		list_add_tail(&adchan->chan.device_node, &dma->channels);
		tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
	}

	err = reset_control_reset(ad->rstc);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "unable to trigger reset\n");

	err = request_irq(irq, admac_interrupt, 0, dev_name(&pdev->dev), ad);
	if (err) {
		dev_err_probe(&pdev->dev, err,
			      "unable to register interrupt\n");
		goto free_reset;
	}

	err = dma_async_device_register(&ad->dma);
	if (err) {
		dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
		goto free_irq;
	}

	err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
	if (err) {
		dma_async_device_unregister(&ad->dma);
		dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
		goto free_irq;
	}

	ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
	ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);

	dev_info(&pdev->dev, "Audio DMA Controller\n");
	dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
		 readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);

	return 0;

free_irq:
	free_irq(ad->irq, ad);
free_reset:
	reset_control_rearm(ad->rstc);
	return err;
}

static void admac_remove(struct platform_device *pdev)
{
	struct admac_data *ad = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&ad->dma);
	free_irq(ad->irq, ad);
	reset_control_rearm(ad->rstc);
}

static const struct of_device_id admac_of_match[] = {
	{ .compatible = "apple,admac", },
	{ }
};
MODULE_DEVICE_TABLE(of, admac_of_match);

static struct platform_driver apple_admac_driver = {
	.driver = {
		.name = "apple-admac",
		.of_match_table = admac_of_match,
	},
	.probe = admac_probe,
	.remove = admac_remove,
};
module_platform_driver(apple_admac_driver);

MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
MODULE_DESCRIPTION("Driver for Audio DMA Controller (ADMAC) on Apple SoCs");
MODULE_LICENSE("GPL");