// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Technologies HIDMA DMA engine low level code
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"

#define HIDMA_EVRE_SIZE			16	/* each EVRE is 16 bytes */

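/* transfer channel (TRCA) register offsets */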
#define HIDMA_TRCA_CTRLSTS_REG		0x000
#define HIDMA_TRCA_RING_LOW_REG		0x008
#define HIDMA_TRCA_RING_HIGH_REG	0x00C
#define HIDMA_TRCA_RING_LEN_REG		0x010
#define HIDMA_TRCA_DOORBELL_REG		0x400

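/* event channel (EVCA) register offsets */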
#define HIDMA_EVCA_CTRLSTS_REG		0x000
#define HIDMA_EVCA_INTCTRL_REG		0x004
#define HIDMA_EVCA_RING_LOW_REG		0x008
#define HIDMA_EVCA_RING_HIGH_REG	0x00C
#define HIDMA_EVCA_RING_LEN_REG		0x010
#define HIDMA_EVCA_WRITE_PTR_REG	0x020
#define HIDMA_EVCA_DOORBELL_REG		0x400

#define HIDMA_EVCA_IRQ_STAT_REG		0x100
#define HIDMA_EVCA_IRQ_CLR_REG		0x108
#define HIDMA_EVCA_IRQ_EN_REG		0x110

#define HIDMA_EVRE_CFG_IDX		0

#define HIDMA_EVRE_ERRINFO_BIT_POS	24
#define HIDMA_EVRE_CODE_BIT_POS		28

#define HIDMA_EVRE_ERRINFO_MASK		GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK		GENMASK(3, 0)

#define HIDMA_CH_CONTROL_MASK		GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK		GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS		0x8

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS		0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS		1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS	9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS	10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS	11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS	14

#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS)	| \
		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))

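/* advance a ring offset by @size bytes, wrapping around at @ring_size */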
#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {								\
	iter += size;						\
	if (iter >= ring_size)					\
		iter -= ring_size;				\
} while (0)

#define HIDMA_CH_STATE(val)	\
	((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

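/* interrupt bits that report a transfer or event channel error */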
#define HIDMA_ERR_INT_MASK					\
	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)	|	\
	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	|	\
	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)		|	\
	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	|	\
	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

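/*
 * Channel commands and states exchanged through the CTRLSTS registers,
 * plus the completion codes reported in each EVRE.
 */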
enum ch_command {
	HIDMA_CH_DISABLE = 0,
	HIDMA_CH_ENABLE = 1,
	HIDMA_CH_SUSPEND = 2,
	HIDMA_CH_RESET = 9,
};

enum ch_state {
	HIDMA_CH_DISABLED = 0,
	HIDMA_CH_ENABLED = 1,
	HIDMA_CH_RUNNING = 2,
	HIDMA_CH_SUSPENDED = 3,
};

enum err_code {
	HIDMA_EVRE_STATUS_COMPLETE = 1,
	HIDMA_EVRE_STATUS_ERROR = 4,
};

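/* a channel is usable only when the HW reports it as enabled or running */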
static int hidma_is_chan_enabled(int state)
{
	switch (state) {
	case HIDMA_CH_ENABLED:
	case HIDMA_CH_RUNNING:
		return true;
	default:
		return false;
	}
}

void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
		return;
	}

	atomic_set(&tre->allocated, 0);
}

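/*
 * Reserve a free TRE from the pool, record the client callback and pre-fill
 * the config word. The index of the reserved TRE is returned through @tre_ch.
 */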
int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch)
{
	unsigned int i;
	struct hidma_tre *tre;
	u32 *tre_local;

	if (!tre_ch || !lldev)
		return -EINVAL;

	/* need to have at least one empty spot in the queue */
	for (i = 0; i < lldev->nr_tres - 1; i++) {
		if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
			break;
	}

	if (i == (lldev->nr_tres - 1))
		return -ENOMEM;

	tre = &lldev->trepool[i];
	tre->dev_name = dev_name;
	tre->callback = callback;
	tre->data = data;

	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
	*tre_ch = i;

	return 0;
}

/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(struct tasklet_struct *t)
{
	struct hidma_lldev *lldev = from_tasklet(lldev, t, task);
	struct hidma_tre *tre;

	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
		/* call the user if it has been read by the hardware */
		if (tre->callback)
			tre->callback(tre->data);
	}
}

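/*
 * Pair the next completed EVRE with its pending TRE, record the completion
 * code and hand the TRE to the tasklet for callback processing.
 */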
static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
				u8 err_code)
{
	struct hidma_tre *tre;
	unsigned long flags;
	u32 tre_iterator;

	spin_lock_irqsave(&lldev->lock, flags);

	tre_iterator = lldev->tre_processed_off;
	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
	if (!tre) {
		spin_unlock_irqrestore(&lldev->lock, flags);
		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
			 tre_iterator / HIDMA_TRE_SIZE);
		return -EINVAL;
	}
	lldev->pending_tre_list[tre->tre_index] = NULL;

	/*
	 * Keep track of pending TREs that SW is expecting to receive
	 * from HW. We got one now. Decrement our counter.
	 */
	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
		dev_warn(lldev->dev, "tre count mismatch on completion");
		atomic_set(&lldev->pending_tre_count, 0);
	}

	HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
				 lldev->tre_ring_size);
	lldev->tre_processed_off = tre_iterator;
	spin_unlock_irqrestore(&lldev->lock, flags);

	tre->err_info = err_info;
	tre->err_code = err_code;

	kfifo_put(&lldev->handoff_fifo, tre);
	tasklet_schedule(&lldev->task);

	return 0;
}

/*
 * Called to handle the interrupt for the channel.
 * Return a positive number if TREs or EVREs were consumed on this run.
 * Return 0 if there is nothing to consume or no pending TREs/EVREs found.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
	u32 evre_ring_size = lldev->evre_ring_size;
	u32 err_info, err_code, evre_write_off;
	u32 evre_iterator;
	u32 num_completed = 0;

	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
	evre_iterator = lldev->evre_processed_off;

	if ((evre_write_off > evre_ring_size) ||
	    (evre_write_off % HIDMA_EVRE_SIZE)) {
		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
		return 0;
	}

	/*
	 * By the time control reaches here the number of EVREs and TREs
	 * may not match. Only consume the ones that hardware told us.
	 */
	while (evre_iterator != evre_write_off) {
		u32 *current_evre = lldev->evre_ring + evre_iterator;
		u32 cfg;

		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
		err_info &= HIDMA_EVRE_ERRINFO_MASK;
		err_code =
		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

		if (hidma_post_completed(lldev, err_info, err_code))
			break;

		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
					 evre_ring_size);

		/*
		 * Read the new event descriptor written by the HW.
		 * As we are processing the delivered events, other events
		 * get queued to the SW for processing.
		 */
		evre_write_off =
		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
		num_completed++;

		/*
		 * An error interrupt might have arrived while we are
		 * processing the completed interrupt.
		 */
		if (!hidma_ll_isenabled(lldev))
			break;
	}

	if (num_completed) {
		u32 evre_read_off = (lldev->evre_processed_off +
				     HIDMA_EVRE_SIZE * num_completed);
		evre_read_off = evre_read_off % evre_ring_size;
		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

		/* record the last processed tre offset */
		lldev->evre_processed_off = evre_read_off;
	}

	return num_completed;
}

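/*
 * Complete every outstanding TRE with the given error code, e.g. when the
 * channel is being shut down after an error.
 */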
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
			       u8 err_code)
{
	while (atomic_read(&lldev->pending_tre_count)) {
		if (hidma_post_completed(lldev, err_info, err_code))
			break;
	}
}

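/* reset both channels and wait until the HW reports them disabled */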
static int hidma_ll_reset(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Allow the DMA logic to quiesce after reset: poll the channel state
	 * every 1ms, for up to 10ms.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not reset\n");
		return ret;
	}

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Allow the DMA logic to quiesce after reset: poll the channel state
	 * every 1ms, for up to 10ms.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_DISABLED;
	lldev->evch_state = HIDMA_CH_DISABLED;

	return 0;
}

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVREs from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. The EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs. We use the last processed offset to figure out which TRE is
 * associated with which EVRE. If two TREs are consumed by HW, the EVREs
 * are in order in the event ring.
 *
 * This handler does a single pass for consuming EVREs. Other EVREs may
 * be delivered while we are working. It will try to consume incoming
 * EVREs one more time and return.
 *
 * For unprocessed EVREs, hardware will trigger another interrupt until
 * all the interrupt bits are cleared.
 *
 * Hardware guarantees that by the time the interrupt is observed, all data
 * transactions in flight are delivered to their respective places and
 * are visible to the CPU.
 *
 * On demand paging for IOMMU is only supported for PCIe via PRI
 * (Page Request Interface), not for HIDMA. All other hardware instances
 * including HIDMA work on pinned DMA addresses.
 *
 * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
 * IOMMU latency will be built into the data movement time. By the time
 * the interrupt happens, IOMMU lookups + data movement have already taken
 * place.
 *
 * While the first read in a typical PCI endpoint ISR flushes all outstanding
 * requests traditionally to the destination, this concept does not apply
 * here for this HW.
 */
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
	unsigned long irqflags;

	if (cause & HIDMA_ERR_INT_MASK) {
		dev_err(lldev->dev, "error 0x%x, disabling...\n",
			cause);

		/* Clear out pending interrupts */
		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

		/* No further submissions. */
		hidma_ll_disable(lldev);

		/* Driver completes the txn and notifies the client. */
		hidma_cleanup_pending_tre(lldev, 0xFF,
					  HIDMA_EVRE_STATUS_ERROR);
		return;
	}

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);

	/*
	 * Fine tuned for this HW...
	 *
	 * This ISR has been designed for this particular hardware. Relaxed
	 * read and write accessors are used for performance reasons due to
	 * interrupt delivery guarantees. Do not copy this code blindly and
	 * expect that to work.
	 *
	 * Try to consume as many EVREs as possible.
	 */
	hidma_handle_tre_completion(lldev);
}

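/*
 * Wired interrupt entry point: keep servicing cause bits until the enabled
 * status register reads back zero.
 */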
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;
	u32 status;
	u32 enable;
	u32 cause;

	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	cause = status & enable;

	while (cause) {
		hidma_ll_int_handler_internal(lldev, cause);

		/*
		 * Another interrupt might have arrived while we are
		 * processing this one. Read the new cause.
		 */
		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
		cause = status & enable;
	}

	return IRQ_HANDLED;
}

irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
{
	struct hidma_lldev *lldev = arg;

	hidma_ll_int_handler_internal(lldev, cause);

	return IRQ_HANDLED;
}

int hidma_ll_enable(struct hidma_lldev *lldev)
{
	int ret;
	u32 val;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not get enabled\n");
		return ret;
	}

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not get enabled\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_ENABLED;
	lldev->evch_state = HIDMA_CH_ENABLED;

	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return 0;
}

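/* ring the transfer channel doorbell so the HW picks up the queued TREs */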
void hidma_ll_start(struct hidma_lldev *lldev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);
}

bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
	u32 val;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);

	/* both channels have to be enabled for the device to be usable */
	if (hidma_is_chan_enabled(lldev->trch_state) &&
	    hidma_is_chan_enabled(lldev->evch_state))
		return true;

	return false;
}

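/*
 * Copy a prepared TRE into the transfer ring and account for it. The HW does
 * not see it until hidma_ll_start() rings the doorbell.
 */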
void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;
	unsigned long flags;

	tre = &lldev->trepool[tre_ch];

	/* copy the TRE into its location in the TRE ring */
	spin_lock_irqsave(&lldev->lock, flags);
	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
	lldev->pending_tre_list[tre->tre_index] = tre;
	memcpy(lldev->tre_ring + lldev->tre_write_offset,
	       &tre->tre_local[0], HIDMA_TRE_SIZE);
	atomic_inc(&lldev->pending_tre_count);
	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
					% lldev->tre_ring_size;
	spin_unlock_irqrestore(&lldev->lock, flags);
}

/*
 * Note that even though we stop this channel, a pending transaction already
 * in flight will still complete and invoke its callback. This request only
 * prevents further submissions from being made.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	/* The channel needs to be in working state */
	if (!hidma_ll_isenabled(lldev))
		return 0;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Wait for the suspend to be confirmed: poll the channel state
	 * every 1ms, for up to 10ms.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Wait for the suspend to be confirmed: poll the channel state
	 * every 1ms, for up to 10ms.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_SUSPENDED;
	lldev->evch_state = HIDMA_CH_SUSPENDED;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return 0;
}

void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags, u32 txntype)
{
	struct hidma_tre *tre;
	u32 *tre_local;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
			tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
			tre_ch);
		return;
	}

	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
	tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
	tre_local[HIDMA_TRE_LEN_IDX] = len;
	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
	tre->int_flags = flags;
}

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
	int rc;
	u64 addr;
	u32 val;
	u32 nr_tres = lldev->nr_tres;

	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_processed_off = 0;
	lldev->evre_processed_off = 0;
	lldev->tre_write_offset = 0;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* clear all pending interrupts */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	rc = hidma_ll_reset(lldev);
	if (rc)
		return rc;

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	addr = lldev->tre_dma;
	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

	addr = lldev->evre_dma;
	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
	writel(HIDMA_EVRE_SIZE * nr_tres,
	       lldev->evca + HIDMA_EVCA_RING_LEN_REG);

	/* configure interrupts */
	hidma_ll_setup_irq(lldev, lldev->msi_support);

	rc = hidma_ll_enable(lldev);

	return rc;
}

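/*
 * Select between wired IRQ and MSI delivery, then clear and re-enable the
 * event channel interrupts.
 */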
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
{
	u32 val;

	lldev->msi_support = msi;

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* support IRQ by default */
	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
	if (!lldev->msi_support)
		val = val | 0x1;
	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

	/* clear all pending interrupts and enable them */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}

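/*
 * Allocate the TRE pool, the TRE/EVRE rings and the bookkeeping structures,
 * then bring the channels up. Returns NULL on any failure.
 */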
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx)
{
	u32 required_bytes;
	struct hidma_lldev *lldev;
	int rc;
	size_t sz;

	if (!trca || !evca || !dev || !nr_tres)
		return NULL;

	/* need at least four TREs */
	if (nr_tres < 4)
		return NULL;

	/* need an extra space */
	nr_tres += 1;

	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
	if (!lldev)
		return NULL;

	lldev->evca = evca;
	lldev->trca = trca;
	lldev->dev = dev;
	sz = sizeof(struct hidma_tre);
	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
	if (!lldev->trepool)
		return NULL;

	required_bytes = sizeof(lldev->pending_tre_list[0]);
	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
					       GFP_KERNEL);
	if (!lldev->pending_tre_list)
		return NULL;

	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
					      GFP_KERNEL);
	if (!lldev->tre_ring)
		return NULL;

	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
	lldev->nr_tres = nr_tres;

	/* the TRE ring has to be TRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
		u8 tre_ring_shift;

		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
		lldev->tre_dma += tre_ring_shift;
		lldev->tre_ring += tre_ring_shift;
	}

	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
					       GFP_KERNEL);
	if (!lldev->evre_ring)
		return NULL;

	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

	/* the EVRE ring has to be EVRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
		u8 evre_ring_shift;

		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
		lldev->evre_dma += evre_ring_shift;
		lldev->evre_ring += evre_ring_shift;
	}
	lldev->nr_tres = nr_tres;
	lldev->chidx = chidx;

	sz = nr_tres * sizeof(struct hidma_tre *);
	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
	if (rc)
		return NULL;

	rc = hidma_ll_setup(lldev);
	if (rc)
		return NULL;

	spin_lock_init(&lldev->lock);
	tasklet_setup(&lldev->task, hidma_ll_tre_complete);
	lldev->initialized = 1;
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return lldev;
}

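/*
 * Undo hidma_ll_init(): stop the completion tasklet, reset the channels and
 * mask the interrupts.
 */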
int hidma_ll_uninit(struct hidma_lldev *lldev)
{
	u32 required_bytes;
	int rc;
	u32 val;

	if (!lldev->initialized)
		return 0;

	lldev->initialized = 0;

	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
	tasklet_kill(&lldev->task);
	memset(lldev->trepool, 0, required_bytes);
	lldev->trepool = NULL;
	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_write_offset = 0;

	rc = hidma_ll_reset(lldev);

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return rc;
}

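/* translate the completion code recorded for a TRE into a dmaengine status */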
enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
	enum dma_status ret = DMA_ERROR;
	struct hidma_tre *tre;
	unsigned long flags;
	u8 err_code;

	spin_lock_irqsave(&lldev->lock, flags);

	tre = &lldev->trepool[tre_ch];
	err_code = tre->err_code;

	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
		ret = DMA_COMPLETE;
	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
		ret = DMA_ERROR;
	else
		ret = DMA_IN_PROGRESS;

	spin_unlock_irqrestore(&lldev->lock, flags);

	return ret;
}