/* drivers/dma/qcom/hidma.h */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Qualcomm Technologies HIDMA data structures
 *
 * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 */

#ifndef QCOM_HIDMA_H
#define QCOM_HIDMA_H

#include <linux/kfifo.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>

#define HIDMA_TRE_SIZE		32 /* each TRE is 32 bytes */
#define HIDMA_TRE_CFG_IDX	0
#define HIDMA_TRE_LEN_IDX	1
#define HIDMA_TRE_SRC_LOW_IDX	2
#define HIDMA_TRE_SRC_HI_IDX	3
#define HIDMA_TRE_DEST_LOW_IDX	4
#define HIDMA_TRE_DEST_HI_IDX	5

enum tre_type {
	HIDMA_TRE_MEMCPY = 3,
	HIDMA_TRE_MEMSET = 4,
};
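
/*
 * Illustrative sketch (not part of the original header): one plausible way
 * the word indices above could be used to populate the 32-byte local TRE
 * copy kept in struct hidma_tre for a memcpy transfer.  The helper name and
 * the exact encoding of the CFG word are assumptions for illustration, not
 * the driver's implementation.
 */
#if 0	/* example only */
static void example_fill_memcpy_tre(u32 *tre_local, dma_addr_t src,
				    dma_addr_t dest, u32 len)
{
	tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY; /* assumed encoding */
	tre_local[HIDMA_TRE_LEN_IDX] = len;		 /* byte count */
	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
}
#endif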

struct hidma_tre {
	atomic_t allocated;		/* if this channel is allocated */
	bool queued;			/* flag whether this is pending */
	u16 status;			/* status */
	u32 idx;			/* index of the tre */
	u32 dma_sig;			/* signature of the tre */
	const char *dev_name;		/* name of the device */
	void (*callback)(void *data);	/* requester callback */
	void *data;			/* data associated with this channel */
	struct hidma_lldev *lldev;	/* lldma device pointer */
	u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
	u32 tre_index;			/* the offset where this was written */
	u32 int_flags;			/* interrupt flags */
	u8 err_info;			/* error record in this transfer */
	u8 err_code;			/* completion code */
};

struct hidma_lldev {
	bool msi_support;		/* flag indicating MSI support */
	bool initialized;		/* initialized flag */
	u8 trch_state;			/* trch_state of the device */
	u8 evch_state;			/* evch_state of the device */
	u8 chidx;			/* channel index in the core */
	u32 nr_tres;			/* max number of configs */
	spinlock_t lock;		/* reentrancy */
	struct hidma_tre *trepool;	/* trepool of user configs */
	struct device *dev;		/* device */
	void __iomem *trca;		/* Transfer Channel address */
	void __iomem *evca;		/* Event Channel address */
	struct hidma_tre
		**pending_tre_list;	/* Pointers to pending TREs */
	atomic_t pending_tre_count;	/* Number of TREs pending */

	void *tre_ring;			/* TRE ring */
	dma_addr_t tre_dma;		/* TRE ring to be shared with HW */
	u32 tre_ring_size;		/* Byte size of the ring */
	u32 tre_processed_off;		/* last processed TRE */

	void *evre_ring;		/* EVRE ring */
	dma_addr_t evre_dma;		/* EVRE ring to be shared with HW */
	u32 evre_ring_size;		/* Byte size of the ring */
	u32 evre_processed_off;		/* last processed EVRE */

	u32 tre_write_offset;		/* TRE write location */
	struct tasklet_struct task;	/* task delivering notifications */
	DECLARE_KFIFO_PTR(handoff_fifo,
		struct hidma_tre *);	/* pending TREs FIFO */
};
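
/*
 * Illustrative sketch (not part of the original header): assuming
 * tre_write_offset advances by one 32-byte TRE at a time and wraps at
 * tre_ring_size, the next write location in the TRE ring could be computed
 * as below.  The helper is hypothetical and only shows how the ring fields
 * relate to each other.
 */
#if 0	/* example only */
static u32 example_next_tre_offset(const struct hidma_lldev *lldev)
{
	return (lldev->tre_write_offset + HIDMA_TRE_SIZE) %
	       lldev->tre_ring_size;
}
#endif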

struct hidma_desc {
	struct dma_async_tx_descriptor desc;
	/* link list node for this channel */
	struct list_head node;
	u32 tre_ch;
};

struct hidma_chan {
	bool paused;
	bool allocated;
	char dbg_name[16];
	u32 dma_sig;
	dma_cookie_t last_success;

	/*
	 * active descriptor on this channel
	 * It is used by the DMA complete notification to
	 * locate the descriptor that initiated the transfer.
	 */
	struct hidma_dev *dmadev;
	struct hidma_desc *running;

	struct dma_chan chan;
	struct list_head free;
	struct list_head prepared;
	struct list_head queued;
	struct list_head active;
	struct list_head completed;

	/* Lock for this structure */
	spinlock_t lock;
};

struct hidma_dev {
	int irq;
	int chidx;
	u32 nr_descriptors;
	int msi_virqbase;

	struct hidma_lldev *lldev;
	void __iomem *dev_trca;
	struct resource *trca_resource;
	void __iomem *dev_evca;
	struct resource *evca_resource;

	/* used to protect the pending channel list */
	spinlock_t lock;
	struct dma_device ddev;

	struct dentry *debugfs;

	/* sysfs entry for the channel id */
	struct device_attribute *chid_attrs;

	/* Task delivering issue_pending */
	struct tasklet_struct task;
};

int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
		     const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch);

void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags, u32 txntype);
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
			       u8 err_code);
void hidma_debug_init(struct hidma_dev *dmadev);
void hidma_debug_uninit(struct hidma_dev *dmadev);
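
/*
 * Illustrative sketch (not part of the original header): a plausible sequence
 * for submitting one memcpy through the lower-level interface declared above,
 * i.e. reserve a TRE, program it, queue it, then kick the channel.  The
 * callback, the dev_id value of 0, the flags value of 0 and the helper names
 * are assumptions for illustration; error handling is reduced to a single
 * bail-out.
 */
#if 0	/* example only */
static void example_done(void *data)
{
	/* the transfer for this TRE completed (or failed) */
}

static int example_submit_memcpy(struct hidma_lldev *llhndl, dma_addr_t src,
				 dma_addr_t dest, u32 len)
{
	u32 tre_ch;
	int rc;

	rc = hidma_ll_request(llhndl, 0, "example", example_done, NULL,
			      &tre_ch);
	if (rc)
		return rc;

	hidma_ll_set_transfer_params(llhndl, tre_ch, src, dest, len, 0,
				     HIDMA_TRE_MEMCPY);
	hidma_ll_queue_request(llhndl, tre_ch);
	hidma_ll_start(llhndl);
	return 0;
}
#endif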
#endif