// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2020, Linaro Limited
 */

#include <dt-bindings/dma/qcom-gpi.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "../dmaengine.h"
#include "../virt-dma.h"
#define TRE_TYPE_DMA		0x10
#define TRE_TYPE_GO		0x20
#define TRE_TYPE_CONFIG0	0x22

#define TRE_FLAGS_CHAIN		BIT(0)
#define TRE_FLAGS_IEOB		BIT(8)
#define TRE_FLAGS_IEOT		BIT(9)
#define TRE_FLAGS_BEI		BIT(10)
#define TRE_FLAGS_LINK		BIT(11)
#define TRE_FLAGS_TYPE		GENMASK(23, 16)

#define TRE_SPI_C0_WORD_SZ	GENMASK(4, 0)
#define TRE_SPI_C0_LOOPBACK	BIT(8)
#define TRE_SPI_C0_CS		BIT(11)
#define TRE_SPI_C0_CPHA		BIT(12)
#define TRE_SPI_C0_CPOL		BIT(13)
#define TRE_SPI_C0_TX_PACK	BIT(24)
#define TRE_SPI_C0_RX_PACK	BIT(25)

#define TRE_C0_CLK_DIV		GENMASK(11, 0)
#define TRE_C0_CLK_SRC		GENMASK(19, 16)

#define TRE_SPI_GO_CMD		GENMASK(4, 0)
#define TRE_SPI_GO_CS		GENMASK(10, 8)
#define TRE_SPI_GO_FRAG		BIT(26)

#define TRE_RX_LEN		GENMASK(23, 0)

#define TRE_I2C_C0_TLOW		GENMASK(7, 0)
#define TRE_I2C_C0_THIGH	GENMASK(15, 8)
#define TRE_I2C_C0_TCYL		GENMASK(23, 16)
#define TRE_I2C_C0_TX_PACK	BIT(24)
#define TRE_I2C_C0_RX_PACK	BIT(25)

#define TRE_I2C_GO_CMD		GENMASK(4, 0)
#define TRE_I2C_GO_ADDR		GENMASK(14, 8)
#define TRE_I2C_GO_STRETCH	BIT(26)

#define TRE_DMA_LEN		GENMASK(23, 0)
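/*
 * Note: each ring element (TRE/ERE) used below is four 32-bit words.
 * dword[0..2] carry the payload (buffer address, length, protocol
 * parameters) and dword[3] carries the element type (a TRE_TYPE_* value
 * placed in TRE_FLAGS_TYPE) plus the CHAIN/IEOB/IEOT/LINK flags, as
 * encoded by gpi_create_spi_tre() and gpi_create_i2c_tre() further down.
 */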
/* Register offsets from gpi-top */
#define GPII_n_CH_k_CNTXT_0_OFFS(n, k)	(0x20000 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_CNTXT_0_EL_SIZE	GENMASK(31, 24)
#define GPII_n_CH_k_CNTXT_0_CHSTATE	GENMASK(23, 20)
#define GPII_n_CH_k_CNTXT_0_ERIDX	GENMASK(18, 14)
#define GPII_n_CH_k_CNTXT_0_DIR		BIT(3)
#define GPII_n_CH_k_CNTXT_0_PROTO	GENMASK(2, 0)

#define GPII_n_CH_k_CNTXT_0(el_size, erindex, dir, chtype_proto)  \
	(FIELD_PREP(GPII_n_CH_k_CNTXT_0_EL_SIZE, el_size) | \
	 FIELD_PREP(GPII_n_CH_k_CNTXT_0_ERIDX, erindex) | \
	 FIELD_PREP(GPII_n_CH_k_CNTXT_0_DIR, dir) | \
	 FIELD_PREP(GPII_n_CH_k_CNTXT_0_PROTO, chtype_proto))

#define GPI_CHTYPE_DIR_IN	(0)
#define GPI_CHTYPE_DIR_OUT	(1)

#define GPI_CHTYPE_PROTO_GPI	(0x2)

#define GPII_n_CH_k_DOORBELL_0_OFFS(n, k)	(0x22000 + (0x4000 * (n)) + (0x8 * (k)))
#define GPII_n_CH_CMD_OFFS(n)			(0x23008 + (0x4000 * (n)))
#define GPII_n_CH_CMD_OPCODE			GENMASK(31, 24)
#define GPII_n_CH_CMD_CHID			GENMASK(7, 0)
#define GPII_n_CH_CMD(opcode, chid)	\
	(FIELD_PREP(GPII_n_CH_CMD_OPCODE, opcode) | \
	 FIELD_PREP(GPII_n_CH_CMD_CHID, chid))

#define GPII_n_CH_CMD_ALLOCATE		(0)
#define GPII_n_CH_CMD_START		(1)
#define GPII_n_CH_CMD_STOP		(2)
#define GPII_n_CH_CMD_RESET		(9)
#define GPII_n_CH_CMD_DE_ALLOC		(10)
#define GPII_n_CH_CMD_UART_SW_STALE	(32)
#define GPII_n_CH_CMD_UART_RFR_READY	(33)
#define GPII_n_CH_CMD_UART_RFR_NOT_READY (34)

/* EV Context Array */
#define GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k) (0x21000 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_EV_k_CNTXT_0_EL_SIZE	GENMASK(31, 24)
#define GPII_n_EV_k_CNTXT_0_CHSTATE	GENMASK(23, 20)
#define GPII_n_EV_k_CNTXT_0_INTYPE	BIT(16)
#define GPII_n_EV_k_CNTXT_0_CHTYPE	GENMASK(3, 0)

#define GPII_n_EV_k_CNTXT_0(el_size, inttype, chtype)	\
	(FIELD_PREP(GPII_n_EV_k_CNTXT_0_EL_SIZE, el_size) | \
	 FIELD_PREP(GPII_n_EV_k_CNTXT_0_INTYPE, inttype) | \
	 FIELD_PREP(GPII_n_EV_k_CNTXT_0_CHTYPE, chtype))

#define GPI_INTTYPE_IRQ		(1)
#define GPI_CHTYPE_GPI_EV	(0x2)
enum CNTXT_OFFS {
	CNTXT_0_CONFIG = 0x0,
	CNTXT_1_R_LENGTH = 0x4,
	CNTXT_2_RING_BASE_LSB = 0x8,
	CNTXT_3_RING_BASE_MSB = 0xC,
	CNTXT_4_RING_RP_LSB = 0x10,
	CNTXT_5_RING_RP_MSB = 0x14,
	CNTXT_6_RING_WP_LSB = 0x18,
	CNTXT_7_RING_WP_MSB = 0x1C,
	CNTXT_8_RING_INT_MOD = 0x20,
	CNTXT_9_RING_INTVEC = 0x24,
	CNTXT_10_RING_MSI_LSB = 0x28,
	CNTXT_11_RING_MSI_MSB = 0x2C,
	CNTXT_12_RING_RP_UPDATE_LSB = 0x30,
	CNTXT_13_RING_RP_UPDATE_MSB = 0x34,
};
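/*
 * The CNTXT_* values above are byte offsets from a ring's CNTXT_0
 * register; channel and event ring setup program them as
 * "cntxt_base_reg + CNTXT_x" (see gpi_alloc_chan() and
 * gpi_alloc_ev_chan() below).
 */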
#define GPII_n_EV_CH_k_DOORBELL_0_OFFS(n, k)	(0x22100 + (0x4000 * (n)) + (0x8 * (k)))
#define GPII_n_EV_CH_CMD_OFFS(n)	(0x23010 + (0x4000 * (n)))
#define GPII_n_EV_CMD_OPCODE		GENMASK(31, 24)
#define GPII_n_EV_CMD_CHID		GENMASK(7, 0)
#define GPII_n_EV_CMD(opcode, chid)	\
	(FIELD_PREP(GPII_n_EV_CMD_OPCODE, opcode) | \
	 FIELD_PREP(GPII_n_EV_CMD_CHID, chid))

#define GPII_n_EV_CH_CMD_ALLOCATE	(0x00)
#define GPII_n_EV_CH_CMD_RESET		(0x09)
#define GPII_n_EV_CH_CMD_DE_ALLOC	(0x0A)

#define GPII_n_CNTXT_TYPE_IRQ_OFFS(n)	(0x23080 + (0x4000 * (n)))

/* mask type register */
#define GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(n)	(0x23088 + (0x4000 * (n)))
#define GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK		GENMASK(6, 0)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL	BIT(6)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB		BIT(3)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB		BIT(2)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL	BIT(1)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL	BIT(0)

#define GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(n)	(0x23090 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n)	(0x23094 + (0x4000 * (n)))

/* Mask channel control interrupt register */
#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(n)	(0x23098 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK	GENMASK(1, 0)

/* Mask event control interrupt register */
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n)	(0x2309C + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK	BIT(0)

#define GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(n)	(0x230A0 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n)	(0x230A4 + (0x4000 * (n)))

/* Mask event interrupt register */
#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n)	(0x230B8 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK	BIT(0)

#define GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n)	(0x230C0 + (0x4000 * (n)))
#define GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(n)	(0x23100 + (0x4000 * (n)))
#define GPI_GLOB_IRQ_ERROR_INT_MSK		BIT(0)

/* GPII specific Global - Enable bit register */
#define GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(n)	(0x23108 + (0x4000 * (n)))
#define GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(n)	(0x23110 + (0x4000 * (n)))
#define GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(n)	(0x23118 + (0x4000 * (n)))

/* GPII general interrupt - Enable bit register */
#define GPII_n_CNTXT_GPII_IRQ_EN_OFFS(n)	(0x23120 + (0x4000 * (n)))
#define GPII_n_CNTXT_GPII_IRQ_EN_BMSK		GENMASK(3, 0)

#define GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(n)	(0x23128 + (0x4000 * (n)))

/* GPII Interrupt Type register */
#define GPII_n_CNTXT_INTSET_OFFS(n)		(0x23180 + (0x4000 * (n)))
#define GPII_n_CNTXT_INTSET_BMSK		BIT(0)

#define GPII_n_CNTXT_MSI_BASE_LSB_OFFS(n)	(0x23188 + (0x4000 * (n)))
#define GPII_n_CNTXT_MSI_BASE_MSB_OFFS(n)	(0x2318C + (0x4000 * (n)))
#define GPII_n_CNTXT_SCRATCH_0_OFFS(n)		(0x23400 + (0x4000 * (n)))
#define GPII_n_CNTXT_SCRATCH_1_OFFS(n)		(0x23404 + (0x4000 * (n)))

#define GPII_n_ERROR_LOG_OFFS(n)		(0x23200 + (0x4000 * (n)))

#define GPII_n_CH_k_QOS_OFFS(n, k)		(0x2005C + (0x4000 * (n)) + (0x80 * (k)))

/* Scratch registers */
#define GPII_n_CH_k_SCRATCH_0_OFFS(n, k)	(0x20060 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_SCRATCH_0_SEID		GENMASK(2, 0)
#define GPII_n_CH_k_SCRATCH_0_PROTO		GENMASK(7, 4)
#define GPII_n_CH_k_SCRATCH_0_PAIR		GENMASK(20, 16)
#define GPII_n_CH_k_SCRATCH_0(pair, proto, seid)	\
	(FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PAIR, pair) | \
	 FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PROTO, proto) | \
	 FIELD_PREP(GPII_n_CH_k_SCRATCH_0_SEID, seid))
#define GPII_n_CH_k_SCRATCH_1_OFFS(n, k)	(0x20064 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_SCRATCH_2_OFFS(n, k)	(0x20068 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_SCRATCH_3_OFFS(n, k)	(0x2006C + (0x4000 * (n)) + (0x80 * (k)))
struct __packed gpi_tre {
	u32 dword[4];
};

enum msm_gpi_tce_code {
	MSM_GPI_TCE_SUCCESS = 1,
	MSM_GPI_TCE_EOT = 2,
	MSM_GPI_TCE_EOB = 4,
	MSM_GPI_TCE_UNEXP_ERR = 16,
};

#define CMD_TIMEOUT_MS		(250)

#define MAX_CHANNELS_PER_GPII	(2)
#define GPI_TX_CHAN		(0)
#define GPI_RX_CHAN		(1)
#define STATE_IGNORE		(U32_MAX)
#define EV_FACTOR		(2)
#define REQ_OF_DMA_ARGS		(5) /* # of arguments required from client */
struct __packed xfer_compl_event {
	u64 ptr;
	u32 length:24;
	u8 code;
	u16 status;
	u8 type;
	u8 chid;
};

struct __packed immediate_data_event {
	u8 data_bytes[8];
	u8 length:4;
	u8 resvd:4;
	u16 tre_index;
	u8 code;
	u16 status;
	u8 type;
	u8 chid;
};

struct __packed qup_notif_event {
	u32 status;
	u32 time;
	u32 count:24;
	u8 resvd;
	u16 resvd1;
	u8 type;
	u8 chid;
};
struct __packed gpi_ere {
	u32 dword[4];
};

enum gpi_ev_type {
	XFER_COMPLETE_EV_TYPE = 0x22,
	IMMEDIATE_DATA_EV_TYPE = 0x30,
	QUP_NOTIF_EV_TYPE = 0x31,
	STALE_EV_TYPE = 0xFF,
};

union __packed gpi_event {
	struct __packed xfer_compl_event xfer_compl_event;
	struct __packed immediate_data_event immediate_data_event;
	struct __packed qup_notif_event qup_notif_event;
	struct __packed gpi_ere gpi_ere;
};
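/*
 * Every event variant occupies the same 16-byte element and keeps the
 * event type and channel id in the same trailing bytes, which is why
 * gpi_process_events() can read xfer_compl_event.type/chid before it
 * knows which union member actually applies.
 */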
enum gpii_irq_settings {
	DEFAULT_IRQ_SETTINGS,
	MASK_IEOB_SETTINGS,
};

enum gpi_ev_state {
	DEFAULT_EV_CH_STATE = 0,
	EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
	EV_STATE_ALLOCATED,
	MAX_EV_STATES
};

static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
	[EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
	[EV_STATE_ALLOCATED] = "ALLOCATED",
};

#define TO_GPI_EV_STATE_STR(_state) (((_state) >= MAX_EV_STATES) ? \
				    "INVALID" : gpi_ev_state_str[(_state)])
enum gpi_ch_state {
	DEFAULT_CH_STATE = 0x0,
	CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
	CH_STATE_ALLOCATED = 0x1,
	CH_STATE_STARTED = 0x2,
	CH_STATE_STOPPED = 0x3,
	CH_STATE_STOP_IN_PROC = 0x4,
	CH_STATE_ERROR = 0xf,
	MAX_CH_STATES
};

enum gpi_cmd {
	GPI_CH_CMD_BEGIN,
	GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
	GPI_CH_CMD_START,
	GPI_CH_CMD_STOP,
	GPI_CH_CMD_RESET,
	GPI_CH_CMD_DE_ALLOC,
	GPI_CH_CMD_UART_SW_STALE,
	GPI_CH_CMD_UART_RFR_READY,
	GPI_CH_CMD_UART_RFR_NOT_READY,
	GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
	GPI_EV_CMD_BEGIN,
	GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
	GPI_EV_CMD_RESET,
	GPI_EV_CMD_DEALLOC,
	GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
	GPI_MAX_CMD,
};
#define IS_CHAN_CMD(_cmd)	((_cmd) <= GPI_CH_CMD_END)

static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
	[GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
	[GPI_CH_CMD_START] = "CH START",
	[GPI_CH_CMD_STOP] = "CH STOP",
	[GPI_CH_CMD_RESET] = "CH_RESET",
	[GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
	[GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
	[GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
	[GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
	[GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
	[GPI_EV_CMD_RESET] = "EV RESET",
	[GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
};

#define TO_GPI_CMD_STR(_cmd) (((_cmd) >= GPI_MAX_CMD) ? "INVALID" : \
			      gpi_cmd_str[(_cmd)])
/**
 * enum gpi_pm_state - power states of a gpii
 * @DISABLE_STATE: no register access allowed
 * @CONFIG_STATE: client has configured the channel
 * @PREPARE_HARDWARE: register access is allowed
 *		      however, no processing EVENTS
 * @ACTIVE_STATE: channels are fully operational
 * @PREPARE_TERMINATE: graceful termination of channels
 *		       register access is allowed
 * @PAUSE_STATE: channels are active, but not processing any events
 */
enum gpi_pm_state {
	DISABLE_STATE,
	CONFIG_STATE,
	PREPARE_HARDWARE,
	ACTIVE_STATE,
	PREPARE_TERMINATE,
	PAUSE_STATE,
	MAX_PM_STATE,
};

#define REG_ACCESS_VALID(_pm_state) ((_pm_state) >= PREPARE_HARDWARE)

static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
	[DISABLE_STATE] = "DISABLE",
	[CONFIG_STATE] = "CONFIG",
	[PREPARE_HARDWARE] = "PREPARE HARDWARE",
	[ACTIVE_STATE] = "ACTIVE",
	[PREPARE_TERMINATE] = "PREPARE TERMINATE",
	[PAUSE_STATE] = "PAUSE",
};

#define TO_GPI_PM_STR(_state) (((_state) >= MAX_PM_STATE) ? \
			      "INVALID" : gpi_pm_state_str[(_state)])
static const struct {
	enum gpi_cmd gpi_cmd;
	u32 opcode;
	u32 state;
} gpi_cmd_info[GPI_MAX_CMD] = {
	{ GPI_CH_CMD_ALLOCATE, GPII_n_CH_CMD_ALLOCATE, CH_STATE_ALLOCATED, },
	{ GPI_CH_CMD_START, GPII_n_CH_CMD_START, CH_STATE_STARTED, },
	{ GPI_CH_CMD_STOP, GPII_n_CH_CMD_STOP, CH_STATE_STOPPED, },
	{ GPI_CH_CMD_RESET, GPII_n_CH_CMD_RESET, CH_STATE_ALLOCATED, },
	{ GPI_CH_CMD_DE_ALLOC, GPII_n_CH_CMD_DE_ALLOC, CH_STATE_NOT_ALLOCATED, },
	{ GPI_CH_CMD_UART_SW_STALE, GPII_n_CH_CMD_UART_SW_STALE, STATE_IGNORE, },
	{ GPI_CH_CMD_UART_RFR_READY, GPII_n_CH_CMD_UART_RFR_READY, STATE_IGNORE, },
	{ GPI_CH_CMD_UART_RFR_NOT_READY, GPII_n_CH_CMD_UART_RFR_NOT_READY, STATE_IGNORE, },
	{ GPI_EV_CMD_ALLOCATE, GPII_n_EV_CH_CMD_ALLOCATE, EV_STATE_ALLOCATED, },
	{ GPI_EV_CMD_RESET, GPII_n_EV_CH_CMD_RESET, EV_STATE_ALLOCATED, },
	{ GPI_EV_CMD_DEALLOC, GPII_n_EV_CH_CMD_DE_ALLOC, EV_STATE_NOT_ALLOCATED, },
};
struct gpi_ring {
	void *pre_aligned;
	size_t alloc_size;
	phys_addr_t phys_addr;
	dma_addr_t dma_handle;
	void *base;
	void *wp;
	void *rp;
	u32 len;
	u32 el_size;
	u32 elements;
	bool configured;
};

struct gpi_dev {
	struct dma_device dma_device;
	struct device *dev;
	struct resource *res;
	void __iomem *regs;
	void __iomem *ee_base; /*ee register base address*/
	u32 max_gpii; /* maximum # of gpii instances available per gpi block */
	u32 gpii_mask; /* gpii instances available for apps */
	u32 ev_factor; /* ev ring length factor */
	struct gpii *gpiis;
};

struct gchan {
	struct virt_dma_chan vc;
	u32 chid;
	u32 seid;
	u32 protocol;
	struct gpii *gpii;
	enum gpi_ch_state ch_state;
	enum gpi_pm_state pm_state;
	void __iomem *ch_cntxt_base_reg;
	void __iomem *ch_cntxt_db_reg;
	void __iomem *ch_cmd_reg;
	u32 dir;
	struct gpi_ring ch_ring;
	void *config;
};

struct gpii {
	u32 gpii_id;
	struct gchan gchan[MAX_CHANNELS_PER_GPII];
	struct gpi_dev *gpi_dev;
	int irq;
	void __iomem *regs; /* points to gpi top */
	void __iomem *ev_cntxt_base_reg;
	void __iomem *ev_cntxt_db_reg;
	void __iomem *ev_ring_rp_lsb_reg;
	void __iomem *ev_cmd_reg;
	void __iomem *ieob_clr_reg;
	struct mutex ctrl_lock;
	enum gpi_ev_state ev_state;
	bool ieob_set;
	enum gpi_pm_state pm_state;
	rwlock_t pm_lock;
	struct gpi_ring ev_ring;
	struct tasklet_struct ev_task; /* event processing tasklet */
	struct completion cmd_completion;
	enum gpi_cmd gpi_cmd;
	u32 cntxt_type_irq_msk;
	bool configured_irq;
};

struct gpi_desc {
	struct virt_dma_desc vd;
	size_t len;
	void *db; /* DB register to program */
	struct gchan *gchan;
	struct gpi_tre tre[MAX_TRE];
	u32 num_tre;
};
static const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
	GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
};

static irqreturn_t gpi_handle_irq(int irq, void *data);
static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
static void gpi_process_events(struct gpii *gpii);
static inline struct gchan *to_gchan(struct dma_chan *dma_chan)
{
	return container_of(dma_chan, struct gchan, vc.chan);
}

static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct gpi_desc, vd);
}

static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
				      void *addr)
{
	return ring->phys_addr + (addr - ring->base);
}

static inline void *to_virtual(const struct gpi_ring *const ring, phys_addr_t addr)
{
	return ring->base + (addr - ring->phys_addr);
}

static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
{
	return readl_relaxed(addr);
}

static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
{
	writel_relaxed(val, addr);
}
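/*
 * Ring pointers are kept as kernel virtual addresses (ring->base/wp/rp)
 * while the hardware context registers and completion events carry bus
 * addresses; to_physical()/to_virtual() above translate between the two
 * using the ring's aligned phys_addr as the pivot.
 */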
/* gpi_write_reg_field - write to specific bit field */
static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
				       u32 mask, u32 shift, u32 val)
{
	u32 tmp = gpi_read_reg(gpii, addr);

	tmp &= ~mask;
	val = tmp | ((val << shift) & mask);
	gpi_write_reg(gpii, addr, val);
}

static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
	void __iomem *addr = gpii->regs + offset;
	u32 tmp = gpi_read_reg(gpii, addr);

	tmp &= ~mask;
	tmp |= u32_encode_bits(val, mask);

	gpi_write_reg(gpii, addr, tmp);
}
593 static void gpi_disable_interrupts(struct gpii
*gpii
)
595 gpi_update_reg(gpii
, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii
->gpii_id
),
596 GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK
, 0);
597 gpi_update_reg(gpii
, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii
->gpii_id
),
598 GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK
, 0);
599 gpi_update_reg(gpii
, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii
->gpii_id
),
600 GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK
, 0);
601 gpi_update_reg(gpii
, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii
->gpii_id
),
602 GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK
, 0);
603 gpi_update_reg(gpii
, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii
->gpii_id
),
604 GPII_n_CNTXT_GPII_IRQ_EN_BMSK
, 0);
605 gpi_update_reg(gpii
, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii
->gpii_id
),
606 GPII_n_CNTXT_GPII_IRQ_EN_BMSK
, 0);
607 gpi_update_reg(gpii
, GPII_n_CNTXT_INTSET_OFFS(gpii
->gpii_id
),
608 GPII_n_CNTXT_INTSET_BMSK
, 0);
610 gpii
->cntxt_type_irq_msk
= 0;
611 devm_free_irq(gpii
->gpi_dev
->dev
, gpii
->irq
, gpii
);
612 gpii
->configured_irq
= false;
615 /* configure and enable interrupts */
616 static int gpi_config_interrupts(struct gpii
*gpii
, enum gpii_irq_settings settings
, bool mask
)
618 const u32 enable
= (GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL
|
619 GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
|
620 GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB
|
621 GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL
|
622 GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL
);
625 if (!gpii
->configured_irq
) {
626 ret
= devm_request_irq(gpii
->gpi_dev
->dev
, gpii
->irq
,
627 gpi_handle_irq
, IRQF_TRIGGER_HIGH
,
630 dev_err(gpii
->gpi_dev
->dev
, "error request irq:%d ret:%d\n",
636 if (settings
== MASK_IEOB_SETTINGS
) {
638 * GPII only uses one EV ring per gpii so we can globally
639 * enable/disable IEOB interrupt
642 gpii
->cntxt_type_irq_msk
|= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
;
644 gpii
->cntxt_type_irq_msk
&= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
);
645 gpi_update_reg(gpii
, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii
->gpii_id
),
646 GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK
, gpii
->cntxt_type_irq_msk
);
648 gpi_update_reg(gpii
, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii
->gpii_id
),
649 GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK
, enable
);
650 gpi_update_reg(gpii
, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii
->gpii_id
),
651 GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK
,
652 GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK
);
653 gpi_update_reg(gpii
, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii
->gpii_id
),
654 GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK
,
655 GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK
);
656 gpi_update_reg(gpii
, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii
->gpii_id
),
657 GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK
,
658 GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK
);
659 gpi_update_reg(gpii
, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii
->gpii_id
),
660 GPII_n_CNTXT_GPII_IRQ_EN_BMSK
,
661 GPII_n_CNTXT_GPII_IRQ_EN_BMSK
);
662 gpi_update_reg(gpii
, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii
->gpii_id
),
663 GPII_n_CNTXT_GPII_IRQ_EN_BMSK
, GPII_n_CNTXT_GPII_IRQ_EN_BMSK
);
664 gpi_update_reg(gpii
, GPII_n_CNTXT_MSI_BASE_LSB_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
665 gpi_update_reg(gpii
, GPII_n_CNTXT_MSI_BASE_MSB_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
666 gpi_update_reg(gpii
, GPII_n_CNTXT_SCRATCH_0_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
667 gpi_update_reg(gpii
, GPII_n_CNTXT_SCRATCH_1_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
668 gpi_update_reg(gpii
, GPII_n_CNTXT_INTSET_OFFS(gpii
->gpii_id
),
669 GPII_n_CNTXT_INTSET_BMSK
, 1);
670 gpi_update_reg(gpii
, GPII_n_ERROR_LOG_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
672 gpii
->cntxt_type_irq_msk
= enable
;
675 gpii
->configured_irq
= true;
679 /* Sends gpii event or channel command */
680 static int gpi_send_cmd(struct gpii
*gpii
, struct gchan
*gchan
,
681 enum gpi_cmd gpi_cmd
)
683 u32 chid
= MAX_CHANNELS_PER_GPII
;
684 unsigned long timeout
;
685 void __iomem
*cmd_reg
;
688 if (gpi_cmd
>= GPI_MAX_CMD
)
690 if (IS_CHAN_CMD(gpi_cmd
))
693 dev_dbg(gpii
->gpi_dev
->dev
,
694 "sending cmd: %s:%u\n", TO_GPI_CMD_STR(gpi_cmd
), chid
);
696 /* send opcode and wait for completion */
697 reinit_completion(&gpii
->cmd_completion
);
698 gpii
->gpi_cmd
= gpi_cmd
;
700 cmd_reg
= IS_CHAN_CMD(gpi_cmd
) ? gchan
->ch_cmd_reg
: gpii
->ev_cmd_reg
;
701 cmd
= IS_CHAN_CMD(gpi_cmd
) ? GPII_n_CH_CMD(gpi_cmd_info
[gpi_cmd
].opcode
, chid
) :
702 GPII_n_EV_CMD(gpi_cmd_info
[gpi_cmd
].opcode
, 0);
703 gpi_write_reg(gpii
, cmd_reg
, cmd
);
704 timeout
= wait_for_completion_timeout(&gpii
->cmd_completion
,
705 msecs_to_jiffies(CMD_TIMEOUT_MS
));
707 dev_err(gpii
->gpi_dev
->dev
, "cmd: %s completion timeout:%u\n",
708 TO_GPI_CMD_STR(gpi_cmd
), chid
);
712 /* confirm new ch state is correct , if the cmd is a state change cmd */
713 if (gpi_cmd_info
[gpi_cmd
].state
== STATE_IGNORE
)
716 if (IS_CHAN_CMD(gpi_cmd
) && gchan
->ch_state
== gpi_cmd_info
[gpi_cmd
].state
)
719 if (!IS_CHAN_CMD(gpi_cmd
) && gpii
->ev_state
== gpi_cmd_info
[gpi_cmd
].state
)
725 /* program transfer ring DB register */
726 static inline void gpi_write_ch_db(struct gchan
*gchan
,
727 struct gpi_ring
*ring
, void *wp
)
729 struct gpii
*gpii
= gchan
->gpii
;
732 p_wp
= to_physical(ring
, wp
);
733 gpi_write_reg(gpii
, gchan
->ch_cntxt_db_reg
, p_wp
);
736 /* program event ring DB register */
737 static inline void gpi_write_ev_db(struct gpii
*gpii
,
738 struct gpi_ring
*ring
, void *wp
)
742 p_wp
= ring
->phys_addr
+ (wp
- ring
->base
);
743 gpi_write_reg(gpii
, gpii
->ev_cntxt_db_reg
, p_wp
);
746 /* process transfer completion interrupt */
747 static void gpi_process_ieob(struct gpii
*gpii
)
749 gpi_write_reg(gpii
, gpii
->ieob_clr_reg
, BIT(0));
751 gpi_config_interrupts(gpii
, MASK_IEOB_SETTINGS
, 0);
752 tasklet_hi_schedule(&gpii
->ev_task
);
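/*
 * IEOB handling mirrors NAPI: the hard IRQ acks the IEOB bit, masks
 * further IEOB interrupts via gpi_config_interrupts(MASK_IEOB_SETTINGS, 0)
 * and defers the ring walk to gpi_ev_tasklet(), which re-enables the
 * interrupt once gpi_process_events() has drained the event ring.
 */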
755 /* process channel control interrupt */
756 static void gpi_process_ch_ctrl_irq(struct gpii
*gpii
)
758 u32 gpii_id
= gpii
->gpii_id
;
759 u32 offset
= GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id
);
760 u32 ch_irq
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
764 /* clear the status */
765 offset
= GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id
);
766 gpi_write_reg(gpii
, gpii
->regs
+ offset
, (u32
)ch_irq
);
768 for (chid
= 0; chid
< MAX_CHANNELS_PER_GPII
; chid
++) {
769 if (!(BIT(chid
) & ch_irq
))
772 gchan
= &gpii
->gchan
[chid
];
773 state
= gpi_read_reg(gpii
, gchan
->ch_cntxt_base_reg
+
775 state
= FIELD_GET(GPII_n_CH_k_CNTXT_0_CHSTATE
, state
);
778 * CH_CMD_DEALLOC cmd always successful. However cmd does
779 * not change hardware status. So overwriting software state
782 if (gpii
->gpi_cmd
== GPI_CH_CMD_DE_ALLOC
)
783 state
= DEFAULT_CH_STATE
;
784 gchan
->ch_state
= state
;
787 * Triggering complete all if ch_state is not a stop in process.
788 * Stop in process is a transition state and we will wait for
789 * stop interrupt before notifying.
791 if (gchan
->ch_state
!= CH_STATE_STOP_IN_PROC
)
792 complete_all(&gpii
->cmd_completion
);
796 /* processing gpi general error interrupts */
797 static void gpi_process_gen_err_irq(struct gpii
*gpii
)
799 u32 gpii_id
= gpii
->gpii_id
;
800 u32 offset
= GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id
);
801 u32 irq_stts
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
803 /* clear the status */
804 dev_dbg(gpii
->gpi_dev
->dev
, "irq_stts:0x%x\n", irq_stts
);
806 /* Clear the register */
807 offset
= GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id
);
808 gpi_write_reg(gpii
, gpii
->regs
+ offset
, irq_stts
);
811 /* processing gpi level error interrupts */
812 static void gpi_process_glob_err_irq(struct gpii
*gpii
)
814 u32 gpii_id
= gpii
->gpii_id
;
815 u32 offset
= GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id
);
816 u32 irq_stts
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
818 offset
= GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id
);
819 gpi_write_reg(gpii
, gpii
->regs
+ offset
, irq_stts
);
821 /* only error interrupt should be set */
822 if (irq_stts
& ~GPI_GLOB_IRQ_ERROR_INT_MSK
) {
823 dev_err(gpii
->gpi_dev
->dev
, "invalid error status:0x%x\n", irq_stts
);
827 offset
= GPII_n_ERROR_LOG_OFFS(gpii_id
);
828 gpi_write_reg(gpii
, gpii
->regs
+ offset
, 0);
831 /* gpii interrupt handler */
832 static irqreturn_t
gpi_handle_irq(int irq
, void *data
)
834 struct gpii
*gpii
= data
;
835 u32 gpii_id
= gpii
->gpii_id
;
839 read_lock_irqsave(&gpii
->pm_lock
, flags
);
842 * States are out of sync to receive interrupt
843 * while software state is in DISABLE state, bailing out.
845 if (!REG_ACCESS_VALID(gpii
->pm_state
)) {
846 dev_err(gpii
->gpi_dev
->dev
, "receive interrupt while in %s state\n",
847 TO_GPI_PM_STR(gpii
->pm_state
));
851 offset
= GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii
->gpii_id
);
852 type
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
855 /* global gpii error */
856 if (type
& GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB
) {
857 gpi_process_glob_err_irq(gpii
);
858 type
&= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB
);
861 /* transfer complete interrupt */
862 if (type
& GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
) {
863 gpi_process_ieob(gpii
);
864 type
&= ~GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
;
867 /* event control irq */
868 if (type
& GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL
) {
872 dev_dbg(gpii
->gpi_dev
->dev
,
873 "processing EV CTRL interrupt\n");
874 offset
= GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id
);
875 ev_ch_irq
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
877 offset
= GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
879 gpi_write_reg(gpii
, gpii
->regs
+ offset
, ev_ch_irq
);
880 ev_state
= gpi_read_reg(gpii
, gpii
->ev_cntxt_base_reg
+
882 ev_state
= FIELD_GET(GPII_n_EV_k_CNTXT_0_CHSTATE
, ev_state
);
885 * CMD EV_CMD_DEALLOC is always successful. However
886 * cmd does not change hardware status. So overwriting
887 * software state to default state.
889 if (gpii
->gpi_cmd
== GPI_EV_CMD_DEALLOC
)
890 ev_state
= DEFAULT_EV_CH_STATE
;
892 gpii
->ev_state
= ev_state
;
893 dev_dbg(gpii
->gpi_dev
->dev
, "setting EV state to %s\n",
894 TO_GPI_EV_STATE_STR(gpii
->ev_state
));
895 complete_all(&gpii
->cmd_completion
);
896 type
&= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL
);
899 /* channel control irq */
900 if (type
& GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL
) {
901 dev_dbg(gpii
->gpi_dev
->dev
, "process CH CTRL interrupts\n");
902 gpi_process_ch_ctrl_irq(gpii
);
903 type
&= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL
);
907 dev_err(gpii
->gpi_dev
->dev
, "Unhandled interrupt status:0x%x\n", type
);
908 gpi_process_gen_err_irq(gpii
);
912 offset
= GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii
->gpii_id
);
913 type
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
917 read_unlock_irqrestore(&gpii
->pm_lock
, flags
);
922 /* process DMA Immediate completion data events */
923 static void gpi_process_imed_data_event(struct gchan
*gchan
,
924 struct immediate_data_event
*imed_event
)
926 struct gpii
*gpii
= gchan
->gpii
;
927 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
928 void *tre
= ch_ring
->base
+ (ch_ring
->el_size
* imed_event
->tre_index
);
929 struct dmaengine_result result
;
930 struct gpi_desc
*gpi_desc
;
931 struct virt_dma_desc
*vd
;
936 * If channel not active don't process event
938 if (gchan
->pm_state
!= ACTIVE_STATE
) {
939 dev_err(gpii
->gpi_dev
->dev
, "skipping processing event because ch @ %s state\n",
940 TO_GPI_PM_STR(gchan
->pm_state
));
944 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
945 vd
= vchan_next_desc(&gchan
->vc
);
947 struct gpi_ere
*gpi_ere
;
948 struct gpi_tre
*gpi_tre
;
950 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
951 dev_dbg(gpii
->gpi_dev
->dev
, "event without a pending descriptor!\n");
952 gpi_ere
= (struct gpi_ere
*)imed_event
;
953 dev_dbg(gpii
->gpi_dev
->dev
,
954 "Event: %08x %08x %08x %08x\n",
955 gpi_ere
->dword
[0], gpi_ere
->dword
[1],
956 gpi_ere
->dword
[2], gpi_ere
->dword
[3]);
958 dev_dbg(gpii
->gpi_dev
->dev
,
959 "Pending TRE: %08x %08x %08x %08x\n",
960 gpi_tre
->dword
[0], gpi_tre
->dword
[1],
961 gpi_tre
->dword
[2], gpi_tre
->dword
[3]);
964 gpi_desc
= to_gpi_desc(vd
);
965 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
968 * RP pointed by Event is to last TRE processed,
969 * we need to update ring rp to tre + 1
971 tre
+= ch_ring
->el_size
;
972 if (tre
>= (ch_ring
->base
+ ch_ring
->len
))
976 /* make sure rp updates are immediately visible to all cores */
979 chid
= imed_event
->chid
;
980 if (imed_event
->code
== MSM_GPI_TCE_EOT
&& gpii
->ieob_set
) {
981 if (chid
== GPI_RX_CHAN
)
987 if (imed_event
->code
== MSM_GPI_TCE_UNEXP_ERR
)
988 result
.result
= DMA_TRANS_ABORTED
;
990 result
.result
= DMA_TRANS_NOERROR
;
991 result
.residue
= gpi_desc
->len
- imed_event
->length
;
993 dma_cookie_complete(&vd
->tx
);
994 dmaengine_desc_get_callback_invoke(&vd
->tx
, &result
);
997 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
999 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
1004 /* processing transfer completion events */
1005 static void gpi_process_xfer_compl_event(struct gchan
*gchan
,
1006 struct xfer_compl_event
*compl_event
)
1008 struct gpii
*gpii
= gchan
->gpii
;
1009 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
1010 void *ev_rp
= to_virtual(ch_ring
, compl_event
->ptr
);
1011 struct virt_dma_desc
*vd
;
1012 struct gpi_desc
*gpi_desc
;
1013 struct dmaengine_result result
;
1014 unsigned long flags
;
1017 /* only process events on active channel */
1018 if (unlikely(gchan
->pm_state
!= ACTIVE_STATE
)) {
1019 dev_err(gpii
->gpi_dev
->dev
, "skipping processing event because ch @ %s state\n",
1020 TO_GPI_PM_STR(gchan
->pm_state
));
1024 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
1025 vd
= vchan_next_desc(&gchan
->vc
);
1027 struct gpi_ere
*gpi_ere
;
1029 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
1030 dev_err(gpii
->gpi_dev
->dev
, "Event without a pending descriptor!\n");
1031 gpi_ere
= (struct gpi_ere
*)compl_event
;
1032 dev_err(gpii
->gpi_dev
->dev
,
1033 "Event: %08x %08x %08x %08x\n",
1034 gpi_ere
->dword
[0], gpi_ere
->dword
[1],
1035 gpi_ere
->dword
[2], gpi_ere
->dword
[3]);
1039 gpi_desc
= to_gpi_desc(vd
);
1040 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
1043 * RP pointed by Event is to last TRE processed,
1044 * we need to update ring rp to ev_rp + 1
1046 ev_rp
+= ch_ring
->el_size
;
1047 if (ev_rp
>= (ch_ring
->base
+ ch_ring
->len
))
1048 ev_rp
= ch_ring
->base
;
1049 ch_ring
->rp
= ev_rp
;
1051 /* update must be visible to other cores */
1054 chid
= compl_event
->chid
;
1055 if (compl_event
->code
== MSM_GPI_TCE_EOT
&& gpii
->ieob_set
) {
1056 if (chid
== GPI_RX_CHAN
)
1062 if (compl_event
->code
== MSM_GPI_TCE_UNEXP_ERR
) {
1063 dev_err(gpii
->gpi_dev
->dev
, "Error in Transaction\n");
1064 result
.result
= DMA_TRANS_ABORTED
;
1066 dev_dbg(gpii
->gpi_dev
->dev
, "Transaction Success\n");
1067 result
.result
= DMA_TRANS_NOERROR
;
1069 result
.residue
= gpi_desc
->len
- compl_event
->length
;
1070 dev_dbg(gpii
->gpi_dev
->dev
, "Residue %d\n", result
.residue
);
1072 dma_cookie_complete(&vd
->tx
);
1073 dmaengine_desc_get_callback_invoke(&vd
->tx
, &result
);
1076 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
1077 list_del(&vd
->node
);
1078 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
1083 /* process all events */
1084 static void gpi_process_events(struct gpii
*gpii
)
1086 struct gpi_ring
*ev_ring
= &gpii
->ev_ring
;
1087 phys_addr_t cntxt_rp
;
1089 union gpi_event
*gpi_event
;
1090 struct gchan
*gchan
;
1093 cntxt_rp
= gpi_read_reg(gpii
, gpii
->ev_ring_rp_lsb_reg
);
1094 rp
= to_virtual(ev_ring
, cntxt_rp
);
1097 while (rp
!= ev_ring
->rp
) {
1098 gpi_event
= ev_ring
->rp
;
1099 chid
= gpi_event
->xfer_compl_event
.chid
;
1100 type
= gpi_event
->xfer_compl_event
.type
;
1102 dev_dbg(gpii
->gpi_dev
->dev
,
1103 "Event: CHID:%u, type:%x %08x %08x %08x %08x\n",
1104 chid
, type
, gpi_event
->gpi_ere
.dword
[0],
1105 gpi_event
->gpi_ere
.dword
[1], gpi_event
->gpi_ere
.dword
[2],
1106 gpi_event
->gpi_ere
.dword
[3]);
1109 case XFER_COMPLETE_EV_TYPE
:
1110 gchan
= &gpii
->gchan
[chid
];
1111 gpi_process_xfer_compl_event(gchan
,
1112 &gpi_event
->xfer_compl_event
);
1115 dev_dbg(gpii
->gpi_dev
->dev
, "stale event, not processing\n");
1117 case IMMEDIATE_DATA_EV_TYPE
:
1118 gchan
= &gpii
->gchan
[chid
];
1119 gpi_process_imed_data_event(gchan
,
1120 &gpi_event
->immediate_data_event
);
1122 case QUP_NOTIF_EV_TYPE
:
1123 dev_dbg(gpii
->gpi_dev
->dev
, "QUP_NOTIF_EV_TYPE\n");
1126 dev_dbg(gpii
->gpi_dev
->dev
,
1127 "not supported event type:0x%x\n", type
);
1129 gpi_ring_recycle_ev_element(ev_ring
);
1131 gpi_write_ev_db(gpii
, ev_ring
, ev_ring
->wp
);
1133 /* clear pending IEOB events */
1134 gpi_write_reg(gpii
, gpii
->ieob_clr_reg
, BIT(0));
1136 cntxt_rp
= gpi_read_reg(gpii
, gpii
->ev_ring_rp_lsb_reg
);
1137 rp
= to_virtual(ev_ring
, cntxt_rp
);
1139 } while (rp
!= ev_ring
->rp
);
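	/*
	 * The outer do/while re-reads the hardware RP after ringing the
	 * event doorbell and clearing IEOB, so events that arrived while
	 * the ring was being drained are handled in the same tasklet pass
	 * instead of waiting for the next interrupt.
	 */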
1142 /* processing events using tasklet */
1143 static void gpi_ev_tasklet(unsigned long data
)
1145 struct gpii
*gpii
= (struct gpii
*)data
;
1147 read_lock(&gpii
->pm_lock
);
1148 if (!REG_ACCESS_VALID(gpii
->pm_state
)) {
1149 read_unlock(&gpii
->pm_lock
);
1150 dev_err(gpii
->gpi_dev
->dev
, "not processing any events, pm_state:%s\n",
1151 TO_GPI_PM_STR(gpii
->pm_state
));
1155 /* process the events */
1156 gpi_process_events(gpii
);
1158 /* enable IEOB, switching back to interrupts */
1159 gpi_config_interrupts(gpii
, MASK_IEOB_SETTINGS
, 1);
1160 read_unlock(&gpii
->pm_lock
);
1163 /* marks all pending events for the channel as stale */
1164 static void gpi_mark_stale_events(struct gchan
*gchan
)
1166 struct gpii
*gpii
= gchan
->gpii
;
1167 struct gpi_ring
*ev_ring
= &gpii
->ev_ring
;
1168 u32 cntxt_rp
, local_rp
;
1171 cntxt_rp
= gpi_read_reg(gpii
, gpii
->ev_ring_rp_lsb_reg
);
1173 ev_rp
= ev_ring
->rp
;
1174 local_rp
= (u32
)to_physical(ev_ring
, ev_rp
);
1175 while (local_rp
!= cntxt_rp
) {
1176 union gpi_event
*gpi_event
= ev_rp
;
1177 u32 chid
= gpi_event
->xfer_compl_event
.chid
;
1179 if (chid
== gchan
->chid
)
1180 gpi_event
->xfer_compl_event
.type
= STALE_EV_TYPE
;
1181 ev_rp
+= ev_ring
->el_size
;
1182 if (ev_rp
>= (ev_ring
->base
+ ev_ring
->len
))
1183 ev_rp
= ev_ring
->base
;
1184 cntxt_rp
= gpi_read_reg(gpii
, gpii
->ev_ring_rp_lsb_reg
);
1185 local_rp
= (u32
)to_physical(ev_ring
, ev_rp
);
1189 /* reset sw state and issue channel reset or de-alloc */
1190 static int gpi_reset_chan(struct gchan
*gchan
, enum gpi_cmd gpi_cmd
)
1192 struct gpii
*gpii
= gchan
->gpii
;
1193 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
1197 ret
= gpi_send_cmd(gpii
, gchan
, gpi_cmd
);
1199 dev_err(gpii
->gpi_dev
->dev
, "Error with cmd:%s ret:%d\n",
1200 TO_GPI_CMD_STR(gpi_cmd
), ret
);
1204 /* initialize the local ring ptrs */
1205 ch_ring
->rp
= ch_ring
->base
;
1206 ch_ring
->wp
= ch_ring
->base
;
1208 /* visible to other cores */
1211 /* check event ring for any stale events */
1212 write_lock_irq(&gpii
->pm_lock
);
1213 gpi_mark_stale_events(gchan
);
1215 /* remove all async descriptors */
1216 spin_lock(&gchan
->vc
.lock
);
1217 vchan_get_all_descriptors(&gchan
->vc
, &list
);
1218 spin_unlock(&gchan
->vc
.lock
);
1219 write_unlock_irq(&gpii
->pm_lock
);
1220 vchan_dma_desc_free_list(&gchan
->vc
, &list
);
1225 static int gpi_start_chan(struct gchan
*gchan
)
1227 struct gpii
*gpii
= gchan
->gpii
;
1230 ret
= gpi_send_cmd(gpii
, gchan
, GPI_CH_CMD_START
);
1232 dev_err(gpii
->gpi_dev
->dev
, "Error with cmd:%s ret:%d\n",
1233 TO_GPI_CMD_STR(GPI_CH_CMD_START
), ret
);
1237 /* gpii CH is active now */
1238 write_lock_irq(&gpii
->pm_lock
);
1239 gchan
->pm_state
= ACTIVE_STATE
;
1240 write_unlock_irq(&gpii
->pm_lock
);
1245 static int gpi_stop_chan(struct gchan
*gchan
)
1247 struct gpii
*gpii
= gchan
->gpii
;
1250 ret
= gpi_send_cmd(gpii
, gchan
, GPI_CH_CMD_STOP
);
1252 dev_err(gpii
->gpi_dev
->dev
, "Error with cmd:%s ret:%d\n",
1253 TO_GPI_CMD_STR(GPI_CH_CMD_STOP
), ret
);
1260 /* allocate and configure the transfer channel */
1261 static int gpi_alloc_chan(struct gchan
*chan
, bool send_alloc_cmd
)
1263 struct gpii
*gpii
= chan
->gpii
;
1264 struct gpi_ring
*ring
= &chan
->ch_ring
;
1266 u32 id
= gpii
->gpii_id
;
1267 u32 chid
= chan
->chid
;
1268 u32 pair_chid
= !chid
;
1270 if (send_alloc_cmd
) {
1271 ret
= gpi_send_cmd(gpii
, chan
, GPI_CH_CMD_ALLOCATE
);
1273 dev_err(gpii
->gpi_dev
->dev
, "Error with cmd:%s ret:%d\n",
1274 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE
), ret
);
1279 gpi_write_reg(gpii
, chan
->ch_cntxt_base_reg
+ CNTXT_0_CONFIG
,
1280 GPII_n_CH_k_CNTXT_0(ring
->el_size
, 0, chan
->dir
, GPI_CHTYPE_PROTO_GPI
));
1281 gpi_write_reg(gpii
, chan
->ch_cntxt_base_reg
+ CNTXT_1_R_LENGTH
, ring
->len
);
1282 gpi_write_reg(gpii
, chan
->ch_cntxt_base_reg
+ CNTXT_2_RING_BASE_LSB
, ring
->phys_addr
);
1283 gpi_write_reg(gpii
, chan
->ch_cntxt_base_reg
+ CNTXT_3_RING_BASE_MSB
,
1284 upper_32_bits(ring
->phys_addr
));
1285 gpi_write_reg(gpii
, chan
->ch_cntxt_db_reg
+ CNTXT_5_RING_RP_MSB
- CNTXT_4_RING_RP_LSB
,
1286 upper_32_bits(ring
->phys_addr
));
1287 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_SCRATCH_0_OFFS(id
, chid
),
1288 GPII_n_CH_k_SCRATCH_0(pair_chid
, chan
->protocol
, chan
->seid
));
1289 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_SCRATCH_1_OFFS(id
, chid
), 0);
1290 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_SCRATCH_2_OFFS(id
, chid
), 0);
1291 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_SCRATCH_3_OFFS(id
, chid
), 0);
1292 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_QOS_OFFS(id
, chid
), 1);
1294 /* flush all the writes */
1299 /* allocate and configure event ring */
1300 static int gpi_alloc_ev_chan(struct gpii
*gpii
)
1302 struct gpi_ring
*ring
= &gpii
->ev_ring
;
1303 void __iomem
*base
= gpii
->ev_cntxt_base_reg
;
1306 ret
= gpi_send_cmd(gpii
, NULL
, GPI_EV_CMD_ALLOCATE
);
1308 dev_err(gpii
->gpi_dev
->dev
, "error with cmd:%s ret:%d\n",
1309 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE
), ret
);
1313 /* program event context */
1314 gpi_write_reg(gpii
, base
+ CNTXT_0_CONFIG
,
1315 GPII_n_EV_k_CNTXT_0(ring
->el_size
, GPI_INTTYPE_IRQ
, GPI_CHTYPE_GPI_EV
));
1316 gpi_write_reg(gpii
, base
+ CNTXT_1_R_LENGTH
, ring
->len
);
1317 gpi_write_reg(gpii
, base
+ CNTXT_2_RING_BASE_LSB
, lower_32_bits(ring
->phys_addr
));
1318 gpi_write_reg(gpii
, base
+ CNTXT_3_RING_BASE_MSB
, upper_32_bits(ring
->phys_addr
));
1319 gpi_write_reg(gpii
, gpii
->ev_cntxt_db_reg
+ CNTXT_5_RING_RP_MSB
- CNTXT_4_RING_RP_LSB
,
1320 upper_32_bits(ring
->phys_addr
));
1321 gpi_write_reg(gpii
, base
+ CNTXT_8_RING_INT_MOD
, 0);
1322 gpi_write_reg(gpii
, base
+ CNTXT_10_RING_MSI_LSB
, 0);
1323 gpi_write_reg(gpii
, base
+ CNTXT_11_RING_MSI_MSB
, 0);
1324 gpi_write_reg(gpii
, base
+ CNTXT_8_RING_INT_MOD
, 0);
1325 gpi_write_reg(gpii
, base
+ CNTXT_12_RING_RP_UPDATE_LSB
, 0);
1326 gpi_write_reg(gpii
, base
+ CNTXT_13_RING_RP_UPDATE_MSB
, 0);
1328 /* add events to ring */
1329 ring
->wp
= (ring
->base
+ ring
->len
- ring
->el_size
);
1331 /* flush all the writes */
1334 /* gpii is active now */
1335 write_lock_irq(&gpii
->pm_lock
);
1336 gpii
->pm_state
= ACTIVE_STATE
;
1337 write_unlock_irq(&gpii
->pm_lock
);
1338 gpi_write_ev_db(gpii
, ring
, ring
->wp
);
/* calculate # of ERE/TRE available to queue */
static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
{
	u32 elements = 0;

	if (ring->wp < ring->rp) {
		elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		elements = (ring->rp - ring->base) / ring->el_size;
		elements += ((ring->base + ring->len - ring->wp) / ring->el_size) - 1;
	}

	return elements;
}

static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
{
	if (gpi_ring_num_elements_avail(ring) <= 0)
		return -ENOMEM;

	*wp = ring->wp;
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;

	/* visible to other cores */
	smp_wmb();

	return 0;
}

static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
{
	/* Update the WP */
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;

	/* Update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* visible to other cores */
	smp_wmb();
}
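/*
 * Ring occupancy convention: one element is always left unused so that
 * wp == rp unambiguously means "empty"; gpi_ring_num_elements_avail()
 * reflects this with its "- 1" terms, and recycling an event element
 * advances wp and rp together to hand the slot back to the hardware.
 */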
1390 static void gpi_free_ring(struct gpi_ring
*ring
,
1393 dma_free_coherent(gpii
->gpi_dev
->dev
, ring
->alloc_size
,
1394 ring
->pre_aligned
, ring
->dma_handle
);
1395 memset(ring
, 0, sizeof(*ring
));
1398 /* allocate memory for transfer and event rings */
1399 static int gpi_alloc_ring(struct gpi_ring
*ring
, u32 elements
,
1400 u32 el_size
, struct gpii
*gpii
)
1402 u64 len
= elements
* el_size
;
1405 /* ring len must be power of 2 */
1406 bit
= find_last_bit((unsigned long *)&len
, 32);
1407 if (((1 << bit
) - 1) & len
)
1410 ring
->alloc_size
= (len
+ (len
- 1));
1411 dev_dbg(gpii
->gpi_dev
->dev
,
1412 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
1413 elements
, el_size
, (elements
* el_size
), len
,
1416 ring
->pre_aligned
= dma_alloc_coherent(gpii
->gpi_dev
->dev
,
1418 &ring
->dma_handle
, GFP_KERNEL
);
1419 if (!ring
->pre_aligned
) {
1420 dev_err(gpii
->gpi_dev
->dev
, "could not alloc size:%zu mem for ring\n",
1425 /* align the physical mem */
1426 ring
->phys_addr
= (ring
->dma_handle
+ (len
- 1)) & ~(len
- 1);
1427 ring
->base
= ring
->pre_aligned
+ (ring
->phys_addr
- ring
->dma_handle
);
1428 ring
->rp
= ring
->base
;
1429 ring
->wp
= ring
->base
;
1431 ring
->el_size
= el_size
;
1432 ring
->elements
= ring
->len
/ ring
->el_size
;
1433 memset(ring
->base
, 0, ring
->len
);
1434 ring
->configured
= true;
1436 /* update to other cores */
1439 dev_dbg(gpii
->gpi_dev
->dev
,
1440 "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
1441 &ring
->dma_handle
, &ring
->phys_addr
, ring
->len
,
1442 ring
->el_size
, ring
->elements
);
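/*
 * gpi_alloc_ring() over-allocates len + (len - 1) bytes of coherent
 * memory and rounds dma_handle up to the next len boundary, since len
 * must be a power of two and the ring base presumably has to be aligned
 * to its own size; ring->base is the matching virtual address inside the
 * pre_aligned allocation.
 */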
1447 /* copy tre into transfer ring */
1448 static void gpi_queue_xfer(struct gpii
*gpii
, struct gchan
*gchan
,
1449 struct gpi_tre
*gpi_tre
, void **wp
)
1451 struct gpi_tre
*ch_tre
;
1454 /* get next tre location we can copy */
1455 ret
= gpi_ring_add_element(&gchan
->ch_ring
, (void **)&ch_tre
);
1456 if (unlikely(ret
)) {
1457 dev_err(gpii
->gpi_dev
->dev
, "Error adding ring element to xfer ring\n");
1461 /* copy the tre info */
1462 memcpy(ch_tre
, gpi_tre
, sizeof(*ch_tre
));
1466 /* reset and restart transfer channel */
1467 static int gpi_terminate_all(struct dma_chan
*chan
)
1469 struct gchan
*gchan
= to_gchan(chan
);
1470 struct gpii
*gpii
= gchan
->gpii
;
1471 int schid
, echid
, i
;
1474 mutex_lock(&gpii
->ctrl_lock
);
1477 * treat both channels as a group if its protocol is not UART
1478 * STOP, RESET, or START needs to be in lockstep
1480 schid
= (gchan
->protocol
== QCOM_GPI_UART
) ? gchan
->chid
: 0;
1481 echid
= (gchan
->protocol
== QCOM_GPI_UART
) ? schid
+ 1 : MAX_CHANNELS_PER_GPII
;
1483 /* stop the channel */
1484 for (i
= schid
; i
< echid
; i
++) {
1485 gchan
= &gpii
->gchan
[i
];
1487 /* disable ch state so no more TRE processing */
1488 write_lock_irq(&gpii
->pm_lock
);
1489 gchan
->pm_state
= PREPARE_TERMINATE
;
1490 write_unlock_irq(&gpii
->pm_lock
);
1492 /* send command to Stop the channel */
1493 ret
= gpi_stop_chan(gchan
);
1496 /* reset the channels (clears any pending tre) */
1497 for (i
= schid
; i
< echid
; i
++) {
1498 gchan
= &gpii
->gchan
[i
];
1500 ret
= gpi_reset_chan(gchan
, GPI_CH_CMD_RESET
);
1502 dev_err(gpii
->gpi_dev
->dev
, "Error resetting channel ret:%d\n", ret
);
1503 goto terminate_exit
;
1506 /* reprogram channel CNTXT */
1507 ret
= gpi_alloc_chan(gchan
, false);
1509 dev_err(gpii
->gpi_dev
->dev
, "Error alloc_channel ret:%d\n", ret
);
1510 goto terminate_exit
;
1514 /* restart the channels */
1515 for (i
= schid
; i
< echid
; i
++) {
1516 gchan
= &gpii
->gchan
[i
];
1518 ret
= gpi_start_chan(gchan
);
1520 dev_err(gpii
->gpi_dev
->dev
, "Error Starting Channel ret:%d\n", ret
);
1521 goto terminate_exit
;
1526 mutex_unlock(&gpii
->ctrl_lock
);
1530 /* pause dma transfer for all channels */
1531 static int gpi_pause(struct dma_chan
*chan
)
1533 struct gchan
*gchan
= to_gchan(chan
);
1534 struct gpii
*gpii
= gchan
->gpii
;
1537 mutex_lock(&gpii
->ctrl_lock
);
1540 * pause/resume are per gpii not per channel, so
1541 * client needs to call pause only once
1543 if (gpii
->pm_state
== PAUSE_STATE
) {
1544 dev_dbg(gpii
->gpi_dev
->dev
, "channel is already paused\n");
1545 mutex_unlock(&gpii
->ctrl_lock
);
1549 /* send stop command to stop the channels */
1550 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++) {
1551 ret
= gpi_stop_chan(&gpii
->gchan
[i
]);
1553 mutex_unlock(&gpii
->ctrl_lock
);
1558 disable_irq(gpii
->irq
);
1560 /* Wait for threads to complete out */
1561 tasklet_kill(&gpii
->ev_task
);
1563 write_lock_irq(&gpii
->pm_lock
);
1564 gpii
->pm_state
= PAUSE_STATE
;
1565 write_unlock_irq(&gpii
->pm_lock
);
1566 mutex_unlock(&gpii
->ctrl_lock
);
1571 /* resume dma transfer */
1572 static int gpi_resume(struct dma_chan
*chan
)
1574 struct gchan
*gchan
= to_gchan(chan
);
1575 struct gpii
*gpii
= gchan
->gpii
;
1578 mutex_lock(&gpii
->ctrl_lock
);
1579 if (gpii
->pm_state
== ACTIVE_STATE
) {
1580 dev_dbg(gpii
->gpi_dev
->dev
, "channel is already active\n");
1581 mutex_unlock(&gpii
->ctrl_lock
);
1585 enable_irq(gpii
->irq
);
1587 /* send start command to start the channels */
1588 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++) {
1589 ret
= gpi_send_cmd(gpii
, &gpii
->gchan
[i
], GPI_CH_CMD_START
);
1591 dev_err(gpii
->gpi_dev
->dev
, "Error starting chan, ret:%d\n", ret
);
1592 mutex_unlock(&gpii
->ctrl_lock
);
1597 write_lock_irq(&gpii
->pm_lock
);
1598 gpii
->pm_state
= ACTIVE_STATE
;
1599 write_unlock_irq(&gpii
->pm_lock
);
1600 mutex_unlock(&gpii
->ctrl_lock
);
1605 static void gpi_desc_free(struct virt_dma_desc
*vd
)
1607 struct gpi_desc
*gpi_desc
= to_gpi_desc(vd
);
1614 gpi_peripheral_config(struct dma_chan
*chan
, struct dma_slave_config
*config
)
1616 struct gchan
*gchan
= to_gchan(chan
);
1618 if (!config
->peripheral_config
)
1621 gchan
->config
= krealloc(gchan
->config
, config
->peripheral_size
, GFP_NOWAIT
);
1625 memcpy(gchan
->config
, config
->peripheral_config
, config
->peripheral_size
);
1630 static int gpi_create_i2c_tre(struct gchan
*chan
, struct gpi_desc
*desc
,
1631 struct scatterlist
*sgl
, enum dma_transfer_direction direction
)
1633 struct gpi_i2c_config
*i2c
= chan
->config
;
1634 struct device
*dev
= chan
->gpii
->gpi_dev
->dev
;
1635 unsigned int tre_idx
= 0;
1637 struct gpi_tre
*tre
;
1640 /* first create config tre if applicable */
1641 if (i2c
->set_config
) {
1642 tre
= &desc
->tre
[tre_idx
];
1645 tre
->dword
[0] = u32_encode_bits(i2c
->low_count
, TRE_I2C_C0_TLOW
);
1646 tre
->dword
[0] |= u32_encode_bits(i2c
->high_count
, TRE_I2C_C0_THIGH
);
1647 tre
->dword
[0] |= u32_encode_bits(i2c
->cycle_count
, TRE_I2C_C0_TCYL
);
1648 tre
->dword
[0] |= u32_encode_bits(i2c
->pack_enable
, TRE_I2C_C0_TX_PACK
);
1649 tre
->dword
[0] |= u32_encode_bits(i2c
->pack_enable
, TRE_I2C_C0_RX_PACK
);
1653 tre
->dword
[2] = u32_encode_bits(i2c
->clk_div
, TRE_C0_CLK_DIV
);
1655 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_CONFIG0
, TRE_FLAGS_TYPE
);
1656 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN
);
1659 /* create the GO tre for Tx */
1660 if (i2c
->op
== I2C_WRITE
) {
1661 tre
= &desc
->tre
[tre_idx
];
1665 tre
->dword
[0] = u32_encode_bits(I2C_READ
, TRE_I2C_GO_CMD
);
1667 tre
->dword
[0] = u32_encode_bits(i2c
->op
, TRE_I2C_GO_CMD
);
1669 tre
->dword
[0] |= u32_encode_bits(i2c
->addr
, TRE_I2C_GO_ADDR
);
1670 tre
->dword
[0] |= u32_encode_bits(i2c
->stretch
, TRE_I2C_GO_STRETCH
);
1673 tre
->dword
[2] = u32_encode_bits(i2c
->rx_len
, TRE_RX_LEN
);
1675 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_GO
, TRE_FLAGS_TYPE
);
1678 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_LINK
);
1680 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN
);
1683 if (i2c
->op
== I2C_READ
|| i2c
->multi_msg
== false) {
1684 /* create the DMA TRE */
1685 tre
= &desc
->tre
[tre_idx
];
1688 address
= sg_dma_address(sgl
);
1689 tre
->dword
[0] = lower_32_bits(address
);
1690 tre
->dword
[1] = upper_32_bits(address
);
1692 tre
->dword
[2] = u32_encode_bits(sg_dma_len(sgl
), TRE_DMA_LEN
);
1694 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_DMA
, TRE_FLAGS_TYPE
);
1695 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT
);
1698 for (i
= 0; i
< tre_idx
; i
++)
1699 dev_dbg(dev
, "TRE:%d %x:%x:%x:%x\n", i
, desc
->tre
[i
].dword
[0],
1700 desc
->tre
[i
].dword
[1], desc
->tre
[i
].dword
[2], desc
->tre
[i
].dword
[3]);
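/*
 * An I2C descriptor built here is a short chain of TREs: an optional
 * CONFIG0 TRE with timing/clock parameters, a GO TRE (on the write path)
 * carrying the command, target address and, for reads, rx_len, and a DMA
 * TRE pointing at the client buffer; IEOT is requested only on the DMA
 * TRE so a single completion event covers the whole chain.
 */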
1705 static int gpi_create_spi_tre(struct gchan
*chan
, struct gpi_desc
*desc
,
1706 struct scatterlist
*sgl
, enum dma_transfer_direction direction
)
1708 struct gpi_spi_config
*spi
= chan
->config
;
1709 struct device
*dev
= chan
->gpii
->gpi_dev
->dev
;
1710 unsigned int tre_idx
= 0;
1712 struct gpi_tre
*tre
;
1715 /* first create config tre if applicable */
1716 if (direction
== DMA_MEM_TO_DEV
&& spi
->set_config
) {
1717 tre
= &desc
->tre
[tre_idx
];
1720 tre
->dword
[0] = u32_encode_bits(spi
->word_len
, TRE_SPI_C0_WORD_SZ
);
1721 tre
->dword
[0] |= u32_encode_bits(spi
->loopback_en
, TRE_SPI_C0_LOOPBACK
);
1722 tre
->dword
[0] |= u32_encode_bits(spi
->clock_pol_high
, TRE_SPI_C0_CPOL
);
1723 tre
->dword
[0] |= u32_encode_bits(spi
->data_pol_high
, TRE_SPI_C0_CPHA
);
1724 tre
->dword
[0] |= u32_encode_bits(spi
->pack_en
, TRE_SPI_C0_TX_PACK
);
1725 tre
->dword
[0] |= u32_encode_bits(spi
->pack_en
, TRE_SPI_C0_RX_PACK
);
1729 tre
->dword
[2] = u32_encode_bits(spi
->clk_div
, TRE_C0_CLK_DIV
);
1730 tre
->dword
[2] |= u32_encode_bits(spi
->clk_src
, TRE_C0_CLK_SRC
);
1732 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_CONFIG0
, TRE_FLAGS_TYPE
);
1733 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN
);
1736 /* create the GO tre for Tx */
1737 if (direction
== DMA_MEM_TO_DEV
) {
1738 tre
= &desc
->tre
[tre_idx
];
1741 tre
->dword
[0] = u32_encode_bits(spi
->fragmentation
, TRE_SPI_GO_FRAG
);
1742 tre
->dword
[0] |= u32_encode_bits(spi
->cs
, TRE_SPI_GO_CS
);
1743 tre
->dword
[0] |= u32_encode_bits(spi
->cmd
, TRE_SPI_GO_CMD
);
1747 tre
->dword
[2] = u32_encode_bits(spi
->rx_len
, TRE_RX_LEN
);
1749 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_GO
, TRE_FLAGS_TYPE
);
1750 if (spi
->cmd
== SPI_RX
) {
1751 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB
);
1752 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_LINK
);
1753 } else if (spi
->cmd
== SPI_TX
) {
1754 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN
);
1755 } else { /* SPI_DUPLEX */
1756 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN
);
1757 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_LINK
);
1761 /* create the dma tre */
1762 tre
= &desc
->tre
[tre_idx
];
1765 address
= sg_dma_address(sgl
);
1766 tre
->dword
[0] = lower_32_bits(address
);
1767 tre
->dword
[1] = upper_32_bits(address
);
1769 tre
->dword
[2] = u32_encode_bits(sg_dma_len(sgl
), TRE_DMA_LEN
);
1771 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_DMA
, TRE_FLAGS_TYPE
);
1772 if (direction
== DMA_MEM_TO_DEV
)
1773 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT
);
1775 for (i
= 0; i
< tre_idx
; i
++)
1776 dev_dbg(dev
, "TRE:%d %x:%x:%x:%x\n", i
, desc
->tre
[i
].dword
[0],
1777 desc
->tre
[i
].dword
[1], desc
->tre
[i
].dword
[2], desc
->tre
[i
].dword
[3]);
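/*
 * SPI descriptors follow the same pattern: CONFIG0 and GO TREs are only
 * built for the MEM_TO_DEV direction, the GO TRE's flags depend on
 * whether the command is SPI_RX, SPI_TX or full duplex, and the DMA TRE
 * always follows, with IEOT set on the TX side.
 */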
1782 /* copy tre into transfer ring */
1783 static struct dma_async_tx_descriptor
*
1784 gpi_prep_slave_sg(struct dma_chan
*chan
, struct scatterlist
*sgl
,
1785 unsigned int sg_len
, enum dma_transfer_direction direction
,
1786 unsigned long flags
, void *context
)
1788 struct gchan
*gchan
= to_gchan(chan
);
1789 struct gpii
*gpii
= gchan
->gpii
;
1790 struct device
*dev
= gpii
->gpi_dev
->dev
;
1791 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
1792 struct gpi_desc
*gpi_desc
;
1797 gpii
->ieob_set
= false;
1798 if (!is_slave_direction(direction
)) {
1799 dev_err(gpii
->gpi_dev
->dev
, "invalid dma direction: %d\n", direction
);
1804 dev_err(dev
, "Multi sg sent, we support only one atm: %d\n", sg_len
);
1809 set_config
= *(u32
*)gchan
->config
;
1812 if (direction
== DMA_DEV_TO_MEM
) /* rx */
1815 /* calculate # of elements required & available */
1816 nr
= gpi_ring_num_elements_avail(ch_ring
);
1818 dev_err(dev
, "not enough space in ring, avail:%u required:%u\n", nr
, nr_tre
);
1822 gpi_desc
= kzalloc(sizeof(*gpi_desc
), GFP_NOWAIT
);
1826 /* create TREs for xfer */
1827 if (gchan
->protocol
== QCOM_GPI_SPI
) {
1828 i
= gpi_create_spi_tre(gchan
, gpi_desc
, sgl
, direction
);
1829 } else if (gchan
->protocol
== QCOM_GPI_I2C
) {
1830 i
= gpi_create_i2c_tre(gchan
, gpi_desc
, sgl
, direction
);
1832 dev_err(dev
, "invalid peripheral: %d\n", gchan
->protocol
);
1837 /* set up the descriptor */
1838 gpi_desc
->gchan
= gchan
;
1839 gpi_desc
->len
= sg_dma_len(sgl
);
1840 gpi_desc
->num_tre
= i
;
1842 return vchan_tx_prep(&gchan
->vc
, &gpi_desc
->vd
, flags
);
1845 /* rings transfer ring db to being transfer */
1846 static void gpi_issue_pending(struct dma_chan
*chan
)
1848 struct gchan
*gchan
= to_gchan(chan
);
1849 struct gpii
*gpii
= gchan
->gpii
;
1850 unsigned long flags
, pm_lock_flags
;
1851 struct virt_dma_desc
*vd
= NULL
;
1852 struct gpi_desc
*gpi_desc
;
1853 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
1854 void *tre
, *wp
= NULL
;
1857 read_lock_irqsave(&gpii
->pm_lock
, pm_lock_flags
);
1859 /* move all submitted descriptors to issued list */
1860 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
1861 if (vchan_issue_pending(&gchan
->vc
))
1862 vd
= list_last_entry(&gchan
->vc
.desc_issued
,
1863 struct virt_dma_desc
, node
);
1864 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
1866 /* nothing to do list is empty */
1868 read_unlock_irqrestore(&gpii
->pm_lock
, pm_lock_flags
);
1872 gpi_desc
= to_gpi_desc(vd
);
1873 for (i
= 0; i
< gpi_desc
->num_tre
; i
++) {
1874 tre
= &gpi_desc
->tre
[i
];
1875 gpi_queue_xfer(gpii
, gchan
, tre
, &wp
);
1878 gpi_desc
->db
= ch_ring
->wp
;
1879 gpi_write_ch_db(gchan
, &gchan
->ch_ring
, gpi_desc
->db
);
1880 read_unlock_irqrestore(&gpii
->pm_lock
, pm_lock_flags
);
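/*
 * gpi_issue_pending() copies every TRE of the last issued descriptor
 * into the transfer ring and then writes the ring's new wp to the
 * channel doorbell; the programmed value is remembered in gpi_desc->db.
 */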
1883 static int gpi_ch_init(struct gchan
*gchan
)
1885 struct gpii
*gpii
= gchan
->gpii
;
1886 const int ev_factor
= gpii
->gpi_dev
->ev_factor
;
1890 gchan
->pm_state
= CONFIG_STATE
;
1892 /* check if both channels are configured before continue */
1893 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++)
1894 if (gpii
->gchan
[i
].pm_state
!= CONFIG_STATE
)
1897 /* protocol must be same for both channels */
1898 if (gpii
->gchan
[0].protocol
!= gpii
->gchan
[1].protocol
) {
1899 dev_err(gpii
->gpi_dev
->dev
, "protocol did not match protocol %u != %u\n",
1900 gpii
->gchan
[0].protocol
, gpii
->gchan
[1].protocol
);
1905 /* allocate memory for event ring */
1906 elements
= CHAN_TRES
<< ev_factor
;
1907 ret
= gpi_alloc_ring(&gpii
->ev_ring
, elements
,
1908 sizeof(union gpi_event
), gpii
);
1912 /* configure interrupts */
1913 write_lock_irq(&gpii
->pm_lock
);
1914 gpii
->pm_state
= PREPARE_HARDWARE
;
1915 write_unlock_irq(&gpii
->pm_lock
);
1916 ret
= gpi_config_interrupts(gpii
, DEFAULT_IRQ_SETTINGS
, 0);
1918 dev_err(gpii
->gpi_dev
->dev
, "error config. interrupts, ret:%d\n", ret
);
1919 goto error_config_int
;
1922 /* allocate event rings */
1923 ret
= gpi_alloc_ev_chan(gpii
);
1925 dev_err(gpii
->gpi_dev
->dev
, "error alloc_ev_chan:%d\n", ret
);
1926 goto error_alloc_ev_ring
;
1929 /* Allocate all channels */
1930 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++) {
1931 ret
= gpi_alloc_chan(&gpii
->gchan
[i
], true);
1933 dev_err(gpii
->gpi_dev
->dev
, "Error allocating chan:%d\n", ret
);
1934 goto error_alloc_chan
;
1938 /* start channels */
1939 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++) {
1940 ret
= gpi_start_chan(&gpii
->gchan
[i
]);
1942 dev_err(gpii
->gpi_dev
->dev
, "Error start chan:%d\n", ret
);
1943 goto error_start_chan
;
1949 for (i
= i
- 1; i
>= 0; i
--) {
1950 gpi_stop_chan(&gpii
->gchan
[i
]);
1951 gpi_send_cmd(gpii
, gchan
, GPI_CH_CMD_RESET
);
1955 for (i
= i
- 1; i
>= 0; i
--)
1956 gpi_reset_chan(gchan
, GPI_CH_CMD_DE_ALLOC
);
1957 error_alloc_ev_ring
:
1958 gpi_disable_interrupts(gpii
);
1960 gpi_free_ring(&gpii
->ev_ring
, gpii
);
1965 /* release all channel resources */
1966 static void gpi_free_chan_resources(struct dma_chan
*chan
)
1968 struct gchan
*gchan
= to_gchan(chan
);
1969 struct gpii
*gpii
= gchan
->gpii
;
1970 enum gpi_pm_state cur_state
;
1973 mutex_lock(&gpii
->ctrl_lock
);
1975 cur_state
= gchan
->pm_state
;
1977 /* disable ch state so no more TRE processing for this channel */
1978 write_lock_irq(&gpii
->pm_lock
);
1979 gchan
->pm_state
= PREPARE_TERMINATE
;
1980 write_unlock_irq(&gpii
->pm_lock
);
1982 /* attempt to do graceful hardware shutdown */
1983 if (cur_state
== ACTIVE_STATE
) {
1984 gpi_stop_chan(gchan
);
1986 ret
= gpi_send_cmd(gpii
, gchan
, GPI_CH_CMD_RESET
);
1988 dev_err(gpii
->gpi_dev
->dev
, "error resetting channel:%d\n", ret
);
1990 gpi_reset_chan(gchan
, GPI_CH_CMD_DE_ALLOC
);
1993 /* free all allocated memory */
1994 gpi_free_ring(&gchan
->ch_ring
, gpii
);
1995 vchan_free_chan_resources(&gchan
->vc
);
1996 kfree(gchan
->config
);
1998 write_lock_irq(&gpii
->pm_lock
);
1999 gchan
->pm_state
= DISABLE_STATE
;
2000 write_unlock_irq(&gpii
->pm_lock
);
2002 /* if other rings are still active exit */
2003 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++)
2004 if (gpii
->gchan
[i
].ch_ring
.configured
)
2007 /* deallocate EV Ring */
2008 cur_state
= gpii
->pm_state
;
2009 write_lock_irq(&gpii
->pm_lock
);
2010 gpii
->pm_state
= PREPARE_TERMINATE
;
2011 write_unlock_irq(&gpii
->pm_lock
);
2013 /* wait for threads to complete out */
2014 tasklet_kill(&gpii
->ev_task
);
2016 /* send command to de allocate event ring */
2017 if (cur_state
== ACTIVE_STATE
)
2018 gpi_send_cmd(gpii
, NULL
, GPI_EV_CMD_DEALLOC
);
2020 gpi_free_ring(&gpii
->ev_ring
, gpii
);
2022 /* disable interrupts */
2023 if (cur_state
== ACTIVE_STATE
)
2024 gpi_disable_interrupts(gpii
);
2026 /* set final state to disable */
2027 write_lock_irq(&gpii
->pm_lock
);
2028 gpii
->pm_state
= DISABLE_STATE
;
2029 write_unlock_irq(&gpii
->pm_lock
);
2032 mutex_unlock(&gpii
->ctrl_lock
);
/* allocate channel resources */
static int gpi_alloc_chan_resources(struct dma_chan *chan)
{
	struct gchan *gchan = to_gchan(chan);
	struct gpii *gpii = gchan->gpii;
	int ret;

	mutex_lock(&gpii->ctrl_lock);

	/* allocate memory for transfer ring */
	ret = gpi_alloc_ring(&gchan->ch_ring, CHAN_TRES,
			     sizeof(struct gpi_tre), gpii);
	if (ret)
		goto xfer_alloc_err;

	ret = gpi_ch_init(gchan);

	mutex_unlock(&gpii->ctrl_lock);

	return ret;
xfer_alloc_err:
	mutex_unlock(&gpii->ctrl_lock);

	return ret;
}
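
/*
 * Pick a GPII instance for a serial engine (SEID). If either channel of a
 * GPII is already bound to this SEID, reuse that GPII so the TX and RX
 * channels of one serial engine end up on the same GPII (and share its
 * event ring); otherwise hand out the first unused GPII from the mask.
 */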
static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
{
	struct gchan *tx_chan, *rx_chan;
	unsigned int gpii;

	/* check if same seid is already configured for another chid */
	for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
		if (!((1 << gpii) & gpi_dev->gpii_mask))
			continue;

		tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
		rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];

		if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
			return gpii;
		if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
			return gpii;
	}

	/* no channels configured with same seid, return next avail gpii */
	for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
		if (!((1 << gpii) & gpi_dev->gpii_mask))
			continue;

		tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
		rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];

		/* check if gpii is configured */
		if (tx_chan->vc.chan.client_count ||
		    rx_chan->vc.chan.client_count)
			continue;

		/* found a free gpii */
		return gpii;
	}

	/* no gpii instance available to use */
	return -EIO;
}
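
/*
 * The device-tree translation below expects three cells per request: the
 * channel index within the GPII, the serial engine index (SEID), and the
 * protocol from <dt-bindings/dma/qcom-gpi.h>. A hypothetical client entry
 * (node name and indices are illustrative only) could look like:
 *
 *	dmas = <&gpi_dma0 0 1 QCOM_GPI_SPI>,
 *	       <&gpi_dma0 1 1 QCOM_GPI_SPI>;
 *	dma-names = "tx", "rx";
 */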
/* gpi_of_dma_xlate: open client requested channel */
static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
					 struct of_dma *of_dma)
{
	struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
	u32 seid, chid;
	int gpii;
	struct gchan *gchan;

	if (args->args_count < 3) {
		dev_err(gpi_dev->dev, "gpii requires minimum 3 args, client passed:%d args\n",
			args->args_count);
		return NULL;
	}

	chid = args->args[0];
	if (chid >= MAX_CHANNELS_PER_GPII) {
		dev_err(gpi_dev->dev, "gpii channel:%d not valid\n", chid);
		return NULL;
	}

	seid = args->args[1];

	/* find next available gpii to use */
	gpii = gpi_find_avail_gpii(gpi_dev, seid);
	if (gpii < 0) {
		dev_err(gpi_dev->dev, "no available gpii instances\n");
		return NULL;
	}

	gchan = &gpi_dev->gpiis[gpii].gchan[chid];
	if (gchan->vc.chan.client_count) {
		dev_err(gpi_dev->dev, "gpii:%d chid:%d seid:%d already configured\n",
			gpii, chid, gchan->seid);
		return NULL;
	}

	gchan->seid = seid;
	gchan->protocol = args->args[2];

	return dma_get_slave_channel(&gchan->vc.chan);
}
static int gpi_probe(struct platform_device *pdev)
{
	struct gpi_dev *gpi_dev;
	unsigned int i;
	u32 ee_offset;
	int ret;

	gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
	if (!gpi_dev)
		return -ENOMEM;

	gpi_dev->dev = &pdev->dev;
	gpi_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &gpi_dev->res);
	if (IS_ERR(gpi_dev->regs))
		return PTR_ERR(gpi_dev->regs);
	gpi_dev->ee_base = gpi_dev->regs;

	ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channels",
				   &gpi_dev->max_gpii);
	if (ret) {
		dev_err(gpi_dev->dev, "missing 'dma-channels' DT property\n");
		return ret;
	}

	ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channel-mask",
				   &gpi_dev->gpii_mask);
	if (ret) {
		dev_err(gpi_dev->dev, "missing 'dma-channel-mask' DT property\n");
		return ret;
	}

	ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev);
	gpi_dev->ee_base = gpi_dev->ee_base - ee_offset;

	gpi_dev->ev_factor = EV_FACTOR;

	ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(gpi_dev->dev, "Error setting dma_mask to 64, ret:%d\n", ret);
		return ret;
	}

	gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev, sizeof(*gpi_dev->gpiis) *
				      gpi_dev->max_gpii, GFP_KERNEL);
	if (!gpi_dev->gpiis)
		return -ENOMEM;

	/* setup all the supported gpii */
	INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
	for (i = 0; i < gpi_dev->max_gpii; i++) {
		struct gpii *gpii = &gpi_dev->gpiis[i];
		int chan;

		if (!((1 << i) & gpi_dev->gpii_mask))
			continue;

		/* set up ev cntxt register map */
		gpii->ev_cntxt_base_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
		gpii->ev_cntxt_db_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
		gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg + CNTXT_4_RING_RP_LSB;
		gpii->ev_cmd_reg = gpi_dev->ee_base + GPII_n_EV_CH_CMD_OFFS(i);
		gpii->ieob_clr_reg = gpi_dev->ee_base + GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);

		/* set up irq */
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			return ret;
		gpii->irq = ret;

		/* set up channel specific register info */
		for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
			struct gchan *gchan = &gpii->gchan[chan];

			/* set up ch cntxt register map */
			gchan->ch_cntxt_base_reg = gpi_dev->ee_base +
				GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
			gchan->ch_cntxt_db_reg = gpi_dev->ee_base +
				GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
			gchan->ch_cmd_reg = gpi_dev->ee_base + GPII_n_CH_CMD_OFFS(i);

			/* vchan setup */
			vchan_init(&gchan->vc, &gpi_dev->dma_device);
			gchan->vc.desc_free = gpi_desc_free;
			gchan->chid = chan;
			gchan->gpii = gpii;
			gchan->dir = GPII_CHAN_DIR[chan];
		}
		mutex_init(&gpii->ctrl_lock);
		rwlock_init(&gpii->pm_lock);
		tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
			     (unsigned long)gpii);
		init_completion(&gpii->cmd_completion);
		gpii->gpii_id = i;
		gpii->regs = gpi_dev->ee_base;
		gpii->gpi_dev = gpi_dev;
	}

	platform_set_drvdata(pdev, gpi_dev);

	/* clear and set capabilities */
	dma_cap_zero(gpi_dev->dma_device.cap_mask);
	dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);

	/* configure dmaengine apis */
	gpi_dev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	gpi_dev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
	gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
	gpi_dev->dma_device.device_alloc_chan_resources = gpi_alloc_chan_resources;
	gpi_dev->dma_device.device_free_chan_resources = gpi_free_chan_resources;
	gpi_dev->dma_device.device_tx_status = dma_cookie_status;
	gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
	gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
	gpi_dev->dma_device.device_config = gpi_peripheral_config;
	gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
	gpi_dev->dma_device.dev = gpi_dev->dev;
	gpi_dev->dma_device.device_pause = gpi_pause;
	gpi_dev->dma_device.device_resume = gpi_resume;

	/* register with dmaengine framework */
	ret = dma_async_device_register(&gpi_dev->dma_device);
	if (ret) {
		dev_err(gpi_dev->dev, "async_device_register failed ret:%d", ret);
		return ret;
	}

	ret = of_dma_controller_register(gpi_dev->dev->of_node,
					 gpi_of_dma_xlate, gpi_dev);
	if (ret) {
		dev_err(gpi_dev->dev, "of_dma_controller_reg failed ret:%d", ret);
		return ret;
	}

	return ret;
}
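
/*
 * The .data field of each match entry below is the execution-environment
 * (EE) register offset that gpi_probe() retrieves via device_get_match_data()
 * and subtracts from the mapped register base.
 */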
static const struct of_device_id gpi_of_match[] = {
	{ .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
	{ .compatible = "qcom,sm6350-gpi-dma", .data = (void *)0x10000 },
	/*
	 * Do not grow the list for compatible devices. Instead use
	 * qcom,sdm845-gpi-dma (for ee_offset = 0x0) or qcom,sm6350-gpi-dma
	 * (for ee_offset = 0x10000).
	 */
	{ .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
	{ .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
	{ .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
	{ .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
	{ .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 },
	{ },
};
MODULE_DEVICE_TABLE(of, gpi_of_match);

static struct platform_driver gpi_driver = {
	.probe = gpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = gpi_of_match,
	},
};

static int __init gpi_init(void)
{
	return platform_driver_register(&gpi_driver);
}
subsys_initcall(gpi_init);

MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
MODULE_LICENSE("GPL v2");