// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2020, Linaro Limited
 */
#include <dt-bindings/dma/qcom-gpi.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "../dmaengine.h"
#include "../virt-dma.h"
#define TRE_TYPE_DMA		0x10
#define TRE_TYPE_GO		0x20
#define TRE_TYPE_CONFIG0	0x22

#define TRE_FLAGS_CHAIN		BIT(0)
#define TRE_FLAGS_IEOB		BIT(8)
#define TRE_FLAGS_IEOT		BIT(9)
#define TRE_FLAGS_BEI		BIT(10)
#define TRE_FLAGS_LINK		BIT(11)
#define TRE_FLAGS_TYPE		GENMASK(23, 16)

#define TRE_SPI_C0_WORD_SZ	GENMASK(4, 0)
#define TRE_SPI_C0_LOOPBACK	BIT(8)
#define TRE_SPI_C0_CS		BIT(11)
#define TRE_SPI_C0_CPHA		BIT(12)
#define TRE_SPI_C0_CPOL		BIT(13)
#define TRE_SPI_C0_TX_PACK	BIT(24)
#define TRE_SPI_C0_RX_PACK	BIT(25)

#define TRE_C0_CLK_DIV		GENMASK(11, 0)
#define TRE_C0_CLK_SRC		GENMASK(19, 16)

#define TRE_SPI_GO_CMD		GENMASK(4, 0)
#define TRE_SPI_GO_CS		GENMASK(10, 8)
#define TRE_SPI_GO_FRAG		BIT(26)

#define TRE_RX_LEN		GENMASK(23, 0)

#define TRE_I2C_C0_TLOW		GENMASK(7, 0)
#define TRE_I2C_C0_THIGH	GENMASK(15, 8)
#define TRE_I2C_C0_TCYL		GENMASK(23, 16)
#define TRE_I2C_C0_TX_PACK	BIT(24)
#define TRE_I2C_C0_RX_PACK	BIT(25)

#define TRE_I2C_GO_CMD		GENMASK(4, 0)
#define TRE_I2C_GO_ADDR		GENMASK(14, 8)
#define TRE_I2C_GO_STRETCH	BIT(26)

#define TRE_DMA_LEN		GENMASK(23, 0)
/* Register offsets from gpi-top */
#define GPII_n_CH_k_CNTXT_0_OFFS(n, k)	(0x20000 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_CNTXT_0_EL_SIZE	GENMASK(31, 24)
#define GPII_n_CH_k_CNTXT_0_CHSTATE	GENMASK(23, 20)
#define GPII_n_CH_k_CNTXT_0_ERIDX	GENMASK(18, 14)
#define GPII_n_CH_k_CNTXT_0_DIR		BIT(3)
#define GPII_n_CH_k_CNTXT_0_PROTO	GENMASK(2, 0)

#define GPII_n_CH_k_CNTXT_0(el_size, erindex, dir, chtype_proto)	\
	(FIELD_PREP(GPII_n_CH_k_CNTXT_0_EL_SIZE, el_size) |		\
	 FIELD_PREP(GPII_n_CH_k_CNTXT_0_ERIDX, erindex) |		\
	 FIELD_PREP(GPII_n_CH_k_CNTXT_0_DIR, dir) |			\
	 FIELD_PREP(GPII_n_CH_k_CNTXT_0_PROTO, chtype_proto))

#define GPI_CHTYPE_DIR_IN	(0)
#define GPI_CHTYPE_DIR_OUT	(1)

#define GPI_CHTYPE_PROTO_GPI	(0x2)

#define GPII_n_CH_k_DOORBELL_0_OFFS(n, k)	(0x22000 + (0x4000 * (n)) + (0x8 * (k)))
#define GPII_n_CH_CMD_OFFS(n)			(0x23008 + (0x4000 * (n)))
#define GPII_n_CH_CMD_OPCODE			GENMASK(31, 24)
#define GPII_n_CH_CMD_CHID			GENMASK(7, 0)
#define GPII_n_CH_CMD(opcode, chid)		\
	(FIELD_PREP(GPII_n_CH_CMD_OPCODE, opcode) | \
	 FIELD_PREP(GPII_n_CH_CMD_CHID, chid))

#define GPII_n_CH_CMD_ALLOCATE			(0)
#define GPII_n_CH_CMD_START			(1)
#define GPII_n_CH_CMD_STOP			(2)
#define GPII_n_CH_CMD_RESET			(9)
#define GPII_n_CH_CMD_DE_ALLOC			(10)
#define GPII_n_CH_CMD_UART_SW_STALE		(32)
#define GPII_n_CH_CMD_UART_RFR_READY		(33)
#define GPII_n_CH_CMD_UART_RFR_NOT_READY	(34)

/* EV Context Array */
#define GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k)	(0x21000 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_EV_k_CNTXT_0_EL_SIZE	GENMASK(31, 24)
#define GPII_n_EV_k_CNTXT_0_CHSTATE	GENMASK(23, 20)
#define GPII_n_EV_k_CNTXT_0_INTYPE	BIT(16)
#define GPII_n_EV_k_CNTXT_0_CHTYPE	GENMASK(3, 0)

#define GPII_n_EV_k_CNTXT_0(el_size, inttype, chtype)		\
	(FIELD_PREP(GPII_n_EV_k_CNTXT_0_EL_SIZE, el_size) |	\
	 FIELD_PREP(GPII_n_EV_k_CNTXT_0_INTYPE, inttype) |	\
	 FIELD_PREP(GPII_n_EV_k_CNTXT_0_CHTYPE, chtype))

#define GPI_INTTYPE_IRQ		(1)
#define GPI_CHTYPE_GPI_EV	(0x2)
enum CNTXT_OFFS {
	CNTXT_0_CONFIG = 0x0,
	CNTXT_1_R_LENGTH = 0x4,
	CNTXT_2_RING_BASE_LSB = 0x8,
	CNTXT_3_RING_BASE_MSB = 0xC,
	CNTXT_4_RING_RP_LSB = 0x10,
	CNTXT_5_RING_RP_MSB = 0x14,
	CNTXT_6_RING_WP_LSB = 0x18,
	CNTXT_7_RING_WP_MSB = 0x1C,
	CNTXT_8_RING_INT_MOD = 0x20,
	CNTXT_9_RING_INTVEC = 0x24,
	CNTXT_10_RING_MSI_LSB = 0x28,
	CNTXT_11_RING_MSI_MSB = 0x2C,
	CNTXT_12_RING_RP_UPDATE_LSB = 0x30,
	CNTXT_13_RING_RP_UPDATE_MSB = 0x34,
};
#define GPII_n_EV_CH_k_DOORBELL_0_OFFS(n, k)	(0x22100 + (0x4000 * (n)) + (0x8 * (k)))
#define GPII_n_EV_CH_CMD_OFFS(n)	(0x23010 + (0x4000 * (n)))
#define GPII_n_EV_CMD_OPCODE		GENMASK(31, 24)
#define GPII_n_EV_CMD_CHID		GENMASK(7, 0)
#define GPII_n_EV_CMD(opcode, chid)	\
	(FIELD_PREP(GPII_n_EV_CMD_OPCODE, opcode) | \
	 FIELD_PREP(GPII_n_EV_CMD_CHID, chid))

#define GPII_n_EV_CH_CMD_ALLOCATE	(0x00)
#define GPII_n_EV_CH_CMD_RESET		(0x09)
#define GPII_n_EV_CH_CMD_DE_ALLOC	(0x0A)

#define GPII_n_CNTXT_TYPE_IRQ_OFFS(n)	(0x23080 + (0x4000 * (n)))

/* mask type register */
#define GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(n)	(0x23088 + (0x4000 * (n)))
#define GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK		GENMASK(6, 0)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL	BIT(6)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB		BIT(3)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB		BIT(2)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL	BIT(1)
#define GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL	BIT(0)

#define GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(n)	(0x23090 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n)	(0x23094 + (0x4000 * (n)))

/* Mask channel control interrupt register */
#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(n)	(0x23098 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK	GENMASK(1, 0)

/* Mask event control interrupt register */
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n)	(0x2309C + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK	BIT(0)

#define GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(n)	(0x230A0 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n)	(0x230A4 + (0x4000 * (n)))

/* Mask event interrupt register */
#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n)	(0x230B8 + (0x4000 * (n)))
#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK	BIT(0)

#define GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n)	(0x230C0 + (0x4000 * (n)))
#define GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(n)	(0x23100 + (0x4000 * (n)))
#define GPI_GLOB_IRQ_ERROR_INT_MSK		BIT(0)

/* GPII specific Global - Enable bit register */
#define GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(n)	(0x23108 + (0x4000 * (n)))
#define GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(n)	(0x23110 + (0x4000 * (n)))
#define GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(n)	(0x23118 + (0x4000 * (n)))

/* GPII general interrupt - Enable bit register */
#define GPII_n_CNTXT_GPII_IRQ_EN_OFFS(n)	(0x23120 + (0x4000 * (n)))
#define GPII_n_CNTXT_GPII_IRQ_EN_BMSK		GENMASK(3, 0)

#define GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(n)	(0x23128 + (0x4000 * (n)))

/* GPII Interrupt Type register */
#define GPII_n_CNTXT_INTSET_OFFS(n)	(0x23180 + (0x4000 * (n)))
#define GPII_n_CNTXT_INTSET_BMSK	BIT(0)

#define GPII_n_CNTXT_MSI_BASE_LSB_OFFS(n)	(0x23188 + (0x4000 * (n)))
#define GPII_n_CNTXT_MSI_BASE_MSB_OFFS(n)	(0x2318C + (0x4000 * (n)))
#define GPII_n_CNTXT_SCRATCH_0_OFFS(n)		(0x23400 + (0x4000 * (n)))
#define GPII_n_CNTXT_SCRATCH_1_OFFS(n)		(0x23404 + (0x4000 * (n)))

#define GPII_n_ERROR_LOG_OFFS(n)	(0x23200 + (0x4000 * (n)))

#define GPII_n_CH_k_QOS_OFFS(n, k)	(0x2005C + (0x4000 * (n)) + (0x80 * (k)))

/* Scratch registers */
#define GPII_n_CH_k_SCRATCH_0_OFFS(n, k)	(0x20060 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_SCRATCH_0_SEID		GENMASK(2, 0)
#define GPII_n_CH_k_SCRATCH_0_PROTO		GENMASK(7, 4)
#define GPII_n_CH_k_SCRATCH_0_PAIR		GENMASK(20, 16)
#define GPII_n_CH_k_SCRATCH_0(pair, proto, seid)		\
	(FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PAIR, pair) |		\
	 FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PROTO, proto) |	\
	 FIELD_PREP(GPII_n_CH_k_SCRATCH_0_SEID, seid))
#define GPII_n_CH_k_SCRATCH_1_OFFS(n, k)	(0x20064 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_SCRATCH_2_OFFS(n, k)	(0x20068 + (0x4000 * (n)) + (0x80 * (k)))
#define GPII_n_CH_k_SCRATCH_3_OFFS(n, k)	(0x2006C + (0x4000 * (n)) + (0x80 * (k)))
struct __packed gpi_tre {
	u32 dword[4];
};
enum msm_gpi_tce_code {
	MSM_GPI_TCE_SUCCESS = 1,
	MSM_GPI_TCE_UNEXP_ERR = 16,
};
#define CMD_TIMEOUT_MS		(250)

#define MAX_CHANNELS_PER_GPII	(2)
#define GPI_TX_CHAN		(0)
#define GPI_RX_CHAN		(1)
#define STATE_IGNORE		(U32_MAX)
#define EV_FACTOR		(2)
#define REQ_OF_DMA_ARGS		(5)	/* # of arguments required from client */
struct __packed xfer_compl_event {

struct __packed immediate_data_event {

struct __packed qup_notif_event {
struct __packed gpi_ere {
	u32 dword[4];
};
	XFER_COMPLETE_EV_TYPE = 0x22,
	IMMEDIATE_DATA_EV_TYPE = 0x30,
	QUP_NOTIF_EV_TYPE = 0x31,
	STALE_EV_TYPE = 0xFF,
union __packed gpi_event {
	struct __packed xfer_compl_event xfer_compl_event;
	struct __packed immediate_data_event immediate_data_event;
	struct __packed qup_notif_event qup_notif_event;
	struct __packed gpi_ere gpi_ere;
};
enum gpii_irq_settings {
	DEFAULT_IRQ_SETTINGS,
	MASK_IEOB_SETTINGS,
};

enum gpi_ev_state {
	DEFAULT_EV_CH_STATE = 0,
	EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
	EV_STATE_ALLOCATED,
	MAX_EV_STATES
};
static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
	[EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
	[EV_STATE_ALLOCATED] = "ALLOCATED",
};

#define TO_GPI_EV_STATE_STR(_state) (((_state) >= MAX_EV_STATES) ? \
				    "INVALID" : gpi_ev_state_str[(_state)])
enum gpi_ch_state {
	DEFAULT_CH_STATE = 0x0,
	CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
	CH_STATE_ALLOCATED = 0x1,
	CH_STATE_STARTED = 0x2,
	CH_STATE_STOPPED = 0x3,
	CH_STATE_STOP_IN_PROC = 0x4,
	CH_STATE_ERROR = 0xf,
};
	GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
	GPI_CH_CMD_UART_SW_STALE,
	GPI_CH_CMD_UART_RFR_READY,
	GPI_CH_CMD_UART_RFR_NOT_READY,
	GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,

	GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
	GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
#define IS_CHAN_CMD(_cmd)	((_cmd) <= GPI_CH_CMD_END)

static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
	[GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
	[GPI_CH_CMD_START] = "CH START",
	[GPI_CH_CMD_STOP] = "CH STOP",
	[GPI_CH_CMD_RESET] = "CH_RESET",
	[GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
	[GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
	[GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
	[GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
	[GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
	[GPI_EV_CMD_RESET] = "EV RESET",
	[GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
};
#define TO_GPI_CMD_STR(_cmd) (((_cmd) >= GPI_MAX_CMD) ? "INVALID" : \
			      gpi_cmd_str[(_cmd)])
/*
 * @DISABLE_STATE: no register access allowed
 * @CONFIG_STATE: client has configured the channel
 * @PREPARE_HARDWARE: register access is allowed,
 *		      but events are not yet processed
 * @ACTIVE_STATE: channels are fully operational
 * @PREPARE_TERMINATE: graceful termination of channels,
 *		       register access is allowed
 * @PAUSE_STATE: channels are active, but not processing any events
 */
#define REG_ACCESS_VALID(_pm_state)	((_pm_state) >= PREPARE_HARDWARE)

static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
	[DISABLE_STATE] = "DISABLE",
	[CONFIG_STATE] = "CONFIG",
	[PREPARE_HARDWARE] = "PREPARE HARDWARE",
	[ACTIVE_STATE] = "ACTIVE",
	[PREPARE_TERMINATE] = "PREPARE TERMINATE",
	[PAUSE_STATE] = "PAUSE",
};

#define TO_GPI_PM_STR(_state) (((_state) >= MAX_PM_STATE) ? \
			      "INVALID" : gpi_pm_state_str[(_state)])
static const struct {
	enum gpi_cmd gpi_cmd;
	u32 opcode;
	u32 state;
} gpi_cmd_info[GPI_MAX_CMD] = {
		GPII_n_CH_CMD_ALLOCATE,
		GPII_n_CH_CMD_DE_ALLOC,
		CH_STATE_NOT_ALLOCATED,
		GPI_CH_CMD_UART_SW_STALE,
		GPII_n_CH_CMD_UART_SW_STALE,
		GPI_CH_CMD_UART_RFR_READY,
		GPII_n_CH_CMD_UART_RFR_READY,
		GPI_CH_CMD_UART_RFR_NOT_READY,
		GPII_n_CH_CMD_UART_RFR_NOT_READY,
		GPII_n_EV_CH_CMD_ALLOCATE,
		GPII_n_EV_CH_CMD_RESET,
		GPII_n_EV_CH_CMD_DE_ALLOC,
		EV_STATE_NOT_ALLOCATED,
struct gpi_ring {
	phys_addr_t phys_addr;
	dma_addr_t dma_handle;
};

struct gpi_dev {
	struct dma_device dma_device;
	struct resource *res;
	void __iomem *ee_base; /* ee register base address */
	u32 max_gpii;	/* maximum # of gpii instances available per gpi block */
	u32 gpii_mask;	/* gpii instances available for apps */
	u32 ev_factor;	/* ev ring length factor */
};

struct gchan {
	struct virt_dma_chan vc;
	enum gpi_ch_state ch_state;
	enum gpi_pm_state pm_state;
	void __iomem *ch_cntxt_base_reg;
	void __iomem *ch_cntxt_db_reg;
	void __iomem *ch_cmd_reg;
	struct gpi_ring ch_ring;
};

struct gpii {
	struct gchan gchan[MAX_CHANNELS_PER_GPII];
	struct gpi_dev *gpi_dev;
	void __iomem *regs; /* points to gpi top */
	void __iomem *ev_cntxt_base_reg;
	void __iomem *ev_cntxt_db_reg;
	void __iomem *ev_ring_rp_lsb_reg;
	void __iomem *ev_cmd_reg;
	void __iomem *ieob_clr_reg;
	struct mutex ctrl_lock;
	enum gpi_ev_state ev_state;
	enum gpi_pm_state pm_state;
	struct gpi_ring ev_ring;
	struct tasklet_struct ev_task; /* event processing tasklet */
	struct completion cmd_completion;
	enum gpi_cmd gpi_cmd;
	u32 cntxt_type_irq_msk;
};

struct gpi_desc {
	struct virt_dma_desc vd;
	void *db; /* DB register to program */
	struct gpi_tre tre[MAX_TRE];
};
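/* fixed channel directions: channel 0 (TX) is outbound, channel 1 (RX) is inbound */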
static const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
	GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
};
static irqreturn_t gpi_handle_irq(int irq, void *data);
static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
static void gpi_process_events(struct gpii *gpii);
static inline struct gchan *to_gchan(struct dma_chan *dma_chan)
{
	return container_of(dma_chan, struct gchan, vc.chan);
}

static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct gpi_desc, vd);
}

static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
				      void *addr)
{
	return ring->phys_addr + (addr - ring->base);
}

static inline void *to_virtual(const struct gpi_ring *const ring, phys_addr_t addr)
{
	return ring->base + (addr - ring->phys_addr);
}
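/* relaxed MMIO accessors for gpii registers */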
static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
{
	return readl_relaxed(addr);
}

static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
{
	writel_relaxed(val, addr);
}
/* gpi_write_reg_field - write to specific bit field */
static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
					u32 mask, u32 shift, u32 val)
{
	u32 tmp = gpi_read_reg(gpii, addr);

	tmp &= ~mask;
	val = tmp | ((val << shift) & mask);
	gpi_write_reg(gpii, addr, val);
}
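/* gpi_update_reg - read-modify-write a masked field of a gpii register */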
static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
	void __iomem *addr = gpii->regs + offset;
	u32 tmp = gpi_read_reg(gpii, addr);

	tmp &= ~mask;
	tmp |= u32_encode_bits(val, mask);

	gpi_write_reg(gpii, addr, tmp);
}
static void gpi_disable_interrupts(struct gpii *gpii)
{
	gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
	gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_INTSET_BMSK, 0);

	gpii->cntxt_type_irq_msk = 0;
	devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
	gpii->configured_irq = false;
}
621 /* configure and enable interrupts */
622 static int gpi_config_interrupts(struct gpii
*gpii
, enum gpii_irq_settings settings
, bool mask
)
624 const u32 enable
= (GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL
|
625 GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
|
626 GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB
|
627 GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL
|
628 GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL
);
631 if (!gpii
->configured_irq
) {
632 ret
= devm_request_irq(gpii
->gpi_dev
->dev
, gpii
->irq
,
633 gpi_handle_irq
, IRQF_TRIGGER_HIGH
,
636 dev_err(gpii
->gpi_dev
->dev
, "error request irq:%d ret:%d\n",
642 if (settings
== MASK_IEOB_SETTINGS
) {
644 * GPII only uses one EV ring per gpii so we can globally
645 * enable/disable IEOB interrupt
648 gpii
->cntxt_type_irq_msk
|= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
;
650 gpii
->cntxt_type_irq_msk
&= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
);
651 gpi_update_reg(gpii
, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii
->gpii_id
),
652 GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK
, gpii
->cntxt_type_irq_msk
);
654 gpi_update_reg(gpii
, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii
->gpii_id
),
655 GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK
, enable
);
656 gpi_update_reg(gpii
, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii
->gpii_id
),
657 GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK
,
658 GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK
);
659 gpi_update_reg(gpii
, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii
->gpii_id
),
660 GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK
,
661 GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK
);
662 gpi_update_reg(gpii
, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii
->gpii_id
),
663 GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK
,
664 GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK
);
665 gpi_update_reg(gpii
, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii
->gpii_id
),
666 GPII_n_CNTXT_GPII_IRQ_EN_BMSK
,
667 GPII_n_CNTXT_GPII_IRQ_EN_BMSK
);
668 gpi_update_reg(gpii
, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii
->gpii_id
),
669 GPII_n_CNTXT_GPII_IRQ_EN_BMSK
, GPII_n_CNTXT_GPII_IRQ_EN_BMSK
);
670 gpi_update_reg(gpii
, GPII_n_CNTXT_MSI_BASE_LSB_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
671 gpi_update_reg(gpii
, GPII_n_CNTXT_MSI_BASE_MSB_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
672 gpi_update_reg(gpii
, GPII_n_CNTXT_SCRATCH_0_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
673 gpi_update_reg(gpii
, GPII_n_CNTXT_SCRATCH_1_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
674 gpi_update_reg(gpii
, GPII_n_CNTXT_INTSET_OFFS(gpii
->gpii_id
),
675 GPII_n_CNTXT_INTSET_BMSK
, 1);
676 gpi_update_reg(gpii
, GPII_n_ERROR_LOG_OFFS(gpii
->gpii_id
), U32_MAX
, 0);
678 gpii
->cntxt_type_irq_msk
= enable
;
681 gpii
->configured_irq
= true;
685 /* Sends gpii event or channel command */
686 static int gpi_send_cmd(struct gpii
*gpii
, struct gchan
*gchan
,
687 enum gpi_cmd gpi_cmd
)
689 u32 chid
= MAX_CHANNELS_PER_GPII
;
690 unsigned long timeout
;
691 void __iomem
*cmd_reg
;
694 if (gpi_cmd
>= GPI_MAX_CMD
)
696 if (IS_CHAN_CMD(gpi_cmd
))
699 dev_dbg(gpii
->gpi_dev
->dev
,
700 "sending cmd: %s:%u\n", TO_GPI_CMD_STR(gpi_cmd
), chid
);
702 /* send opcode and wait for completion */
703 reinit_completion(&gpii
->cmd_completion
);
704 gpii
->gpi_cmd
= gpi_cmd
;
706 cmd_reg
= IS_CHAN_CMD(gpi_cmd
) ? gchan
->ch_cmd_reg
: gpii
->ev_cmd_reg
;
707 cmd
= IS_CHAN_CMD(gpi_cmd
) ? GPII_n_CH_CMD(gpi_cmd_info
[gpi_cmd
].opcode
, chid
) :
708 GPII_n_EV_CMD(gpi_cmd_info
[gpi_cmd
].opcode
, 0);
709 gpi_write_reg(gpii
, cmd_reg
, cmd
);
710 timeout
= wait_for_completion_timeout(&gpii
->cmd_completion
,
711 msecs_to_jiffies(CMD_TIMEOUT_MS
));
713 dev_err(gpii
->gpi_dev
->dev
, "cmd: %s completion timeout:%u\n",
714 TO_GPI_CMD_STR(gpi_cmd
), chid
);
	/* if the cmd is a state-change cmd, confirm the new ch state is correct */
719 if (gpi_cmd_info
[gpi_cmd
].state
== STATE_IGNORE
)
722 if (IS_CHAN_CMD(gpi_cmd
) && gchan
->ch_state
== gpi_cmd_info
[gpi_cmd
].state
)
725 if (!IS_CHAN_CMD(gpi_cmd
) && gpii
->ev_state
== gpi_cmd_info
[gpi_cmd
].state
)
731 /* program transfer ring DB register */
732 static inline void gpi_write_ch_db(struct gchan
*gchan
,
733 struct gpi_ring
*ring
, void *wp
)
735 struct gpii
*gpii
= gchan
->gpii
;
738 p_wp
= to_physical(ring
, wp
);
739 gpi_write_reg(gpii
, gchan
->ch_cntxt_db_reg
, p_wp
);
742 /* program event ring DB register */
743 static inline void gpi_write_ev_db(struct gpii
*gpii
,
744 struct gpi_ring
*ring
, void *wp
)
748 p_wp
= ring
->phys_addr
+ (wp
- ring
->base
);
749 gpi_write_reg(gpii
, gpii
->ev_cntxt_db_reg
, p_wp
);
752 /* process transfer completion interrupt */
753 static void gpi_process_ieob(struct gpii
*gpii
)
755 gpi_write_reg(gpii
, gpii
->ieob_clr_reg
, BIT(0));
757 gpi_config_interrupts(gpii
, MASK_IEOB_SETTINGS
, 0);
758 tasklet_hi_schedule(&gpii
->ev_task
);
761 /* process channel control interrupt */
762 static void gpi_process_ch_ctrl_irq(struct gpii
*gpii
)
764 u32 gpii_id
= gpii
->gpii_id
;
765 u32 offset
= GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id
);
766 u32 ch_irq
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
770 /* clear the status */
771 offset
= GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id
);
772 gpi_write_reg(gpii
, gpii
->regs
+ offset
, (u32
)ch_irq
);
774 for (chid
= 0; chid
< MAX_CHANNELS_PER_GPII
; chid
++) {
775 if (!(BIT(chid
) & ch_irq
))
778 gchan
= &gpii
->gchan
[chid
];
779 state
= gpi_read_reg(gpii
, gchan
->ch_cntxt_base_reg
+
781 state
= FIELD_GET(GPII_n_CH_k_CNTXT_0_CHSTATE
, state
);
		 * The CH_CMD_DE_ALLOC cmd is always successful. However, the cmd
		 * does not change the hardware state, so overwrite the software state
788 if (gpii
->gpi_cmd
== GPI_CH_CMD_DE_ALLOC
)
789 state
= DEFAULT_CH_STATE
;
790 gchan
->ch_state
= state
;
		 * Trigger complete_all() unless ch_state is stop-in-process.
		 * Stop-in-process is a transitional state; we wait for the
		 * stop interrupt before notifying.
797 if (gchan
->ch_state
!= CH_STATE_STOP_IN_PROC
)
798 complete_all(&gpii
->cmd_completion
);
802 /* processing gpi general error interrupts */
803 static void gpi_process_gen_err_irq(struct gpii
*gpii
)
805 u32 gpii_id
= gpii
->gpii_id
;
806 u32 offset
= GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id
);
807 u32 irq_stts
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
809 /* clear the status */
810 dev_dbg(gpii
->gpi_dev
->dev
, "irq_stts:0x%x\n", irq_stts
);
812 /* Clear the register */
813 offset
= GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id
);
814 gpi_write_reg(gpii
, gpii
->regs
+ offset
, irq_stts
);
817 /* processing gpi level error interrupts */
818 static void gpi_process_glob_err_irq(struct gpii
*gpii
)
820 u32 gpii_id
= gpii
->gpii_id
;
821 u32 offset
= GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id
);
822 u32 irq_stts
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
824 offset
= GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id
);
825 gpi_write_reg(gpii
, gpii
->regs
+ offset
, irq_stts
);
827 /* only error interrupt should be set */
828 if (irq_stts
& ~GPI_GLOB_IRQ_ERROR_INT_MSK
) {
829 dev_err(gpii
->gpi_dev
->dev
, "invalid error status:0x%x\n", irq_stts
);
833 offset
= GPII_n_ERROR_LOG_OFFS(gpii_id
);
834 gpi_write_reg(gpii
, gpii
->regs
+ offset
, 0);
837 /* gpii interrupt handler */
838 static irqreturn_t
gpi_handle_irq(int irq
, void *data
)
840 struct gpii
*gpii
= data
;
841 u32 gpii_id
= gpii
->gpii_id
;
845 read_lock_irqsave(&gpii
->pm_lock
, flags
);
	 * We should not receive an interrupt while the software state is
	 * DISABLE; the states are out of sync, so bail out.
851 if (!REG_ACCESS_VALID(gpii
->pm_state
)) {
852 dev_err(gpii
->gpi_dev
->dev
, "receive interrupt while in %s state\n",
853 TO_GPI_PM_STR(gpii
->pm_state
));
857 offset
= GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii
->gpii_id
);
858 type
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
861 /* global gpii error */
862 if (type
& GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB
) {
863 gpi_process_glob_err_irq(gpii
);
864 type
&= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB
);
867 /* transfer complete interrupt */
868 if (type
& GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
) {
869 gpi_process_ieob(gpii
);
870 type
&= ~GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB
;
873 /* event control irq */
874 if (type
& GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL
) {
878 dev_dbg(gpii
->gpi_dev
->dev
,
879 "processing EV CTRL interrupt\n");
880 offset
= GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id
);
881 ev_ch_irq
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
883 offset
= GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
885 gpi_write_reg(gpii
, gpii
->regs
+ offset
, ev_ch_irq
);
886 ev_state
= gpi_read_reg(gpii
, gpii
->ev_cntxt_base_reg
+
888 ev_state
= FIELD_GET(GPII_n_EV_k_CNTXT_0_CHSTATE
, ev_state
);
891 * CMD EV_CMD_DEALLOC is always successful. However
892 * cmd does not change hardware status. So overwriting
893 * software state to default state.
895 if (gpii
->gpi_cmd
== GPI_EV_CMD_DEALLOC
)
896 ev_state
= DEFAULT_EV_CH_STATE
;
898 gpii
->ev_state
= ev_state
;
899 dev_dbg(gpii
->gpi_dev
->dev
, "setting EV state to %s\n",
900 TO_GPI_EV_STATE_STR(gpii
->ev_state
));
901 complete_all(&gpii
->cmd_completion
);
902 type
&= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL
);
905 /* channel control irq */
906 if (type
& GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL
) {
907 dev_dbg(gpii
->gpi_dev
->dev
, "process CH CTRL interrupts\n");
908 gpi_process_ch_ctrl_irq(gpii
);
909 type
&= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL
);
913 dev_err(gpii
->gpi_dev
->dev
, "Unhandled interrupt status:0x%x\n", type
);
914 gpi_process_gen_err_irq(gpii
);
918 offset
= GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii
->gpii_id
);
919 type
= gpi_read_reg(gpii
, gpii
->regs
+ offset
);
923 read_unlock_irqrestore(&gpii
->pm_lock
, flags
);
928 /* process DMA Immediate completion data events */
929 static void gpi_process_imed_data_event(struct gchan
*gchan
,
930 struct immediate_data_event
*imed_event
)
932 struct gpii
*gpii
= gchan
->gpii
;
933 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
934 void *tre
= ch_ring
->base
+ (ch_ring
->el_size
* imed_event
->tre_index
);
935 struct dmaengine_result result
;
936 struct gpi_desc
*gpi_desc
;
937 struct virt_dma_desc
*vd
;
942 * If channel not active don't process event
944 if (gchan
->pm_state
!= ACTIVE_STATE
) {
945 dev_err(gpii
->gpi_dev
->dev
, "skipping processing event because ch @ %s state\n",
946 TO_GPI_PM_STR(gchan
->pm_state
));
950 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
951 vd
= vchan_next_desc(&gchan
->vc
);
953 struct gpi_ere
*gpi_ere
;
954 struct gpi_tre
*gpi_tre
;
956 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
957 dev_dbg(gpii
->gpi_dev
->dev
, "event without a pending descriptor!\n");
958 gpi_ere
= (struct gpi_ere
*)imed_event
;
959 dev_dbg(gpii
->gpi_dev
->dev
,
960 "Event: %08x %08x %08x %08x\n",
961 gpi_ere
->dword
[0], gpi_ere
->dword
[1],
962 gpi_ere
->dword
[2], gpi_ere
->dword
[3]);
964 dev_dbg(gpii
->gpi_dev
->dev
,
965 "Pending TRE: %08x %08x %08x %08x\n",
966 gpi_tre
->dword
[0], gpi_tre
->dword
[1],
967 gpi_tre
->dword
[2], gpi_tre
->dword
[3]);
970 gpi_desc
= to_gpi_desc(vd
);
971 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
	 * The RP reported by the event points to the last TRE processed,
	 * so advance the ring rp to tre + 1
977 tre
+= ch_ring
->el_size
;
978 if (tre
>= (ch_ring
->base
+ ch_ring
->len
))
982 /* make sure rp updates are immediately visible to all cores */
985 chid
= imed_event
->chid
;
986 if (imed_event
->code
== MSM_GPI_TCE_EOT
&& gpii
->ieob_set
) {
987 if (chid
== GPI_RX_CHAN
)
993 if (imed_event
->code
== MSM_GPI_TCE_UNEXP_ERR
)
994 result
.result
= DMA_TRANS_ABORTED
;
996 result
.result
= DMA_TRANS_NOERROR
;
997 result
.residue
= gpi_desc
->len
- imed_event
->length
;
999 dma_cookie_complete(&vd
->tx
);
1000 dmaengine_desc_get_callback_invoke(&vd
->tx
, &result
);
1003 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
1004 list_del(&vd
->node
);
1005 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
1010 /* processing transfer completion events */
1011 static void gpi_process_xfer_compl_event(struct gchan
*gchan
,
1012 struct xfer_compl_event
*compl_event
)
1014 struct gpii
*gpii
= gchan
->gpii
;
1015 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
1016 void *ev_rp
= to_virtual(ch_ring
, compl_event
->ptr
);
1017 struct virt_dma_desc
*vd
;
1018 struct gpi_desc
*gpi_desc
;
1019 struct dmaengine_result result
;
1020 unsigned long flags
;
1023 /* only process events on active channel */
1024 if (unlikely(gchan
->pm_state
!= ACTIVE_STATE
)) {
1025 dev_err(gpii
->gpi_dev
->dev
, "skipping processing event because ch @ %s state\n",
1026 TO_GPI_PM_STR(gchan
->pm_state
));
1030 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
1031 vd
= vchan_next_desc(&gchan
->vc
);
1033 struct gpi_ere
*gpi_ere
;
1035 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
1036 dev_err(gpii
->gpi_dev
->dev
, "Event without a pending descriptor!\n");
1037 gpi_ere
= (struct gpi_ere
*)compl_event
;
1038 dev_err(gpii
->gpi_dev
->dev
,
1039 "Event: %08x %08x %08x %08x\n",
1040 gpi_ere
->dword
[0], gpi_ere
->dword
[1],
1041 gpi_ere
->dword
[2], gpi_ere
->dword
[3]);
1045 gpi_desc
= to_gpi_desc(vd
);
1046 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
	 * The RP reported by the event points to the last TRE processed,
	 * so advance the ring rp to ev_rp + 1
1052 ev_rp
+= ch_ring
->el_size
;
1053 if (ev_rp
>= (ch_ring
->base
+ ch_ring
->len
))
1054 ev_rp
= ch_ring
->base
;
1055 ch_ring
->rp
= ev_rp
;
1057 /* update must be visible to other cores */
1060 chid
= compl_event
->chid
;
1061 if (compl_event
->code
== MSM_GPI_TCE_EOT
&& gpii
->ieob_set
) {
1062 if (chid
== GPI_RX_CHAN
)
1068 if (compl_event
->code
== MSM_GPI_TCE_UNEXP_ERR
) {
1069 dev_err(gpii
->gpi_dev
->dev
, "Error in Transaction\n");
1070 result
.result
= DMA_TRANS_ABORTED
;
1072 dev_dbg(gpii
->gpi_dev
->dev
, "Transaction Success\n");
1073 result
.result
= DMA_TRANS_NOERROR
;
1075 result
.residue
= gpi_desc
->len
- compl_event
->length
;
1076 dev_dbg(gpii
->gpi_dev
->dev
, "Residue %d\n", result
.residue
);
1078 dma_cookie_complete(&vd
->tx
);
1079 dmaengine_desc_get_callback_invoke(&vd
->tx
, &result
);
1082 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
1083 list_del(&vd
->node
);
1084 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
1089 /* process all events */
1090 static void gpi_process_events(struct gpii
*gpii
)
1092 struct gpi_ring
*ev_ring
= &gpii
->ev_ring
;
1093 phys_addr_t cntxt_rp
;
1095 union gpi_event
*gpi_event
;
1096 struct gchan
*gchan
;
1099 cntxt_rp
= gpi_read_reg(gpii
, gpii
->ev_ring_rp_lsb_reg
);
1100 rp
= to_virtual(ev_ring
, cntxt_rp
);
1103 while (rp
!= ev_ring
->rp
) {
1104 gpi_event
= ev_ring
->rp
;
1105 chid
= gpi_event
->xfer_compl_event
.chid
;
1106 type
= gpi_event
->xfer_compl_event
.type
;
1108 dev_dbg(gpii
->gpi_dev
->dev
,
1109 "Event: CHID:%u, type:%x %08x %08x %08x %08x\n",
1110 chid
, type
, gpi_event
->gpi_ere
.dword
[0],
1111 gpi_event
->gpi_ere
.dword
[1], gpi_event
->gpi_ere
.dword
[2],
1112 gpi_event
->gpi_ere
.dword
[3]);
1115 case XFER_COMPLETE_EV_TYPE
:
1116 gchan
= &gpii
->gchan
[chid
];
1117 gpi_process_xfer_compl_event(gchan
,
1118 &gpi_event
->xfer_compl_event
);
1121 dev_dbg(gpii
->gpi_dev
->dev
, "stale event, not processing\n");
1123 case IMMEDIATE_DATA_EV_TYPE
:
1124 gchan
= &gpii
->gchan
[chid
];
1125 gpi_process_imed_data_event(gchan
,
1126 &gpi_event
->immediate_data_event
);
1128 case QUP_NOTIF_EV_TYPE
:
1129 dev_dbg(gpii
->gpi_dev
->dev
, "QUP_NOTIF_EV_TYPE\n");
1132 dev_dbg(gpii
->gpi_dev
->dev
,
1133 "not supported event type:0x%x\n", type
);
1135 gpi_ring_recycle_ev_element(ev_ring
);
1137 gpi_write_ev_db(gpii
, ev_ring
, ev_ring
->wp
);
1139 /* clear pending IEOB events */
1140 gpi_write_reg(gpii
, gpii
->ieob_clr_reg
, BIT(0));
1142 cntxt_rp
= gpi_read_reg(gpii
, gpii
->ev_ring_rp_lsb_reg
);
1143 rp
= to_virtual(ev_ring
, cntxt_rp
);
1145 } while (rp
!= ev_ring
->rp
);
1148 /* processing events using tasklet */
1149 static void gpi_ev_tasklet(unsigned long data
)
1151 struct gpii
*gpii
= (struct gpii
*)data
;
1153 read_lock_bh(&gpii
->pm_lock
);
1154 if (!REG_ACCESS_VALID(gpii
->pm_state
)) {
1155 read_unlock_bh(&gpii
->pm_lock
);
1156 dev_err(gpii
->gpi_dev
->dev
, "not processing any events, pm_state:%s\n",
1157 TO_GPI_PM_STR(gpii
->pm_state
));
1161 /* process the events */
1162 gpi_process_events(gpii
);
1164 /* enable IEOB, switching back to interrupts */
1165 gpi_config_interrupts(gpii
, MASK_IEOB_SETTINGS
, 1);
1166 read_unlock_bh(&gpii
->pm_lock
);
1169 /* marks all pending events for the channel as stale */
1170 static void gpi_mark_stale_events(struct gchan
*gchan
)
1172 struct gpii
*gpii
= gchan
->gpii
;
1173 struct gpi_ring
*ev_ring
= &gpii
->ev_ring
;
1174 u32 cntxt_rp
, local_rp
;
1177 cntxt_rp
= gpi_read_reg(gpii
, gpii
->ev_ring_rp_lsb_reg
);
1179 ev_rp
= ev_ring
->rp
;
1180 local_rp
= (u32
)to_physical(ev_ring
, ev_rp
);
1181 while (local_rp
!= cntxt_rp
) {
1182 union gpi_event
*gpi_event
= ev_rp
;
1183 u32 chid
= gpi_event
->xfer_compl_event
.chid
;
1185 if (chid
== gchan
->chid
)
1186 gpi_event
->xfer_compl_event
.type
= STALE_EV_TYPE
;
1187 ev_rp
+= ev_ring
->el_size
;
1188 if (ev_rp
>= (ev_ring
->base
+ ev_ring
->len
))
1189 ev_rp
= ev_ring
->base
;
1190 cntxt_rp
= gpi_read_reg(gpii
, gpii
->ev_ring_rp_lsb_reg
);
1191 local_rp
= (u32
)to_physical(ev_ring
, ev_rp
);
1195 /* reset sw state and issue channel reset or de-alloc */
1196 static int gpi_reset_chan(struct gchan
*gchan
, enum gpi_cmd gpi_cmd
)
1198 struct gpii
*gpii
= gchan
->gpii
;
1199 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
1200 unsigned long flags
;
1204 ret
= gpi_send_cmd(gpii
, gchan
, gpi_cmd
);
1206 dev_err(gpii
->gpi_dev
->dev
, "Error with cmd:%s ret:%d\n",
1207 TO_GPI_CMD_STR(gpi_cmd
), ret
);
1211 /* initialize the local ring ptrs */
1212 ch_ring
->rp
= ch_ring
->base
;
1213 ch_ring
->wp
= ch_ring
->base
;
1215 /* visible to other cores */
1218 /* check event ring for any stale events */
1219 write_lock_irq(&gpii
->pm_lock
);
1220 gpi_mark_stale_events(gchan
);
1222 /* remove all async descriptors */
1223 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
1224 vchan_get_all_descriptors(&gchan
->vc
, &list
);
1225 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
1226 write_unlock_irq(&gpii
->pm_lock
);
1227 vchan_dma_desc_free_list(&gchan
->vc
, &list
);
1232 static int gpi_start_chan(struct gchan
*gchan
)
1234 struct gpii
*gpii
= gchan
->gpii
;
1237 ret
= gpi_send_cmd(gpii
, gchan
, GPI_CH_CMD_START
);
1239 dev_err(gpii
->gpi_dev
->dev
, "Error with cmd:%s ret:%d\n",
1240 TO_GPI_CMD_STR(GPI_CH_CMD_START
), ret
);
1244 /* gpii CH is active now */
1245 write_lock_irq(&gpii
->pm_lock
);
1246 gchan
->pm_state
= ACTIVE_STATE
;
1247 write_unlock_irq(&gpii
->pm_lock
);
1252 static int gpi_stop_chan(struct gchan
*gchan
)
1254 struct gpii
*gpii
= gchan
->gpii
;
1257 ret
= gpi_send_cmd(gpii
, gchan
, GPI_CH_CMD_STOP
);
1259 dev_err(gpii
->gpi_dev
->dev
, "Error with cmd:%s ret:%d\n",
1260 TO_GPI_CMD_STR(GPI_CH_CMD_STOP
), ret
);
1267 /* allocate and configure the transfer channel */
1268 static int gpi_alloc_chan(struct gchan
*chan
, bool send_alloc_cmd
)
1270 struct gpii
*gpii
= chan
->gpii
;
1271 struct gpi_ring
*ring
= &chan
->ch_ring
;
1273 u32 id
= gpii
->gpii_id
;
1274 u32 chid
= chan
->chid
;
1275 u32 pair_chid
= !chid
;
1277 if (send_alloc_cmd
) {
1278 ret
= gpi_send_cmd(gpii
, chan
, GPI_CH_CMD_ALLOCATE
);
1280 dev_err(gpii
->gpi_dev
->dev
, "Error with cmd:%s ret:%d\n",
1281 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE
), ret
);
1286 gpi_write_reg(gpii
, chan
->ch_cntxt_base_reg
+ CNTXT_0_CONFIG
,
1287 GPII_n_CH_k_CNTXT_0(ring
->el_size
, 0, chan
->dir
, GPI_CHTYPE_PROTO_GPI
));
1288 gpi_write_reg(gpii
, chan
->ch_cntxt_base_reg
+ CNTXT_1_R_LENGTH
, ring
->len
);
1289 gpi_write_reg(gpii
, chan
->ch_cntxt_base_reg
+ CNTXT_2_RING_BASE_LSB
, ring
->phys_addr
);
1290 gpi_write_reg(gpii
, chan
->ch_cntxt_base_reg
+ CNTXT_3_RING_BASE_MSB
,
1291 upper_32_bits(ring
->phys_addr
));
1292 gpi_write_reg(gpii
, chan
->ch_cntxt_db_reg
+ CNTXT_5_RING_RP_MSB
- CNTXT_4_RING_RP_LSB
,
1293 upper_32_bits(ring
->phys_addr
));
1294 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_SCRATCH_0_OFFS(id
, chid
),
1295 GPII_n_CH_k_SCRATCH_0(pair_chid
, chan
->protocol
, chan
->seid
));
1296 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_SCRATCH_1_OFFS(id
, chid
), 0);
1297 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_SCRATCH_2_OFFS(id
, chid
), 0);
1298 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_SCRATCH_3_OFFS(id
, chid
), 0);
1299 gpi_write_reg(gpii
, gpii
->regs
+ GPII_n_CH_k_QOS_OFFS(id
, chid
), 1);
1301 /* flush all the writes */
1306 /* allocate and configure event ring */
1307 static int gpi_alloc_ev_chan(struct gpii
*gpii
)
1309 struct gpi_ring
*ring
= &gpii
->ev_ring
;
1310 void __iomem
*base
= gpii
->ev_cntxt_base_reg
;
1313 ret
= gpi_send_cmd(gpii
, NULL
, GPI_EV_CMD_ALLOCATE
);
1315 dev_err(gpii
->gpi_dev
->dev
, "error with cmd:%s ret:%d\n",
1316 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE
), ret
);
1320 /* program event context */
1321 gpi_write_reg(gpii
, base
+ CNTXT_0_CONFIG
,
1322 GPII_n_EV_k_CNTXT_0(ring
->el_size
, GPI_INTTYPE_IRQ
, GPI_CHTYPE_GPI_EV
));
1323 gpi_write_reg(gpii
, base
+ CNTXT_1_R_LENGTH
, ring
->len
);
1324 gpi_write_reg(gpii
, base
+ CNTXT_2_RING_BASE_LSB
, lower_32_bits(ring
->phys_addr
));
1325 gpi_write_reg(gpii
, base
+ CNTXT_3_RING_BASE_MSB
, upper_32_bits(ring
->phys_addr
));
1326 gpi_write_reg(gpii
, gpii
->ev_cntxt_db_reg
+ CNTXT_5_RING_RP_MSB
- CNTXT_4_RING_RP_LSB
,
1327 upper_32_bits(ring
->phys_addr
));
1328 gpi_write_reg(gpii
, base
+ CNTXT_8_RING_INT_MOD
, 0);
1329 gpi_write_reg(gpii
, base
+ CNTXT_10_RING_MSI_LSB
, 0);
1330 gpi_write_reg(gpii
, base
+ CNTXT_11_RING_MSI_MSB
, 0);
1331 gpi_write_reg(gpii
, base
+ CNTXT_8_RING_INT_MOD
, 0);
1332 gpi_write_reg(gpii
, base
+ CNTXT_12_RING_RP_UPDATE_LSB
, 0);
1333 gpi_write_reg(gpii
, base
+ CNTXT_13_RING_RP_UPDATE_MSB
, 0);
1335 /* add events to ring */
1336 ring
->wp
= (ring
->base
+ ring
->len
- ring
->el_size
);
1338 /* flush all the writes */
1341 /* gpii is active now */
1342 write_lock_irq(&gpii
->pm_lock
);
1343 gpii
->pm_state
= ACTIVE_STATE
;
1344 write_unlock_irq(&gpii
->pm_lock
);
1345 gpi_write_ev_db(gpii
, ring
, ring
->wp
);
1350 /* calculate # of ERE/TRE available to queue */
1351 static int gpi_ring_num_elements_avail(const struct gpi_ring
* const ring
)
1355 if (ring
->wp
< ring
->rp
) {
1356 elements
= ((ring
->rp
- ring
->wp
) / ring
->el_size
) - 1;
1358 elements
= (ring
->rp
- ring
->base
) / ring
->el_size
;
1359 elements
+= ((ring
->base
+ ring
->len
- ring
->wp
) / ring
->el_size
) - 1;
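/* gpi_ring_add_element - claim the next free slot in the ring and return its address via wp */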
1365 static int gpi_ring_add_element(struct gpi_ring
*ring
, void **wp
)
1367 if (gpi_ring_num_elements_avail(ring
) <= 0)
1371 ring
->wp
+= ring
->el_size
;
1372 if (ring
->wp
>= (ring
->base
+ ring
->len
))
1373 ring
->wp
= ring
->base
;
1375 /* visible to other cores */
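/* gpi_ring_recycle_ev_element - return a consumed event element to the ring (advances wp and rp) */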
1381 static void gpi_ring_recycle_ev_element(struct gpi_ring
*ring
)
1384 ring
->wp
+= ring
->el_size
;
1385 if (ring
->wp
>= (ring
->base
+ ring
->len
))
1386 ring
->wp
= ring
->base
;
1389 ring
->rp
+= ring
->el_size
;
1390 if (ring
->rp
>= (ring
->base
+ ring
->len
))
1391 ring
->rp
= ring
->base
;
1393 /* visible to other cores */
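/* free the ring's backing DMA memory and clear the ring bookkeeping */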
1397 static void gpi_free_ring(struct gpi_ring
*ring
,
1400 dma_free_coherent(gpii
->gpi_dev
->dev
, ring
->alloc_size
,
1401 ring
->pre_aligned
, ring
->dma_handle
);
1402 memset(ring
, 0, sizeof(*ring
));
1405 /* allocate memory for transfer and event rings */
1406 static int gpi_alloc_ring(struct gpi_ring
*ring
, u32 elements
,
1407 u32 el_size
, struct gpii
*gpii
)
1409 u64 len
= elements
* el_size
;
1412 /* ring len must be power of 2 */
1413 bit
= find_last_bit((unsigned long *)&len
, 32);
1414 if (((1 << bit
) - 1) & len
)
1417 ring
->alloc_size
= (len
+ (len
- 1));
1418 dev_dbg(gpii
->gpi_dev
->dev
,
1419 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
1420 elements
, el_size
, (elements
* el_size
), len
,
1423 ring
->pre_aligned
= dma_alloc_coherent(gpii
->gpi_dev
->dev
,
1425 &ring
->dma_handle
, GFP_KERNEL
);
1426 if (!ring
->pre_aligned
) {
1427 dev_err(gpii
->gpi_dev
->dev
, "could not alloc size:%zu mem for ring\n",
1432 /* align the physical mem */
1433 ring
->phys_addr
= (ring
->dma_handle
+ (len
- 1)) & ~(len
- 1);
1434 ring
->base
= ring
->pre_aligned
+ (ring
->phys_addr
- ring
->dma_handle
);
1435 ring
->rp
= ring
->base
;
1436 ring
->wp
= ring
->base
;
1438 ring
->el_size
= el_size
;
1439 ring
->elements
= ring
->len
/ ring
->el_size
;
1440 memset(ring
->base
, 0, ring
->len
);
1441 ring
->configured
= true;
1443 /* update to other cores */
1446 dev_dbg(gpii
->gpi_dev
->dev
,
1447 "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
1448 &ring
->dma_handle
, &ring
->phys_addr
, ring
->len
,
1449 ring
->el_size
, ring
->elements
);
1454 /* copy tre into transfer ring */
1455 static void gpi_queue_xfer(struct gpii
*gpii
, struct gchan
*gchan
,
1456 struct gpi_tre
*gpi_tre
, void **wp
)
1458 struct gpi_tre
*ch_tre
;
1461 /* get next tre location we can copy */
1462 ret
= gpi_ring_add_element(&gchan
->ch_ring
, (void **)&ch_tre
);
1463 if (unlikely(ret
)) {
1464 dev_err(gpii
->gpi_dev
->dev
, "Error adding ring element to xfer ring\n");
1468 /* copy the tre info */
1469 memcpy(ch_tre
, gpi_tre
, sizeof(*ch_tre
));
1473 /* reset and restart transfer channel */
1474 static int gpi_terminate_all(struct dma_chan
*chan
)
1476 struct gchan
*gchan
= to_gchan(chan
);
1477 struct gpii
*gpii
= gchan
->gpii
;
1478 int schid
, echid
, i
;
1481 mutex_lock(&gpii
->ctrl_lock
);
	 * treat both channels as a group unless the protocol is UART;
	 * STOP, RESET, and START need to happen in lockstep
1487 schid
= (gchan
->protocol
== QCOM_GPI_UART
) ? gchan
->chid
: 0;
1488 echid
= (gchan
->protocol
== QCOM_GPI_UART
) ? schid
+ 1 : MAX_CHANNELS_PER_GPII
;
1490 /* stop the channel */
1491 for (i
= schid
; i
< echid
; i
++) {
1492 gchan
= &gpii
->gchan
[i
];
1494 /* disable ch state so no more TRE processing */
1495 write_lock_irq(&gpii
->pm_lock
);
1496 gchan
->pm_state
= PREPARE_TERMINATE
;
1497 write_unlock_irq(&gpii
->pm_lock
);
1499 /* send command to Stop the channel */
1500 ret
= gpi_stop_chan(gchan
);
1503 /* reset the channels (clears any pending tre) */
1504 for (i
= schid
; i
< echid
; i
++) {
1505 gchan
= &gpii
->gchan
[i
];
1507 ret
= gpi_reset_chan(gchan
, GPI_CH_CMD_RESET
);
1509 dev_err(gpii
->gpi_dev
->dev
, "Error resetting channel ret:%d\n", ret
);
1510 goto terminate_exit
;
1513 /* reprogram channel CNTXT */
1514 ret
= gpi_alloc_chan(gchan
, false);
1516 dev_err(gpii
->gpi_dev
->dev
, "Error alloc_channel ret:%d\n", ret
);
1517 goto terminate_exit
;
1521 /* restart the channels */
1522 for (i
= schid
; i
< echid
; i
++) {
1523 gchan
= &gpii
->gchan
[i
];
1525 ret
= gpi_start_chan(gchan
);
1527 dev_err(gpii
->gpi_dev
->dev
, "Error Starting Channel ret:%d\n", ret
);
1528 goto terminate_exit
;
1533 mutex_unlock(&gpii
->ctrl_lock
);
1537 /* pause dma transfer for all channels */
1538 static int gpi_pause(struct dma_chan
*chan
)
1540 struct gchan
*gchan
= to_gchan(chan
);
1541 struct gpii
*gpii
= gchan
->gpii
;
1544 mutex_lock(&gpii
->ctrl_lock
);
1547 * pause/resume are per gpii not per channel, so
1548 * client needs to call pause only once
1550 if (gpii
->pm_state
== PAUSE_STATE
) {
1551 dev_dbg(gpii
->gpi_dev
->dev
, "channel is already paused\n");
1552 mutex_unlock(&gpii
->ctrl_lock
);
1556 /* send stop command to stop the channels */
1557 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++) {
1558 ret
= gpi_stop_chan(&gpii
->gchan
[i
]);
1560 mutex_unlock(&gpii
->ctrl_lock
);
1565 disable_irq(gpii
->irq
);
1567 /* Wait for threads to complete out */
1568 tasklet_kill(&gpii
->ev_task
);
1570 write_lock_irq(&gpii
->pm_lock
);
1571 gpii
->pm_state
= PAUSE_STATE
;
1572 write_unlock_irq(&gpii
->pm_lock
);
1573 mutex_unlock(&gpii
->ctrl_lock
);
1578 /* resume dma transfer */
1579 static int gpi_resume(struct dma_chan
*chan
)
1581 struct gchan
*gchan
= to_gchan(chan
);
1582 struct gpii
*gpii
= gchan
->gpii
;
1585 mutex_lock(&gpii
->ctrl_lock
);
1586 if (gpii
->pm_state
== ACTIVE_STATE
) {
1587 dev_dbg(gpii
->gpi_dev
->dev
, "channel is already active\n");
1588 mutex_unlock(&gpii
->ctrl_lock
);
1592 enable_irq(gpii
->irq
);
1594 /* send start command to start the channels */
1595 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++) {
1596 ret
= gpi_send_cmd(gpii
, &gpii
->gchan
[i
], GPI_CH_CMD_START
);
1598 dev_err(gpii
->gpi_dev
->dev
, "Error starting chan, ret:%d\n", ret
);
1599 mutex_unlock(&gpii
->ctrl_lock
);
1604 write_lock_irq(&gpii
->pm_lock
);
1605 gpii
->pm_state
= ACTIVE_STATE
;
1606 write_unlock_irq(&gpii
->pm_lock
);
1607 mutex_unlock(&gpii
->ctrl_lock
);
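/* vchan desc_free callback: release a completed descriptor */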
1612 static void gpi_desc_free(struct virt_dma_desc
*vd
)
1614 struct gpi_desc
*gpi_desc
= to_gpi_desc(vd
);
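/* cache the client's peripheral_config so later TRE setup can use it */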
1621 gpi_peripheral_config(struct dma_chan
*chan
, struct dma_slave_config
*config
)
1623 struct gchan
*gchan
= to_gchan(chan
);
1625 if (!config
->peripheral_config
)
1628 gchan
->config
= krealloc(gchan
->config
, config
->peripheral_size
, GFP_NOWAIT
);
1632 memcpy(gchan
->config
, config
->peripheral_config
, config
->peripheral_size
);
1637 static int gpi_create_i2c_tre(struct gchan
*chan
, struct gpi_desc
*desc
,
1638 struct scatterlist
*sgl
, enum dma_transfer_direction direction
)
1640 struct gpi_i2c_config
*i2c
= chan
->config
;
1641 struct device
*dev
= chan
->gpii
->gpi_dev
->dev
;
1642 unsigned int tre_idx
= 0;
1644 struct gpi_tre
*tre
;
1647 /* first create config tre if applicable */
1648 if (i2c
->set_config
) {
1649 tre
= &desc
->tre
[tre_idx
];
1652 tre
->dword
[0] = u32_encode_bits(i2c
->low_count
, TRE_I2C_C0_TLOW
);
1653 tre
->dword
[0] |= u32_encode_bits(i2c
->high_count
, TRE_I2C_C0_THIGH
);
1654 tre
->dword
[0] |= u32_encode_bits(i2c
->cycle_count
, TRE_I2C_C0_TCYL
);
1655 tre
->dword
[0] |= u32_encode_bits(i2c
->pack_enable
, TRE_I2C_C0_TX_PACK
);
1656 tre
->dword
[0] |= u32_encode_bits(i2c
->pack_enable
, TRE_I2C_C0_RX_PACK
);
1660 tre
->dword
[2] = u32_encode_bits(i2c
->clk_div
, TRE_C0_CLK_DIV
);
1662 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_CONFIG0
, TRE_FLAGS_TYPE
);
1663 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN
);
1666 /* create the GO tre for Tx */
1667 if (i2c
->op
== I2C_WRITE
) {
1668 tre
= &desc
->tre
[tre_idx
];
1672 tre
->dword
[0] = u32_encode_bits(I2C_READ
, TRE_I2C_GO_CMD
);
1674 tre
->dword
[0] = u32_encode_bits(i2c
->op
, TRE_I2C_GO_CMD
);
1676 tre
->dword
[0] |= u32_encode_bits(i2c
->addr
, TRE_I2C_GO_ADDR
);
1677 tre
->dword
[0] |= u32_encode_bits(i2c
->stretch
, TRE_I2C_GO_STRETCH
);
1680 tre
->dword
[2] = u32_encode_bits(i2c
->rx_len
, TRE_RX_LEN
);
1682 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_GO
, TRE_FLAGS_TYPE
);
1685 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_LINK
);
1687 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN
);
1690 if (i2c
->op
== I2C_READ
|| i2c
->multi_msg
== false) {
1691 /* create the DMA TRE */
1692 tre
= &desc
->tre
[tre_idx
];
1695 address
= sg_dma_address(sgl
);
1696 tre
->dword
[0] = lower_32_bits(address
);
1697 tre
->dword
[1] = upper_32_bits(address
);
1699 tre
->dword
[2] = u32_encode_bits(sg_dma_len(sgl
), TRE_DMA_LEN
);
1701 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_DMA
, TRE_FLAGS_TYPE
);
1702 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT
);
1705 for (i
= 0; i
< tre_idx
; i
++)
1706 dev_dbg(dev
, "TRE:%d %x:%x:%x:%x\n", i
, desc
->tre
[i
].dword
[0],
1707 desc
->tre
[i
].dword
[1], desc
->tre
[i
].dword
[2], desc
->tre
[i
].dword
[3]);
1712 static int gpi_create_spi_tre(struct gchan
*chan
, struct gpi_desc
*desc
,
1713 struct scatterlist
*sgl
, enum dma_transfer_direction direction
)
1715 struct gpi_spi_config
*spi
= chan
->config
;
1716 struct device
*dev
= chan
->gpii
->gpi_dev
->dev
;
1717 unsigned int tre_idx
= 0;
1719 struct gpi_tre
*tre
;
1722 /* first create config tre if applicable */
1723 if (direction
== DMA_MEM_TO_DEV
&& spi
->set_config
) {
1724 tre
= &desc
->tre
[tre_idx
];
1727 tre
->dword
[0] = u32_encode_bits(spi
->word_len
, TRE_SPI_C0_WORD_SZ
);
1728 tre
->dword
[0] |= u32_encode_bits(spi
->loopback_en
, TRE_SPI_C0_LOOPBACK
);
1729 tre
->dword
[0] |= u32_encode_bits(spi
->clock_pol_high
, TRE_SPI_C0_CPOL
);
1730 tre
->dword
[0] |= u32_encode_bits(spi
->data_pol_high
, TRE_SPI_C0_CPHA
);
1731 tre
->dword
[0] |= u32_encode_bits(spi
->pack_en
, TRE_SPI_C0_TX_PACK
);
1732 tre
->dword
[0] |= u32_encode_bits(spi
->pack_en
, TRE_SPI_C0_RX_PACK
);
1736 tre
->dword
[2] = u32_encode_bits(spi
->clk_div
, TRE_C0_CLK_DIV
);
1737 tre
->dword
[2] |= u32_encode_bits(spi
->clk_src
, TRE_C0_CLK_SRC
);
1739 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_CONFIG0
, TRE_FLAGS_TYPE
);
1740 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN
);
1743 /* create the GO tre for Tx */
1744 if (direction
== DMA_MEM_TO_DEV
) {
1745 tre
= &desc
->tre
[tre_idx
];
1748 tre
->dword
[0] = u32_encode_bits(spi
->fragmentation
, TRE_SPI_GO_FRAG
);
1749 tre
->dword
[0] |= u32_encode_bits(spi
->cs
, TRE_SPI_GO_CS
);
1750 tre
->dword
[0] |= u32_encode_bits(spi
->cmd
, TRE_SPI_GO_CMD
);
1754 tre
->dword
[2] = u32_encode_bits(spi
->rx_len
, TRE_RX_LEN
);
1756 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_GO
, TRE_FLAGS_TYPE
);
1757 if (spi
->cmd
== SPI_RX
)
1758 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB
);
1760 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN
);
1763 /* create the dma tre */
1764 tre
= &desc
->tre
[tre_idx
];
1767 address
= sg_dma_address(sgl
);
1768 tre
->dword
[0] = lower_32_bits(address
);
1769 tre
->dword
[1] = upper_32_bits(address
);
1771 tre
->dword
[2] = u32_encode_bits(sg_dma_len(sgl
), TRE_DMA_LEN
);
1773 tre
->dword
[3] = u32_encode_bits(TRE_TYPE_DMA
, TRE_FLAGS_TYPE
);
1774 if (direction
== DMA_MEM_TO_DEV
)
1775 tre
->dword
[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT
);
1777 for (i
= 0; i
< tre_idx
; i
++)
1778 dev_dbg(dev
, "TRE:%d %x:%x:%x:%x\n", i
, desc
->tre
[i
].dword
[0],
1779 desc
->tre
[i
].dword
[1], desc
->tre
[i
].dword
[2], desc
->tre
[i
].dword
[3]);
/* prepare a descriptor and its TREs for a slave scatter-gather transfer */
1785 static struct dma_async_tx_descriptor
*
1786 gpi_prep_slave_sg(struct dma_chan
*chan
, struct scatterlist
*sgl
,
1787 unsigned int sg_len
, enum dma_transfer_direction direction
,
1788 unsigned long flags
, void *context
)
1790 struct gchan
*gchan
= to_gchan(chan
);
1791 struct gpii
*gpii
= gchan
->gpii
;
1792 struct device
*dev
= gpii
->gpi_dev
->dev
;
1793 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
1794 struct gpi_desc
*gpi_desc
;
1799 gpii
->ieob_set
= false;
1800 if (!is_slave_direction(direction
)) {
1801 dev_err(gpii
->gpi_dev
->dev
, "invalid dma direction: %d\n", direction
);
1806 dev_err(dev
, "Multi sg sent, we support only one atm: %d\n", sg_len
);
1811 set_config
= *(u32
*)gchan
->config
;
1814 if (direction
== DMA_DEV_TO_MEM
) /* rx */
1817 /* calculate # of elements required & available */
1818 nr
= gpi_ring_num_elements_avail(ch_ring
);
1820 dev_err(dev
, "not enough space in ring, avail:%u required:%u\n", nr
, nr_tre
);
1824 gpi_desc
= kzalloc(sizeof(*gpi_desc
), GFP_NOWAIT
);
1828 /* create TREs for xfer */
1829 if (gchan
->protocol
== QCOM_GPI_SPI
) {
1830 i
= gpi_create_spi_tre(gchan
, gpi_desc
, sgl
, direction
);
1831 } else if (gchan
->protocol
== QCOM_GPI_I2C
) {
1832 i
= gpi_create_i2c_tre(gchan
, gpi_desc
, sgl
, direction
);
1834 dev_err(dev
, "invalid peripheral: %d\n", gchan
->protocol
);
1839 /* set up the descriptor */
1840 gpi_desc
->gchan
= gchan
;
1841 gpi_desc
->len
= sg_dma_len(sgl
);
1842 gpi_desc
->num_tre
= i
;
1844 return vchan_tx_prep(&gchan
->vc
, &gpi_desc
->vd
, flags
);
/* ring the transfer ring doorbell to begin the transfer */
1848 static void gpi_issue_pending(struct dma_chan
*chan
)
1850 struct gchan
*gchan
= to_gchan(chan
);
1851 struct gpii
*gpii
= gchan
->gpii
;
1852 unsigned long flags
, pm_lock_flags
;
1853 struct virt_dma_desc
*vd
= NULL
;
1854 struct gpi_desc
*gpi_desc
;
1855 struct gpi_ring
*ch_ring
= &gchan
->ch_ring
;
1856 void *tre
, *wp
= NULL
;
1859 read_lock_irqsave(&gpii
->pm_lock
, pm_lock_flags
);
	/* move all submitted descriptors to the issued list */
1862 spin_lock_irqsave(&gchan
->vc
.lock
, flags
);
1863 if (vchan_issue_pending(&gchan
->vc
))
1864 vd
= list_last_entry(&gchan
->vc
.desc_issued
,
1865 struct virt_dma_desc
, node
);
1866 spin_unlock_irqrestore(&gchan
->vc
.lock
, flags
);
	/* nothing to do, the list is empty */
1870 read_unlock_irqrestore(&gpii
->pm_lock
, pm_lock_flags
);
1874 gpi_desc
= to_gpi_desc(vd
);
1875 for (i
= 0; i
< gpi_desc
->num_tre
; i
++) {
1876 tre
= &gpi_desc
->tre
[i
];
1877 gpi_queue_xfer(gpii
, gchan
, tre
, &wp
);
1880 gpi_desc
->db
= ch_ring
->wp
;
1881 gpi_write_ch_db(gchan
, &gchan
->ch_ring
, gpi_desc
->db
);
1882 read_unlock_irqrestore(&gpii
->pm_lock
, pm_lock_flags
);
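/* one-time gpii bring-up once both channels are configured: event ring, interrupts, channel alloc and start */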
1885 static int gpi_ch_init(struct gchan
*gchan
)
1887 struct gpii
*gpii
= gchan
->gpii
;
1888 const int ev_factor
= gpii
->gpi_dev
->ev_factor
;
1892 gchan
->pm_state
= CONFIG_STATE
;
1894 /* check if both channels are configured before continue */
1895 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++)
1896 if (gpii
->gchan
[i
].pm_state
!= CONFIG_STATE
)
1899 /* protocol must be same for both channels */
1900 if (gpii
->gchan
[0].protocol
!= gpii
->gchan
[1].protocol
) {
1901 dev_err(gpii
->gpi_dev
->dev
, "protocol did not match protocol %u != %u\n",
1902 gpii
->gchan
[0].protocol
, gpii
->gchan
[1].protocol
);
1907 /* allocate memory for event ring */
1908 elements
= CHAN_TRES
<< ev_factor
;
1909 ret
= gpi_alloc_ring(&gpii
->ev_ring
, elements
,
1910 sizeof(union gpi_event
), gpii
);
1914 /* configure interrupts */
1915 write_lock_irq(&gpii
->pm_lock
);
1916 gpii
->pm_state
= PREPARE_HARDWARE
;
1917 write_unlock_irq(&gpii
->pm_lock
);
1918 ret
= gpi_config_interrupts(gpii
, DEFAULT_IRQ_SETTINGS
, 0);
1920 dev_err(gpii
->gpi_dev
->dev
, "error config. interrupts, ret:%d\n", ret
);
1921 goto error_config_int
;
1924 /* allocate event rings */
1925 ret
= gpi_alloc_ev_chan(gpii
);
1927 dev_err(gpii
->gpi_dev
->dev
, "error alloc_ev_chan:%d\n", ret
);
1928 goto error_alloc_ev_ring
;
1931 /* Allocate all channels */
1932 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++) {
1933 ret
= gpi_alloc_chan(&gpii
->gchan
[i
], true);
1935 dev_err(gpii
->gpi_dev
->dev
, "Error allocating chan:%d\n", ret
);
1936 goto error_alloc_chan
;
1940 /* start channels */
1941 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++) {
1942 ret
= gpi_start_chan(&gpii
->gchan
[i
]);
1944 dev_err(gpii
->gpi_dev
->dev
, "Error start chan:%d\n", ret
);
1945 goto error_start_chan
;
1951 for (i
= i
- 1; i
>= 0; i
--) {
1952 gpi_stop_chan(&gpii
->gchan
[i
]);
1953 gpi_send_cmd(gpii
, gchan
, GPI_CH_CMD_RESET
);
1957 for (i
= i
- 1; i
>= 0; i
--)
1958 gpi_reset_chan(gchan
, GPI_CH_CMD_DE_ALLOC
);
1959 error_alloc_ev_ring
:
1960 gpi_disable_interrupts(gpii
);
1962 gpi_free_ring(&gpii
->ev_ring
, gpii
);
1964 mutex_unlock(&gpii
->ctrl_lock
);
1968 /* release all channel resources */
1969 static void gpi_free_chan_resources(struct dma_chan
*chan
)
1971 struct gchan
*gchan
= to_gchan(chan
);
1972 struct gpii
*gpii
= gchan
->gpii
;
1973 enum gpi_pm_state cur_state
;
1976 mutex_lock(&gpii
->ctrl_lock
);
1978 cur_state
= gchan
->pm_state
;
1980 /* disable ch state so no more TRE processing for this channel */
1981 write_lock_irq(&gpii
->pm_lock
);
1982 gchan
->pm_state
= PREPARE_TERMINATE
;
1983 write_unlock_irq(&gpii
->pm_lock
);
1985 /* attempt to do graceful hardware shutdown */
1986 if (cur_state
== ACTIVE_STATE
) {
1987 gpi_stop_chan(gchan
);
1989 ret
= gpi_send_cmd(gpii
, gchan
, GPI_CH_CMD_RESET
);
1991 dev_err(gpii
->gpi_dev
->dev
, "error resetting channel:%d\n", ret
);
1993 gpi_reset_chan(gchan
, GPI_CH_CMD_DE_ALLOC
);
1996 /* free all allocated memory */
1997 gpi_free_ring(&gchan
->ch_ring
, gpii
);
1998 vchan_free_chan_resources(&gchan
->vc
);
1999 kfree(gchan
->config
);
2001 write_lock_irq(&gpii
->pm_lock
);
2002 gchan
->pm_state
= DISABLE_STATE
;
2003 write_unlock_irq(&gpii
->pm_lock
);
2005 /* if other rings are still active exit */
2006 for (i
= 0; i
< MAX_CHANNELS_PER_GPII
; i
++)
2007 if (gpii
->gchan
[i
].ch_ring
.configured
)
2010 /* deallocate EV Ring */
2011 cur_state
= gpii
->pm_state
;
2012 write_lock_irq(&gpii
->pm_lock
);
2013 gpii
->pm_state
= PREPARE_TERMINATE
;
2014 write_unlock_irq(&gpii
->pm_lock
);
2016 /* wait for threads to complete out */
2017 tasklet_kill(&gpii
->ev_task
);
2019 /* send command to de allocate event ring */
2020 if (cur_state
== ACTIVE_STATE
)
2021 gpi_send_cmd(gpii
, NULL
, GPI_EV_CMD_DEALLOC
);
2023 gpi_free_ring(&gpii
->ev_ring
, gpii
);
2025 /* disable interrupts */
2026 if (cur_state
== ACTIVE_STATE
)
2027 gpi_disable_interrupts(gpii
);
2029 /* set final state to disable */
2030 write_lock_irq(&gpii
->pm_lock
);
2031 gpii
->pm_state
= DISABLE_STATE
;
2032 write_unlock_irq(&gpii
->pm_lock
);
2035 mutex_unlock(&gpii
->ctrl_lock
);
2038 /* allocate channel resources */
2039 static int gpi_alloc_chan_resources(struct dma_chan
*chan
)
2041 struct gchan
*gchan
= to_gchan(chan
);
2042 struct gpii
*gpii
= gchan
->gpii
;
2045 mutex_lock(&gpii
->ctrl_lock
);
2047 /* allocate memory for transfer ring */
2048 ret
= gpi_alloc_ring(&gchan
->ch_ring
, CHAN_TRES
,
2049 sizeof(struct gpi_tre
), gpii
);
2051 goto xfer_alloc_err
;
2053 ret
= gpi_ch_init(gchan
);
2055 mutex_unlock(&gpii
->ctrl_lock
);
2059 mutex_unlock(&gpii
->ctrl_lock
);
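/* pick the gpii already serving this serial engine if one exists, otherwise the next free gpii */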
2064 static int gpi_find_avail_gpii(struct gpi_dev
*gpi_dev
, u32 seid
)
2066 struct gchan
*tx_chan
, *rx_chan
;
2069 /* check if same seid is already configured for another chid */
2070 for (gpii
= 0; gpii
< gpi_dev
->max_gpii
; gpii
++) {
2071 if (!((1 << gpii
) & gpi_dev
->gpii_mask
))
2074 tx_chan
= &gpi_dev
->gpiis
[gpii
].gchan
[GPI_TX_CHAN
];
2075 rx_chan
= &gpi_dev
->gpiis
[gpii
].gchan
[GPI_RX_CHAN
];
2077 if (rx_chan
->vc
.chan
.client_count
&& rx_chan
->seid
== seid
)
2079 if (tx_chan
->vc
.chan
.client_count
&& tx_chan
->seid
== seid
)
2083 /* no channels configured with same seid, return next avail gpii */
2084 for (gpii
= 0; gpii
< gpi_dev
->max_gpii
; gpii
++) {
2085 if (!((1 << gpii
) & gpi_dev
->gpii_mask
))
2088 tx_chan
= &gpi_dev
->gpiis
[gpii
].gchan
[GPI_TX_CHAN
];
2089 rx_chan
= &gpi_dev
->gpiis
[gpii
].gchan
[GPI_RX_CHAN
];
2091 /* check if gpii is configured */
2092 if (tx_chan
->vc
.chan
.client_count
||
2093 rx_chan
->vc
.chan
.client_count
)
2096 /* found a free gpii */
2100 /* no gpii instance available to use */
2104 /* gpi_of_dma_xlate: open client requested channel */
2105 static struct dma_chan
*gpi_of_dma_xlate(struct of_phandle_args
*args
,
2106 struct of_dma
*of_dma
)
2108 struct gpi_dev
*gpi_dev
= (struct gpi_dev
*)of_dma
->of_dma_data
;
2111 struct gchan
*gchan
;
	if (args->args_count < 3) {
		dev_err(gpi_dev->dev, "gpii requires minimum 3 args, client passed:%d args\n",
2119 chid
= args
->args
[0];
2120 if (chid
>= MAX_CHANNELS_PER_GPII
) {
2121 dev_err(gpi_dev
->dev
, "gpii channel:%d not valid\n", chid
);
2125 seid
= args
->args
[1];
2127 /* find next available gpii to use */
2128 gpii
= gpi_find_avail_gpii(gpi_dev
, seid
);
2130 dev_err(gpi_dev
->dev
, "no available gpii instances\n");
2134 gchan
= &gpi_dev
->gpiis
[gpii
].gchan
[chid
];
2135 if (gchan
->vc
.chan
.client_count
) {
2136 dev_err(gpi_dev
->dev
, "gpii:%d chid:%d seid:%d already configured\n",
2137 gpii
, chid
, gchan
->seid
);
2142 gchan
->protocol
= args
->args
[2];
2144 return dma_get_slave_channel(&gchan
->vc
.chan
);
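/* probe: map the gpi register space, set up each enabled gpii and register with the dmaengine framework */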
2147 static int gpi_probe(struct platform_device
*pdev
)
2149 struct gpi_dev
*gpi_dev
;
2153 gpi_dev
= devm_kzalloc(&pdev
->dev
, sizeof(*gpi_dev
), GFP_KERNEL
);
2157 gpi_dev
->dev
= &pdev
->dev
;
2158 gpi_dev
->res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2159 gpi_dev
->regs
= devm_ioremap_resource(gpi_dev
->dev
, gpi_dev
->res
);
2160 if (IS_ERR(gpi_dev
->regs
))
2161 return PTR_ERR(gpi_dev
->regs
);
2162 gpi_dev
->ee_base
= gpi_dev
->regs
;
2164 ret
= of_property_read_u32(gpi_dev
->dev
->of_node
, "dma-channels",
2165 &gpi_dev
->max_gpii
);
		dev_err(gpi_dev->dev, "missing 'dma-channels' DT property\n");
2171 ret
= of_property_read_u32(gpi_dev
->dev
->of_node
, "dma-channel-mask",
2172 &gpi_dev
->gpii_mask
);
		dev_err(gpi_dev->dev, "missing 'dma-channel-mask' DT property\n");
2178 gpi_dev
->ev_factor
= EV_FACTOR
;
2180 ret
= dma_set_mask(gpi_dev
->dev
, DMA_BIT_MASK(64));
2182 dev_err(gpi_dev
->dev
, "Error setting dma_mask to 64, ret:%d\n", ret
);
2186 gpi_dev
->gpiis
= devm_kzalloc(gpi_dev
->dev
, sizeof(*gpi_dev
->gpiis
) *
2187 gpi_dev
->max_gpii
, GFP_KERNEL
);
2188 if (!gpi_dev
->gpiis
)
2191 /* setup all the supported gpii */
2192 INIT_LIST_HEAD(&gpi_dev
->dma_device
.channels
);
2193 for (i
= 0; i
< gpi_dev
->max_gpii
; i
++) {
2194 struct gpii
*gpii
= &gpi_dev
->gpiis
[i
];
2197 if (!((1 << i
) & gpi_dev
->gpii_mask
))
2200 /* set up ev cntxt register map */
2201 gpii
->ev_cntxt_base_reg
= gpi_dev
->ee_base
+ GPII_n_EV_CH_k_CNTXT_0_OFFS(i
, 0);
2202 gpii
->ev_cntxt_db_reg
= gpi_dev
->ee_base
+ GPII_n_EV_CH_k_DOORBELL_0_OFFS(i
, 0);
2203 gpii
->ev_ring_rp_lsb_reg
= gpii
->ev_cntxt_base_reg
+ CNTXT_4_RING_RP_LSB
;
2204 gpii
->ev_cmd_reg
= gpi_dev
->ee_base
+ GPII_n_EV_CH_CMD_OFFS(i
);
2205 gpii
->ieob_clr_reg
= gpi_dev
->ee_base
+ GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i
);
2208 ret
= platform_get_irq(pdev
, i
);
2210 dev_err(gpi_dev
->dev
, "platform_get_irq failed for %d:%d\n", i
, ret
);
2215 /* set up channel specific register info */
2216 for (chan
= 0; chan
< MAX_CHANNELS_PER_GPII
; chan
++) {
2217 struct gchan
*gchan
= &gpii
->gchan
[chan
];
2219 /* set up ch cntxt register map */
2220 gchan
->ch_cntxt_base_reg
= gpi_dev
->ee_base
+
2221 GPII_n_CH_k_CNTXT_0_OFFS(i
, chan
);
2222 gchan
->ch_cntxt_db_reg
= gpi_dev
->ee_base
+
2223 GPII_n_CH_k_DOORBELL_0_OFFS(i
, chan
);
2224 gchan
->ch_cmd_reg
= gpi_dev
->ee_base
+ GPII_n_CH_CMD_OFFS(i
);
2227 vchan_init(&gchan
->vc
, &gpi_dev
->dma_device
);
2228 gchan
->vc
.desc_free
= gpi_desc_free
;
2231 gchan
->dir
= GPII_CHAN_DIR
[chan
];
2233 mutex_init(&gpii
->ctrl_lock
);
2234 rwlock_init(&gpii
->pm_lock
);
2235 tasklet_init(&gpii
->ev_task
, gpi_ev_tasklet
,
2236 (unsigned long)gpii
);
2237 init_completion(&gpii
->cmd_completion
);
2239 gpii
->regs
= gpi_dev
->ee_base
;
2240 gpii
->gpi_dev
= gpi_dev
;
2243 platform_set_drvdata(pdev
, gpi_dev
);
	/* clear and set capabilities */
2246 dma_cap_zero(gpi_dev
->dma_device
.cap_mask
);
2247 dma_cap_set(DMA_SLAVE
, gpi_dev
->dma_device
.cap_mask
);
2249 /* configure dmaengine apis */
2250 gpi_dev
->dma_device
.directions
= BIT(DMA_DEV_TO_MEM
) | BIT(DMA_MEM_TO_DEV
);
2251 gpi_dev
->dma_device
.residue_granularity
= DMA_RESIDUE_GRANULARITY_DESCRIPTOR
;
2252 gpi_dev
->dma_device
.src_addr_widths
= DMA_SLAVE_BUSWIDTH_8_BYTES
;
2253 gpi_dev
->dma_device
.dst_addr_widths
= DMA_SLAVE_BUSWIDTH_8_BYTES
;
2254 gpi_dev
->dma_device
.device_alloc_chan_resources
= gpi_alloc_chan_resources
;
2255 gpi_dev
->dma_device
.device_free_chan_resources
= gpi_free_chan_resources
;
2256 gpi_dev
->dma_device
.device_tx_status
= dma_cookie_status
;
2257 gpi_dev
->dma_device
.device_issue_pending
= gpi_issue_pending
;
2258 gpi_dev
->dma_device
.device_prep_slave_sg
= gpi_prep_slave_sg
;
2259 gpi_dev
->dma_device
.device_config
= gpi_peripheral_config
;
2260 gpi_dev
->dma_device
.device_terminate_all
= gpi_terminate_all
;
2261 gpi_dev
->dma_device
.dev
= gpi_dev
->dev
;
2262 gpi_dev
->dma_device
.device_pause
= gpi_pause
;
2263 gpi_dev
->dma_device
.device_resume
= gpi_resume
;
2265 /* register with dmaengine framework */
2266 ret
= dma_async_device_register(&gpi_dev
->dma_device
);
2268 dev_err(gpi_dev
->dev
, "async_device_register failed ret:%d", ret
);
2272 ret
= of_dma_controller_register(gpi_dev
->dev
->of_node
,
2273 gpi_of_dma_xlate
, gpi_dev
);
2275 dev_err(gpi_dev
->dev
, "of_dma_controller_reg failed ret:%d", ret
);
static const struct of_device_id gpi_of_match[] = {
	{ .compatible = "qcom,sdm845-gpi-dma" },
	{ },
};
MODULE_DEVICE_TABLE(of, gpi_of_match);

static struct platform_driver gpi_driver = {
	.probe = gpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = gpi_of_match,
	},
};
static int __init gpi_init(void)
{
	return platform_driver_register(&gpi_driver);
}
subsys_initcall(gpi_init);

MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
MODULE_LICENSE("GPL v2");