drivers/mfd/stm32-timers.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2016
 * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
 */

#include <linux/bitfield.h>
#include <linux/mfd/stm32-timers.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/reset.h>

#define STM32_TIMERS_MAX_REGISTERS	0x3fc

/* DIER register DMA enable bits */
static const u32 stm32_timers_dier_dmaen[STM32_TIMERS_MAX_DMAS] = {
	TIM_DIER_CC1DE,
	TIM_DIER_CC2DE,
	TIM_DIER_CC3DE,
	TIM_DIER_CC4DE,
	TIM_DIER_UIE,
	TIM_DIER_TDE,
	TIM_DIER_COMDE
};

static void stm32_timers_dma_done(void *p)
{
	struct stm32_timers_dma *dma = p;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(dma->chan, dma->chan->cookie, &state);
	if (status == DMA_COMPLETE)
		complete(&dma->completion);
}

/**
 * stm32_timers_dma_burst_read - Read from timers registers using DMA.
 *
 * Read from STM32 timers registers using DMA on a single event.
 * @dev: reference to stm32_timers MFD device
 * @buf: DMA'able destination buffer
 * @id: stm32_timers_dmas event identifier (ch[1..4], up, trig or com)
 * @reg: registers start offset for DMA to read from (like CCRx for capture)
 * @num_reg: number of registers to read upon each DMA request, starting at @reg
 * @bursts: number of bursts to read (e.g. two for PWM period capture)
 * @tmo_ms: timeout (milliseconds)
 */
int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
				enum stm32_timers_dmas id, u32 reg,
				unsigned int num_reg, unsigned int bursts,
				unsigned long tmo_ms)
{
	struct stm32_timers *ddata = dev_get_drvdata(dev);
	unsigned long timeout = msecs_to_jiffies(tmo_ms);
	struct regmap *regmap = ddata->regmap;
	struct stm32_timers_dma *dma = &ddata->dma;
	size_t len = num_reg * bursts * sizeof(u32);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_cookie_t cookie;
	dma_addr_t dma_buf;
	u32 dbl, dba;
	long err;
	int ret;

	/* Sanity check */
	if (id < STM32_TIMERS_DMA_CH1 || id >= STM32_TIMERS_MAX_DMAS)
		return -EINVAL;

	if (!num_reg || !bursts || reg > STM32_TIMERS_MAX_REGISTERS ||
	    (reg + num_reg * sizeof(u32)) > STM32_TIMERS_MAX_REGISTERS)
		return -EINVAL;

	if (!dma->chans[id])
		return -ENODEV;
	mutex_lock(&dma->lock);

	/* Select DMA channel in use */
	dma->chan = dma->chans[id];
	dma_buf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_buf)) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Prepare DMA read from timer registers, using DMA burst mode */
	memset(&config, 0, sizeof(config));
	config.src_addr = (dma_addr_t)dma->phys_base + TIM_DMAR;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(dma->chan, &config);
	if (ret)
		goto unmap;

	desc = dmaengine_prep_slave_single(dma->chan, dma_buf, len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EBUSY;
		goto unmap;
	}

	desc->callback = stm32_timers_dma_done;
	desc->callback_param = dma;
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto dma_term;

	reinit_completion(&dma->completion);
	dma_async_issue_pending(dma->chan);

	/* Setup and enable timer DMA burst mode */
	dbl = FIELD_PREP(TIM_DCR_DBL, bursts - 1);
	dba = FIELD_PREP(TIM_DCR_DBA, reg >> 2);
	ret = regmap_write(regmap, TIM_DCR, dbl | dba);
	if (ret)
		goto dma_term;

	/* Clear pending flags before enabling DMA request */
	ret = regmap_write(regmap, TIM_SR, 0);
	if (ret)
		goto dcr_clr;

	ret = regmap_update_bits(regmap, TIM_DIER, stm32_timers_dier_dmaen[id],
				 stm32_timers_dier_dmaen[id]);
	if (ret)
		goto dcr_clr;

	err = wait_for_completion_interruptible_timeout(&dma->completion,
							timeout);
	if (err == 0)
		ret = -ETIMEDOUT;
	else if (err < 0)
		ret = err;

	regmap_update_bits(regmap, TIM_DIER, stm32_timers_dier_dmaen[id], 0);
	regmap_write(regmap, TIM_SR, 0);
dcr_clr:
	regmap_write(regmap, TIM_DCR, 0);
dma_term:
	dmaengine_terminate_all(dma->chan);
unmap:
	dma_unmap_single(dev, dma_buf, len, DMA_FROM_DEVICE);
unlock:
	dma->chan = NULL;
	mutex_unlock(&dma->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stm32_timers_dma_burst_read);
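
/*
 * Usage sketch (illustrative only, not taken from this driver): a child
 * device of the timers MFD, e.g. a PWM capture driver, could read the
 * CCR1 and CCR2 registers on two consecutive channel 1 events
 * (num_reg = 2, bursts = 2) into a DMA'able buffer with a 1 s timeout:
 *
 *	u32 *buf = kcalloc(4, sizeof(*buf), GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = stm32_timers_dma_burst_read(dev->parent, buf,
 *					  STM32_TIMERS_DMA_CH1, TIM_CCR1,
 *					  2, 2, 1000);
 *
 * On success, buf[0..3] hold the captured CCR1/CCR2 values for each of the
 * two bursts. The dev->parent reference and the buffer handling here are
 * assumptions made for the example, not requirements of this file.
 */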

static const struct regmap_config stm32_timers_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = sizeof(u32),
	.max_register = STM32_TIMERS_MAX_REGISTERS,
};

static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
{
	/*
	 * Only the available bits will be written, so reading back gives
	 * the maximum value of the auto-reload register.
	 */
	regmap_write(ddata->regmap, TIM_ARR, ~0L);
	regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
	regmap_write(ddata->regmap, TIM_ARR, 0x0);
}

static void stm32_timers_dma_probe(struct device *dev,
				   struct stm32_timers *ddata)
{
	int i;
	char name[4];

	init_completion(&ddata->dma.completion);
	mutex_init(&ddata->dma.lock);

	/* Optional DMA support: get valid DMA channel(s) or NULL */
	for (i = STM32_TIMERS_DMA_CH1; i <= STM32_TIMERS_DMA_CH4; i++) {
		snprintf(name, ARRAY_SIZE(name), "ch%1d", i + 1);
		ddata->dma.chans[i] = dma_request_slave_channel(dev, name);
	}
	ddata->dma.chans[STM32_TIMERS_DMA_UP] =
		dma_request_slave_channel(dev, "up");
	ddata->dma.chans[STM32_TIMERS_DMA_TRIG] =
		dma_request_slave_channel(dev, "trig");
	ddata->dma.chans[STM32_TIMERS_DMA_COM] =
		dma_request_slave_channel(dev, "com");
}

static void stm32_timers_dma_remove(struct device *dev,
				    struct stm32_timers *ddata)
{
	int i;

	for (i = STM32_TIMERS_DMA_CH1; i < STM32_TIMERS_MAX_DMAS; i++)
		if (ddata->dma.chans[i])
			dma_release_channel(ddata->dma.chans[i]);
}

static int stm32_timers_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct stm32_timers *ddata;
	struct resource *res;
	void __iomem *mmio;
	int ret;

	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR(mmio))
		return PTR_ERR(mmio);

	/* Timer physical addr for DMA */
	ddata->dma.phys_base = res->start;

	ddata->regmap = devm_regmap_init_mmio_clk(dev, "int", mmio,
						  &stm32_timers_regmap_cfg);
	if (IS_ERR(ddata->regmap))
		return PTR_ERR(ddata->regmap);

	ddata->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ddata->clk))
		return PTR_ERR(ddata->clk);

	stm32_timers_get_arr_size(ddata);

	stm32_timers_dma_probe(dev, ddata);

	platform_set_drvdata(pdev, ddata);

	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret)
		stm32_timers_dma_remove(dev, ddata);

	return ret;
}

static int stm32_timers_remove(struct platform_device *pdev)
{
	struct stm32_timers *ddata = platform_get_drvdata(pdev);

	/*
	 * Don't use devm_ here: enforce that of_platform_depopulate() happens
	 * before the DMA channels are released, to avoid a race on DMA.
	 */
	of_platform_depopulate(&pdev->dev);
	stm32_timers_dma_remove(&pdev->dev, ddata);

	return 0;
}

static const struct of_device_id stm32_timers_of_match[] = {
	{ .compatible = "st,stm32-timers", },
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_timers_of_match);

static struct platform_driver stm32_timers_driver = {
	.probe = stm32_timers_probe,
	.remove = stm32_timers_remove,
	.driver = {
		.name = "stm32-timers",
		.of_match_table = stm32_timers_of_match,
	},
};
module_platform_driver(stm32_timers_driver);

MODULE_DESCRIPTION("STMicroelectronics STM32 Timers");
MODULE_LICENSE("GPL v2");