ACPI: EC: Rework flushing of pending work
[linux/fpc-iii.git] / sound / soc / meson / axg-fifo.c
blob5a3749938900fa7f1bbb33169f8d8c8847524d52
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 //
3 // Copyright (c) 2018 BayLibre, SAS.
4 // Author: Jerome Brunet <jbrunet@baylibre.com>
6 #include <linux/clk.h>
7 #include <linux/of_irq.h>
8 #include <linux/of_platform.h>
9 #include <linux/module.h>
10 #include <linux/regmap.h>
11 #include <linux/reset.h>
12 #include <sound/pcm_params.h>
13 #include <sound/soc.h>
14 #include <sound/soc-dai.h>
16 #include "axg-fifo.h"
/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of FIFO is very
 * similar but some differences exist.
 * These differences are handled in the respective DAI drivers.
 */
/*
 * PCM capabilities advertised for both playback and capture streams.
 * The per-SoC DAI drivers may further constrain these at open time.
 */
static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE),

	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	/* Periods must hold at least one full fifo refill */
	.period_bytes_min = AXG_FIFO_MIN_DEPTH,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};
46 static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
48 struct snd_soc_pcm_runtime *rtd = ss->private_data;
50 return rtd->cpu_dai;
/* Get the fifo private data attached to the substream's CPU DAI */
static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	return snd_soc_dai_get_drvdata(axg_fifo_dai(ss));
}
60 static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
62 struct snd_soc_dai *dai = axg_fifo_dai(ss);
64 return dai->dev;
67 static void __dma_enable(struct axg_fifo *fifo, bool enable)
69 regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
70 enable ? CTRL0_DMA_EN : 0);
73 static int axg_fifo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
75 struct axg_fifo *fifo = axg_fifo_data(ss);
77 switch (cmd) {
78 case SNDRV_PCM_TRIGGER_START:
79 case SNDRV_PCM_TRIGGER_RESUME:
80 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
81 __dma_enable(fifo, true);
82 break;
83 case SNDRV_PCM_TRIGGER_SUSPEND:
84 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
85 case SNDRV_PCM_TRIGGER_STOP:
86 __dma_enable(fifo, false);
87 break;
88 default:
89 return -EINVAL;
92 return 0;
95 static snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_pcm_substream *ss)
97 struct axg_fifo *fifo = axg_fifo_data(ss);
98 struct snd_pcm_runtime *runtime = ss->runtime;
99 unsigned int addr;
101 regmap_read(fifo->map, FIFO_STATUS2, &addr);
103 return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
/*
 * Allocate the DMA buffer and program the fifo boundaries and the
 * interrupt period.
 * Returns 0 on success or a negative error code from the allocator.
 */
static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
				  struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	dma_addr_t end_ptr;
	unsigned int burst_num;
	int ret;

	ret = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	/*
	 * Setup dma memory pointers. The finish address is the start of
	 * the last burst, not the first byte past the buffer.
	 */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);

	/* Setup interrupt periodicity: one irq every 'burst_num' bursts */
	burst_num = params_period_bytes(params) / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/* Enable block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT));

	return 0;
}
136 static int g12a_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
137 struct snd_pcm_hw_params *params)
139 struct axg_fifo *fifo = axg_fifo_data(ss);
140 struct snd_pcm_runtime *runtime = ss->runtime;
141 int ret;
143 ret = axg_fifo_pcm_hw_params(ss, params);
144 if (ret)
145 return ret;
147 /* Set the initial memory address of the DMA */
148 regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr);
150 return 0;
153 static int axg_fifo_pcm_hw_free(struct snd_pcm_substream *ss)
155 struct axg_fifo *fifo = axg_fifo_data(ss);
157 /* Disable the block count irq */
158 regmap_update_bits(fifo->map, FIFO_CTRL0,
159 CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);
161 return snd_pcm_lib_free_pages(ss);
/*
 * Acknowledge the fifo interrupts selected by @mask.
 * The clear bits of FIFO_CTRL1 are not self-resetting: after raising
 * them, they must be written back to zero, otherwise no further
 * interrupt is delivered.
 */
static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   CTRL1_INT_CLR(mask));

	/* Clear must also be cleared */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   0);
}
/*
 * Per-substream interrupt handler.
 * Notifies ALSA of an elapsed period when the block count irq fired,
 * acks whatever was pending, and reports IRQ_HANDLED only if at least
 * one known irq bit was set.
 */
static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);

	/* Keep only the irq status bits this driver knows about */
	status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
	if (status & FIFO_INT_COUNT_REPEAT)
		snd_pcm_period_elapsed(ss);
	else
		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
			status);

	/* Ack irqs */
	axg_fifo_ack_irq(fifo, status);

	/* status == 0 means the irq was not ours: IRQ_NONE */
	return IRQ_RETVAL(status);
}
197 static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
199 struct axg_fifo *fifo = axg_fifo_data(ss);
200 struct device *dev = axg_fifo_dev(ss);
201 int ret;
203 snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);
206 * Make sure the buffer and period size are multiple of the FIFO
207 * minimum depth size
209 ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
210 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
211 AXG_FIFO_MIN_DEPTH);
212 if (ret)
213 return ret;
215 ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
216 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
217 AXG_FIFO_MIN_DEPTH);
218 if (ret)
219 return ret;
221 ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
222 dev_name(dev), ss);
223 if (ret)
224 return ret;
226 /* Enable pclk to access registers and clock the fifo ip */
227 ret = clk_prepare_enable(fifo->pclk);
228 if (ret)
229 return ret;
231 /* Setup status2 so it reports the memory pointer */
232 regmap_update_bits(fifo->map, FIFO_CTRL1,
233 CTRL1_STATUS2_SEL_MASK,
234 CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));
236 /* Make sure the dma is initially disabled */
237 __dma_enable(fifo, false);
239 /* Disable irqs until params are ready */
240 regmap_update_bits(fifo->map, FIFO_CTRL0,
241 CTRL0_INT_EN(FIFO_INT_MASK), 0);
243 /* Clear any pending interrupt */
244 axg_fifo_ack_irq(fifo, FIFO_INT_MASK);
246 /* Take memory arbitror out of reset */
247 ret = reset_control_deassert(fifo->arb);
248 if (ret)
249 clk_disable_unprepare(fifo->pclk);
251 return ret;
/*
 * Release the fifo: mirror of axg_fifo_pcm_open() in reverse order.
 * The arbitror reset result is reported, but the clock and irq are
 * released unconditionally so nothing leaks even if the assert fails.
 */
static int axg_fifo_pcm_close(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbitror back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* remove IRQ */
	free_irq(fifo->irq, ss);

	return ret;
}
/* PCM operations shared by the AXG playback and capture frontends */
const struct snd_pcm_ops axg_fifo_pcm_ops = {
	.open = axg_fifo_pcm_open,
	.close = axg_fifo_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = axg_fifo_pcm_hw_params,
	.hw_free = axg_fifo_pcm_hw_free,
	.pointer = axg_fifo_pcm_pointer,
	.trigger = axg_fifo_pcm_trigger,
};
EXPORT_SYMBOL_GPL(axg_fifo_pcm_ops);
/* G12A PCM operations: same as AXG except for the hw_params callback */
const struct snd_pcm_ops g12a_fifo_pcm_ops = {
	.open = axg_fifo_pcm_open,
	.close = axg_fifo_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = g12a_fifo_pcm_hw_params,
	.hw_free = axg_fifo_pcm_hw_free,
	.pointer = axg_fifo_pcm_pointer,
	.trigger = axg_fifo_pcm_trigger,
};
EXPORT_SYMBOL_GPL(g12a_fifo_pcm_ops);
293 int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
295 struct snd_card *card = rtd->card->snd_card;
296 size_t size = axg_fifo_hw.buffer_bytes_max;
298 snd_pcm_lib_preallocate_pages(rtd->pcm->streams[type].substream,
299 SNDRV_DMA_TYPE_DEV, card->dev,
300 size, size);
301 return 0;
303 EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);
/* 32bit registers, 4-byte stride; FIFO_CTRL2 is the last register */
static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = FIFO_CTRL2,
};
312 int axg_fifo_probe(struct platform_device *pdev)
314 struct device *dev = &pdev->dev;
315 const struct axg_fifo_match_data *data;
316 struct axg_fifo *fifo;
317 void __iomem *regs;
319 data = of_device_get_match_data(dev);
320 if (!data) {
321 dev_err(dev, "failed to match device\n");
322 return -ENODEV;
325 fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
326 if (!fifo)
327 return -ENOMEM;
328 platform_set_drvdata(pdev, fifo);
330 regs = devm_platform_ioremap_resource(pdev, 0);
331 if (IS_ERR(regs))
332 return PTR_ERR(regs);
334 fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
335 if (IS_ERR(fifo->map)) {
336 dev_err(dev, "failed to init regmap: %ld\n",
337 PTR_ERR(fifo->map));
338 return PTR_ERR(fifo->map);
341 fifo->pclk = devm_clk_get(dev, NULL);
342 if (IS_ERR(fifo->pclk)) {
343 if (PTR_ERR(fifo->pclk) != -EPROBE_DEFER)
344 dev_err(dev, "failed to get pclk: %ld\n",
345 PTR_ERR(fifo->pclk));
346 return PTR_ERR(fifo->pclk);
349 fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
350 if (IS_ERR(fifo->arb)) {
351 if (PTR_ERR(fifo->arb) != -EPROBE_DEFER)
352 dev_err(dev, "failed to get arb reset: %ld\n",
353 PTR_ERR(fifo->arb));
354 return PTR_ERR(fifo->arb);
357 fifo->irq = of_irq_get(dev->of_node, 0);
358 if (fifo->irq <= 0) {
359 dev_err(dev, "failed to get irq: %d\n", fifo->irq);
360 return fifo->irq;
363 return devm_snd_soc_register_component(dev, data->component_drv,
364 data->dai_drv, 1);
366 EXPORT_SYMBOL_GPL(axg_fifo_probe);
368 MODULE_DESCRIPTION("Amlogic AXG/G12A fifo driver");
369 MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
370 MODULE_LICENSE("GPL v2");