/*
 * imx-pcm-fiq.c  --  ALSA Soc Audio Layer
 *
 * Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
 *
 * This code is based on code copyrighted by Freescale,
 * Liam Girdwood, Javier Martin and probably others.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/dmaengine_pcm.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include <asm/fiq.h>

#include <linux/platform_data/asoc-imx-ssi.h>

#include "imx-ssi.h"
#include "imx-pcm.h"
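
/*
 * Per-substream bookkeeping. The audio data itself is moved by the FIQ
 * assembly handler; this driver only polls its progress from a hrtimer,
 * so we keep the period geometry, the polling interval and flags telling
 * whether playback and/or capture are currently running.
 */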
struct imx_pcm_runtime_data {
	unsigned int period;
	int periods;
	unsigned long offset;
	struct hrtimer hrt;
	int poll_time_ns;
	struct snd_pcm_substream *substream;
	atomic_t playing;
	atomic_t capturing;
};
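
/*
 * Timer callback: while a stream is running, fetch the current buffer
 * offset that the FIQ handler leaves in its banked registers (r8 for
 * playback, r9 for capture), tell ALSA a period may have elapsed and
 * re-arm the timer. Once both streams have stopped the timer is not
 * restarted.
 */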
static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
	struct imx_pcm_runtime_data *iprtd =
		container_of(hrt, struct imx_pcm_runtime_data, hrt);
	struct snd_pcm_substream *substream = iprtd->substream;
	struct pt_regs regs;

	if (!atomic_read(&iprtd->playing) && !atomic_read(&iprtd->capturing))
		return HRTIMER_NORESTART;

	get_fiq_regs(&regs);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		iprtd->offset = regs.ARM_r8 & 0xffff;
	else
		iprtd->offset = regs.ARM_r9 & 0xffff;

	snd_pcm_period_elapsed(substream);

	hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns));

	return HRTIMER_RESTART;
}

static struct fiq_handler fh = {
	.name = DRV_NAME,
};
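
/*
 * Cache the period layout and derive the polling interval: one timer
 * tick per period (period_size frames at the configured sample rate).
 */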
static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	iprtd->periods = params_periods(params);
	iprtd->period = params_period_bytes(params);
	iprtd->offset = 0;
	iprtd->poll_time_ns = 1000000000 / params_rate(params) *
				params_period_size(params);
	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	return 0;
}
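
/*
 * Preload the banked FIQ register for the stream direction: the buffer
 * size (minus one) goes in the upper 16 bits, while the lower 16 bits
 * carry the running byte offset read back by the hrtimer callback.
 */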
static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
	struct pt_regs regs;

	get_fiq_regs(&regs);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		regs.ARM_r8 = (iprtd->period * iprtd->periods - 1) << 16;
	else
		regs.ARM_r9 = (iprtd->period * iprtd->periods - 1) << 16;

	set_fiq_regs(&regs);

	return 0;
}

static int imx_pcm_fiq;
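
/*
 * Start/stop: flag the direction as (in)active, kick off the polling
 * hrtimer and enable the FIQ on start, and disable the FIQ again once
 * neither direction is running.
 */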
static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			atomic_set(&iprtd->playing, 1);
		else
			atomic_set(&iprtd->capturing, 1);
		hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns),
			HRTIMER_MODE_REL);
		enable_fiq(imx_pcm_fiq);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			atomic_set(&iprtd->playing, 0);
		else
			atomic_set(&iprtd->capturing, 0);
		if (!atomic_read(&iprtd->playing) &&
				!atomic_read(&iprtd->capturing))
			disable_fiq(imx_pcm_fiq);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
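
/*
 * Report the position last sampled from the FIQ registers by the
 * hrtimer callback.
 */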
static snd_pcm_uframes_t snd_imx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	return bytes_to_frames(substream->runtime, iprtd->offset);
}

static struct snd_pcm_hardware snd_imx_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
	.period_bytes_min = 128,
	.period_bytes_max = 16 * 1024,
	.periods_min = 4,
	.periods_max = 255,
	.fifo_size = 0,
};
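
/*
 * Allocate the per-substream state, set up the polling timer and apply
 * the hardware constraints above.
 */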
static int snd_imx_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd;
	int ret;

	iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL);
	if (iprtd == NULL)
		return -ENOMEM;
	runtime->private_data = iprtd;

	iprtd->substream = substream;

	atomic_set(&iprtd->playing, 0);
	atomic_set(&iprtd->capturing, 0);
	hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	iprtd->hrt.function = snd_hrtimer_callback;

	ret = snd_pcm_hw_constraint_integer(substream->runtime,
			SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0) {
		kfree(iprtd);
		return ret;
	}

	snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
	return 0;
}

static int snd_imx_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	hrtimer_cancel(&iprtd->hrt);

	kfree(iprtd);

	return 0;
}
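
/*
 * Map the preallocated write-combined DMA buffer straight into user
 * space.
 */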
static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
		struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	ret = dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
			  runtime->dma_addr, runtime->dma_bytes);

	pr_debug("%s: ret: %d %p %pad 0x%08x\n", __func__, ret,
			runtime->dma_area,
			&runtime->dma_addr,
			runtime->dma_bytes);
	return ret;
}

static struct snd_pcm_ops imx_pcm_ops = {
	.open = snd_imx_open,
	.close = snd_imx_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = snd_imx_pcm_hw_params,
	.prepare = snd_imx_pcm_prepare,
	.trigger = snd_imx_pcm_trigger,
	.pointer = snd_imx_pcm_pointer,
	.mmap = snd_imx_pcm_mmap,
};
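
/*
 * Preallocate one write-combined buffer per direction. The FIQ handler
 * copies to/from this buffer, so it is allocated coherently rather than
 * streaming-mapped.
 */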
static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	size_t size = IMX_SSI_DMABUF_SIZE;

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;
	buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->bytes = size;

	return 0;
}
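
/*
 * Set a 32-bit DMA mask on the card and preallocate a buffer for each
 * direction that actually has a substream.
 */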
static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = imx_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_PLAYBACK);
		if (ret)
			return ret;
	}

	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
		ret = imx_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_CAPTURE);
		if (ret)
			return ret;
	}

	return 0;
}

static int ssi_irq = 0;
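
/*
 * PCM construction: do the common buffer setup, then hand the buffer
 * addresses to the FIQ assembly (imx_ssi_fiq_tx_buffer/rx_buffer) and
 * install the handler code.
 */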
static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_pcm_substream *substream;
	int ret;

	ret = imx_pcm_new(rtd);
	if (ret)
		return ret;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (substream) {
		struct snd_dma_buffer *buf = &substream->dma_buffer;

		imx_ssi_fiq_tx_buffer = (unsigned long)buf->area;
	}

	substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
	if (substream) {
		struct snd_dma_buffer *buf = &substream->dma_buffer;

		imx_ssi_fiq_rx_buffer = (unsigned long)buf->area;
	}

	set_fiq_handler(&imx_ssi_fiq_start,
		&imx_ssi_fiq_end - &imx_ssi_fiq_start);

	return 0;
}
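
/*
 * Release the preallocated buffers for both directions.
 */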
static void imx_pcm_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;
	int stream;

	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (!substream)
			continue;

		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;

		dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
		buf->area = NULL;
	}
}

static void imx_pcm_fiq_free(struct snd_pcm *pcm)
{
	mxc_set_irq_fiq(ssi_irq, 0);
	release_fiq(&fh);
	imx_pcm_free(pcm);
}

static struct snd_soc_platform_driver imx_soc_platform_fiq = {
	.ops = &imx_pcm_ops,
	.pcm_new = imx_pcm_fiq_new,
	.pcm_free = imx_pcm_fiq_free,
};
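
/*
 * Entry point called by the SSI driver: claim the FIQ, route the SSI
 * interrupt to it and register the platform driver built around the
 * ops above.
 */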
int imx_pcm_fiq_init(struct platform_device *pdev,
		struct imx_pcm_fiq_params *params)
{
	int ret;

	ret = claim_fiq(&fh);
	if (ret) {
		dev_err(&pdev->dev, "failed to claim fiq: %d", ret);
		return ret;
	}

	mxc_set_irq_fiq(params->irq, 1);
	ssi_irq = params->irq;

	imx_pcm_fiq = params->irq;

	imx_ssi_fiq_base = (unsigned long)params->base;

	params->dma_params_tx->maxburst = 4;
	params->dma_params_rx->maxburst = 6;

	ret = snd_soc_register_platform(&pdev->dev, &imx_soc_platform_fiq);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
	mxc_set_irq_fiq(ssi_irq, 0);
	release_fiq(&fh);

	return ret;
}
EXPORT_SYMBOL_GPL(imx_pcm_fiq_init);

void imx_pcm_fiq_exit(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
}
EXPORT_SYMBOL_GPL(imx_pcm_fiq_exit);

MODULE_LICENSE("GPL");