/*
 * imx-pcm-fiq.c -- ALSA SoC Audio Layer
 *
 * Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
 *
 * This code is based on code copyrighted by Freescale,
 * Liam Girdwood, Javier Martin and probably others.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/dmaengine_pcm.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include <asm/fiq.h>

#include <linux/platform_data/asoc-imx-ssi.h>

#include "imx-ssi.h"
#include "imx-pcm.h"
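
/*
 * Per-substream bookkeeping for the FIQ-based PCM: buffer geometry taken
 * from hw_params, the byte offset last read back from the FIQ registers,
 * and an hrtimer that polls that offset roughly once per period.
 */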
struct imx_pcm_runtime_data {
	unsigned int period;
	int periods;
	unsigned long offset;
	unsigned long last_offset;
	unsigned long size;
	struct hrtimer hrt;
	int poll_time_ns;
	struct snd_pcm_substream *substream;
	atomic_t playing;
	atomic_t capturing;
};
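
/*
 * The SSI FIQ assembly handler maintains the current byte offset into the
 * DMA buffer in the banked FIQ registers: r8 for playback, r9 for capture
 * (seeded in snd_imx_pcm_prepare()). This timer callback polls that offset
 * and reports a period elapsed once at least one period's worth of data
 * has been transferred.
 */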
static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
	struct imx_pcm_runtime_data *iprtd =
		container_of(hrt, struct imx_pcm_runtime_data, hrt);
	struct snd_pcm_substream *substream = iprtd->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct pt_regs regs;
	unsigned long delta;

	if (!atomic_read(&iprtd->playing) && !atomic_read(&iprtd->capturing))
		return HRTIMER_NORESTART;

	get_fiq_regs(&regs);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		iprtd->offset = regs.ARM_r8 & 0xffff;
	else
		iprtd->offset = regs.ARM_r9 & 0xffff;

	/* How much data have we transferred since the last period report? */
	if (iprtd->offset >= iprtd->last_offset)
		delta = iprtd->offset - iprtd->last_offset;
	else
		delta = runtime->buffer_size + iprtd->offset
			- iprtd->last_offset;

	/* If we've transferred at least a period then report it and
	 * reset our poll time */
	if (delta >= iprtd->period) {
		snd_pcm_period_elapsed(substream);
		iprtd->last_offset = iprtd->offset;
	}

	hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns));

	return HRTIMER_RESTART;
}
static struct fiq_handler fh = {
	.name		= DRV_NAME,
};
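
/*
 * poll_time_ns is the duration of one period (period_size frames at the
 * configured rate), so the hrtimer above fires about once per period.
 */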
static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	iprtd->size = params_buffer_bytes(params);
	iprtd->periods = params_periods(params);
	iprtd->period = params_period_bytes(params);
	iprtd->offset = 0;
	iprtd->last_offset = 0;
	iprtd->poll_time_ns = 1000000000 / params_rate(params) *
				params_period_size(params);
	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	return 0;
}
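
/*
 * Seed the FIQ registers for this stream. The low 16 bits carry the running
 * byte offset read back in snd_hrtimer_callback(); the upper 16 bits
 * presumably tell the FIQ assembly handler where the buffer ends
 * (period * periods bytes) so it can wrap.
 */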
static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
	struct pt_regs regs;

	get_fiq_regs(&regs);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		regs.ARM_r8 = (iprtd->period * iprtd->periods - 1) << 16;
	else
		regs.ARM_r9 = (iprtd->period * iprtd->periods - 1) << 16;

	set_fiq_regs(&regs);

	return 0;
}
static int imx_pcm_fiq;
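
/*
 * Start/stop path: kick the polling hrtimer and enable the SSI FIQ when
 * either direction starts; only disable the FIQ again once both playback
 * and capture have stopped.
 */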
static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			atomic_set(&iprtd->playing, 1);
		else
			atomic_set(&iprtd->capturing, 1);
		hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns),
			      HRTIMER_MODE_REL);
		enable_fiq(imx_pcm_fiq);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			atomic_set(&iprtd->playing, 0);
		else
			atomic_set(&iprtd->capturing, 0);
		if (!atomic_read(&iprtd->playing) &&
		    !atomic_read(&iprtd->capturing))
			disable_fiq(imx_pcm_fiq);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static snd_pcm_uframes_t snd_imx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	return bytes_to_frames(substream->runtime, iprtd->offset);
}
static struct snd_pcm_hardware snd_imx_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rate_min = 8000,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
	.period_bytes_min = 128,
	.period_bytes_max = 16 * 1024,
	.periods_min = 4,
	.periods_max = 255,
	.fifo_size = 0,
};
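
/*
 * Allocate the per-substream state, constrain the buffer to an integer
 * number of periods and set up (but do not yet start) the polling hrtimer.
 */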
static int snd_imx_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd;
	int ret;

	iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL);
	if (iprtd == NULL)
		return -ENOMEM;
	runtime->private_data = iprtd;

	iprtd->substream = substream;

	atomic_set(&iprtd->playing, 0);
	atomic_set(&iprtd->capturing, 0);
	hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	iprtd->hrt.function = snd_hrtimer_callback;

	ret = snd_pcm_hw_constraint_integer(substream->runtime,
			SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0) {
		kfree(iprtd);
		return ret;
	}

	snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
	return 0;
}
static int snd_imx_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	hrtimer_cancel(&iprtd->hrt);

	kfree(iprtd);

	return 0;
}
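
/* Map the preallocated write-combined DMA buffer directly into user space. */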
static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
		struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	ret = dma_mmap_writecombine(substream->pcm->card->dev, vma,
		runtime->dma_area, runtime->dma_addr, runtime->dma_bytes);

	pr_debug("%s: ret: %d %p 0x%08x 0x%08x\n", __func__, ret,
			runtime->dma_area,
			runtime->dma_addr,
			runtime->dma_bytes);
	return ret;
}
static struct snd_pcm_ops imx_pcm_ops = {
	.open		= snd_imx_open,
	.close		= snd_imx_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= snd_imx_pcm_hw_params,
	.prepare	= snd_imx_pcm_prepare,
	.trigger	= snd_imx_pcm_trigger,
	.pointer	= snd_imx_pcm_pointer,
	.mmap		= snd_imx_pcm_mmap,
};
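
/*
 * Preallocate one write-combined DMA buffer per direction; both the FIQ
 * handler and userspace mmap() work on this area directly.
 */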
static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	size_t size = IMX_SSI_DMABUF_SIZE;

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;
	buf->area = dma_alloc_writecombine(pcm->card->dev, size,
					   &buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->bytes = size;

	return 0;
}
static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
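
/*
 * Generic buffer setup: provide fallback DMA masks and preallocate buffers
 * for whichever substreams the link actually has.
 */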
static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret = 0;

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &imx_pcm_dmamask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = imx_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_PLAYBACK);
		if (ret)
			goto out;
	}

	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
		ret = imx_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_CAPTURE);
		if (ret)
			goto out;
	}

out:
	return ret;
}
static int ssi_irq = 0;
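
/*
 * FIQ-specific PCM construction: after the generic buffer allocation, hand
 * the buffer addresses to the SSI FIQ code and install its assembly routine
 * as the system FIQ handler.
 */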
static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_pcm_substream *substream;
	int ret;

	ret = imx_pcm_new(rtd);
	if (ret)
		return ret;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (substream) {
		struct snd_dma_buffer *buf = &substream->dma_buffer;

		imx_ssi_fiq_tx_buffer = (unsigned long)buf->area;
	}

	substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
	if (substream) {
		struct snd_dma_buffer *buf = &substream->dma_buffer;

		imx_ssi_fiq_rx_buffer = (unsigned long)buf->area;
	}

	set_fiq_handler(&imx_ssi_fiq_start,
		&imx_ssi_fiq_end - &imx_ssi_fiq_start);

	return 0;
}
static void imx_pcm_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;
	int stream;

	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (!substream)
			continue;

		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;

		dma_free_writecombine(pcm->card->dev, buf->bytes,
				      buf->area, buf->addr);
		buf->area = NULL;
	}
}
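
/* Undo the FIQ routing claimed in imx_pcm_fiq_init() before freeing buffers. */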
static void imx_pcm_fiq_free(struct snd_pcm *pcm)
{
	mxc_set_irq_fiq(ssi_irq, 0);
	release_fiq(&fh);
	imx_pcm_free(pcm);
}
static struct snd_soc_platform_driver imx_soc_platform_fiq = {
	.ops		= &imx_pcm_ops,
	.pcm_new	= imx_pcm_fiq_new,
	.pcm_free	= imx_pcm_fiq_free,
};
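
/*
 * Entry point used by the SSI driver: claim the FIQ, route the SSI interrupt
 * to FIQ mode and register this code as the ASoC platform (PCM) driver.
 */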
int imx_pcm_fiq_init(struct platform_device *pdev,
		struct imx_pcm_fiq_params *params)
{
	int ret;

	ret = claim_fiq(&fh);
	if (ret) {
		dev_err(&pdev->dev, "failed to claim fiq: %d", ret);
		return ret;
	}

	mxc_set_irq_fiq(params->irq, 1);
	ssi_irq = params->irq;

	imx_pcm_fiq = params->irq;

	imx_ssi_fiq_base = (unsigned long)params->base;

	params->dma_params_tx->maxburst = 4;
	params->dma_params_rx->maxburst = 6;

	ret = snd_soc_register_platform(&pdev->dev, &imx_soc_platform_fiq);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
	mxc_set_irq_fiq(ssi_irq, 0);
	release_fiq(&fh);

	return ret;
}
EXPORT_SYMBOL_GPL(imx_pcm_fiq_init);
void imx_pcm_fiq_exit(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
}
EXPORT_SYMBOL_GPL(imx_pcm_fiq_exit);
MODULE_LICENSE("GPL");