sound/soc/txx9/txx9aclc.c

/*
 * Generic TXx9 ACLC platform driver
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * Based on RBTX49xx patch from CELF patch archive.
 * (C) Copyright TOSHIBA CORPORATION 2004-2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "txx9aclc.h"

static const struct snd_pcm_hardware txx9aclc_pcm_hardware = {
        /*
         * REVISIT: SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
         * needs more work for noncoherent MIPS.
         */
        .info             = SNDRV_PCM_INFO_INTERLEAVED |
                            SNDRV_PCM_INFO_BATCH |
                            SNDRV_PCM_INFO_PAUSE,
#ifdef __BIG_ENDIAN
        .formats          = SNDRV_PCM_FMTBIT_S16_BE,
#else
        .formats          = SNDRV_PCM_FMTBIT_S16_LE,
#endif
        .period_bytes_min = 1024,
        .period_bytes_max = 8 * 1024,
        .periods_min      = 2,
        .periods_max      = 4096,
        .buffer_bytes_max = 32 * 1024,
};

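/*
 * Allocate the DMA buffer for this stream from the preallocated pages and
 * remember the substream for use by the DMA machinery.
 */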
static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream,
                                  struct snd_pcm_hw_params *params)
{
        struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
        struct snd_soc_device *socdev = rtd->socdev;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct txx9aclc_dmadata *dmadata = runtime->private_data;
        int ret;

        ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
        if (ret < 0)
                return ret;

        dev_dbg(socdev->dev,
                "runtime->dma_area = %#lx dma_addr = %#lx dma_bytes = %zd "
                "runtime->min_align %ld\n",
                (unsigned long)runtime->dma_area,
                (unsigned long)runtime->dma_addr, runtime->dma_bytes,
                runtime->min_align);
        dev_dbg(socdev->dev,
                "periods %d period_bytes %d stream %d\n",
                params_periods(params), params_period_bytes(params),
                substream->stream);

        dmadata->substream = substream;
        dmadata->pos = 0;
        return 0;
}

static int txx9aclc_pcm_hw_free(struct snd_pcm_substream *substream)
{
        return snd_pcm_lib_free_pages(substream);
}

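/*
 * Split the buffer into DMA fragments: normally one fragment per period, but
 * if the buffer holds only a single period it is halved so that two
 * descriptors can still be kept in flight.
 */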
static int txx9aclc_pcm_prepare(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct txx9aclc_dmadata *dmadata = runtime->private_data;

        dmadata->dma_addr = runtime->dma_addr;
        dmadata->buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
        dmadata->period_bytes = snd_pcm_lib_period_bytes(substream);

        if (dmadata->buffer_bytes == dmadata->period_bytes) {
                dmadata->frag_bytes = dmadata->period_bytes >> 1;
                dmadata->frags = 2;
        } else {
                dmadata->frag_bytes = dmadata->period_bytes;
                dmadata->frags = dmadata->buffer_bytes / dmadata->period_bytes;
        }
        dmadata->frag_count = 0;
        dmadata->pos = 0;
        return 0;
}

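/*
 * Descriptor completion callback. It runs in a context where new DMA
 * operations must not be submitted, so it only accounts the finished
 * fragment and lets the tasklet queue the next one. A negative frag_count
 * means the stream has been stopped.
 */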
static void txx9aclc_dma_complete(void *arg)
{
        struct txx9aclc_dmadata *dmadata = arg;
        unsigned long flags;

        /* dma completion handler cannot submit new operations */
        spin_lock_irqsave(&dmadata->dma_lock, flags);
        if (dmadata->frag_count >= 0) {
                dmadata->dmacount--;
                BUG_ON(dmadata->dmacount < 0);
                tasklet_schedule(&dmadata->tasklet);
        }
        spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

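/*
 * Map one fragment as a single-entry scatterlist and queue it on the slave
 * DMA channel, with txx9aclc_dma_complete() as its completion callback.
 */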
static struct dma_async_tx_descriptor *
txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
{
        struct dma_chan *chan = dmadata->dma_chan;
        struct dma_async_tx_descriptor *desc;
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)),
                    dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1));
        sg_dma_address(&sg) = buf_dma_addr;
        desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
                dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
                DMA_TO_DEVICE : DMA_FROM_DEVICE,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(&chan->dev->device, "cannot prepare slave dma\n");
                return NULL;
        }
        desc->callback = txx9aclc_dma_complete;
        desc->callback_param = dmadata;
        desc->tx_submit(desc);
        return desc;
}

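/*
 * Number of fragment descriptors kept queued on the DMA channel. The tasklet
 * below primes the channel with NR_DMA_CHAIN fragments on the first run after
 * a trigger (frag_count < 0) and enables the FIFO DMA; on subsequent runs it
 * refills the queue and calls snd_pcm_period_elapsed() at period boundaries.
 */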
#define NR_DMA_CHAIN		2

static void txx9aclc_dma_tasklet(unsigned long data)
{
        struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data;
        struct dma_chan *chan = dmadata->dma_chan;
        struct dma_async_tx_descriptor *desc;
        struct snd_pcm_substream *substream = dmadata->substream;
        u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
                ACCTL_AUDODMA : ACCTL_AUDIDMA;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&dmadata->dma_lock, flags);
        if (dmadata->frag_count < 0) {
                struct txx9aclc_soc_device *dev =
                        container_of(dmadata, struct txx9aclc_soc_device,
                                     dmadata[substream->stream]);
                struct txx9aclc_plat_drvdata *drvdata =
                        txx9aclc_get_plat_drvdata(dev);
                void __iomem *base = drvdata->base;

                spin_unlock_irqrestore(&dmadata->dma_lock, flags);
                chan->device->device_terminate_all(chan);
                /* first time */
                for (i = 0; i < NR_DMA_CHAIN; i++) {
                        desc = txx9aclc_dma_submit(dmadata,
                                dmadata->dma_addr + i * dmadata->frag_bytes);
                        if (!desc)
                                return;
                }
                dmadata->dmacount = NR_DMA_CHAIN;
                chan->device->device_issue_pending(chan);
                spin_lock_irqsave(&dmadata->dma_lock, flags);
                __raw_writel(ctlbit, base + ACCTLEN);
                dmadata->frag_count = NR_DMA_CHAIN % dmadata->frags;
                spin_unlock_irqrestore(&dmadata->dma_lock, flags);
                return;
        }
        BUG_ON(dmadata->dmacount >= NR_DMA_CHAIN);
        while (dmadata->dmacount < NR_DMA_CHAIN) {
                dmadata->dmacount++;
                spin_unlock_irqrestore(&dmadata->dma_lock, flags);
                desc = txx9aclc_dma_submit(dmadata,
                        dmadata->dma_addr +
                        dmadata->frag_count * dmadata->frag_bytes);
                if (!desc)
                        return;
                chan->device->device_issue_pending(chan);

                spin_lock_irqsave(&dmadata->dma_lock, flags);
                dmadata->frag_count++;
                dmadata->frag_count %= dmadata->frags;
                dmadata->pos += dmadata->frag_bytes;
                dmadata->pos %= dmadata->buffer_bytes;
                if ((dmadata->frag_count * dmadata->frag_bytes) %
                    dmadata->period_bytes == 0)
                        snd_pcm_period_elapsed(substream);
        }
        spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}

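/*
 * START hands over to the tasklet (frag_count = -1 marks a fresh start);
 * STOP/PAUSE_PUSH/SUSPEND and PAUSE_RELEASE/RESUME simply disable or
 * re-enable the FIFO DMA control bit for the stream direction.
 */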
static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
        struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct txx9aclc_soc_device *dev =
                container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev);
        struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
        void __iomem *base = drvdata->base;
        unsigned long flags;
        int ret = 0;
        u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
                ACCTL_AUDODMA : ACCTL_AUDIDMA;

        spin_lock_irqsave(&dmadata->dma_lock, flags);
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                dmadata->frag_count = -1;
                tasklet_schedule(&dmadata->tasklet);
                break;
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
        case SNDRV_PCM_TRIGGER_SUSPEND:
                __raw_writel(ctlbit, base + ACCTLDIS);
                break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
        case SNDRV_PCM_TRIGGER_RESUME:
                __raw_writel(ctlbit, base + ACCTLEN);
                break;
        default:
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&dmadata->dma_lock, flags);
        return ret;
}

static snd_pcm_uframes_t
txx9aclc_pcm_pointer(struct snd_pcm_substream *substream)
{
        struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;

        return bytes_to_frames(substream->runtime, dmadata->pos);
}

static int txx9aclc_pcm_open(struct snd_pcm_substream *substream)
{
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct txx9aclc_soc_device *dev =
                container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev);
        struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream];
        int ret;

        ret = snd_soc_set_runtime_hwparams(substream, &txx9aclc_pcm_hardware);
        if (ret)
                return ret;
        /* ensure that buffer size is a multiple of period size */
        ret = snd_pcm_hw_constraint_integer(substream->runtime,
                                            SNDRV_PCM_HW_PARAM_PERIODS);
        if (ret < 0)
                return ret;
        substream->runtime->private_data = dmadata;
        return 0;
}

static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
{
        struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
        struct dma_chan *chan = dmadata->dma_chan;

        dmadata->frag_count = -1;
        chan->device->device_terminate_all(chan);
        return 0;
}

static struct snd_pcm_ops txx9aclc_pcm_ops = {
        .open		= txx9aclc_pcm_open,
        .close		= txx9aclc_pcm_close,
        .ioctl		= snd_pcm_lib_ioctl,
        .hw_params	= txx9aclc_pcm_hw_params,
        .hw_free	= txx9aclc_pcm_hw_free,
        .prepare	= txx9aclc_pcm_prepare,
        .trigger	= txx9aclc_pcm_trigger,
        .pointer	= txx9aclc_pcm_pointer,
};

static void txx9aclc_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
        snd_pcm_lib_preallocate_free_for_all(pcm);
}

static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
                            struct snd_pcm *pcm)
{
        return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                card->dev, 64 * 1024, 4 * 1024 * 1024);
}

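/*
 * dma_request_channel() filter: accept only the channel whose DMA device
 * name matches "<dma resource name>.<resource start>" and attach our
 * txx9dmac slave parameters to it.
 */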
static bool filter(struct dma_chan *chan, void *param)
{
        struct txx9aclc_dmadata *dmadata = param;
        char devname[20 + 2]; /* FIXME: old BUS_ID_SIZE + 2 */

        snprintf(devname, sizeof(devname), "%s.%d", dmadata->dma_res->name,
                 (int)dmadata->dma_res->start);
        if (strcmp(dev_name(chan->device->dev), devname) == 0) {
                chan->private = &dmadata->dma_slave;
                return true;
        }
        return false;
}

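/*
 * Per-stream DMA setup: point the txx9dmac slave at the ACLC audio data
 * FIFO register for this direction and request a matching DMA channel.
 */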
static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
                             struct txx9aclc_dmadata *dmadata)
{
        struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
        struct txx9dmac_slave *ds = &dmadata->dma_slave;
        dma_cap_mask_t mask;

        spin_lock_init(&dmadata->dma_lock);

        ds->reg_width = sizeof(u32);
        if (dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                ds->tx_reg = drvdata->physbase + ACAUDODAT;
                ds->rx_reg = 0;
        } else {
                ds->tx_reg = 0;
                ds->rx_reg = drvdata->physbase + ACAUDIDAT;
        }

        /* Try to grab a DMA channel */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        dmadata->dma_chan = dma_request_channel(mask, filter, dmadata);
        if (!dmadata->dma_chan) {
                dev_err(dev->soc_dev.dev,
                        "DMA channel for %s is not available\n",
                        dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK ?
                        "playback" : "capture");
                return -EBUSY;
        }
        tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet,
                     (unsigned long)dmadata);
        return 0;
}

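/*
 * Platform probe: dmadata[0] handles playback and dmadata[1] capture; each is
 * bound to one IORESOURCE_DMA entry of the ACLC device. On any failure the
 * channels grabbed so far are released again.
 */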
static int txx9aclc_pcm_probe(struct platform_device *pdev)
{
        struct snd_soc_device *socdev = platform_get_drvdata(pdev);
        struct txx9aclc_soc_device *dev =
                container_of(socdev, struct txx9aclc_soc_device, soc_dev);
        struct resource *r;
        int i;
        int ret;

        dev->dmadata[0].stream = SNDRV_PCM_STREAM_PLAYBACK;
        dev->dmadata[1].stream = SNDRV_PCM_STREAM_CAPTURE;
        for (i = 0; i < 2; i++) {
                r = platform_get_resource(dev->aclc_pdev, IORESOURCE_DMA, i);
                if (!r) {
                        ret = -EBUSY;
                        goto exit;
                }
                dev->dmadata[i].dma_res = r;
                ret = txx9aclc_dma_init(dev, &dev->dmadata[i]);
                if (ret)
                        goto exit;
        }
        return 0;

exit:
        for (i = 0; i < 2; i++) {
                if (dev->dmadata[i].dma_chan)
                        dma_release_channel(dev->dmadata[i].dma_chan);
                dev->dmadata[i].dma_chan = NULL;
        }
        return ret;
}

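/*
 * Platform remove: disable both FIFO DMAs, do a dummy read/write to clear a
 * possibly pending DMAREQ, then stop and release the DMA channels.
 */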
static int txx9aclc_pcm_remove(struct platform_device *pdev)
{
        struct snd_soc_device *socdev = platform_get_drvdata(pdev);
        struct txx9aclc_soc_device *dev =
                container_of(socdev, struct txx9aclc_soc_device, soc_dev);
        struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
        void __iomem *base = drvdata->base;
        int i;

        /* disable all FIFO DMAs */
        __raw_writel(ACCTL_AUDODMA | ACCTL_AUDIDMA, base + ACCTLDIS);
        /* dummy R/W to clear pending DMAREQ if any */
        __raw_writel(__raw_readl(base + ACAUDIDAT), base + ACAUDODAT);

        for (i = 0; i < 2; i++) {
                struct txx9aclc_dmadata *dmadata = &dev->dmadata[i];
                struct dma_chan *chan = dmadata->dma_chan;
                if (chan) {
                        dmadata->frag_count = -1;
                        chan->device->device_terminate_all(chan);
                        dma_release_channel(chan);
                }
                dev->dmadata[i].dma_chan = NULL;
        }
        return 0;
}

struct snd_soc_platform txx9aclc_soc_platform = {
        .name		= "txx9aclc-audio",
        .probe		= txx9aclc_pcm_probe,
        .remove		= txx9aclc_pcm_remove,
        .pcm_ops	= &txx9aclc_pcm_ops,
        .pcm_new	= txx9aclc_pcm_new,
        .pcm_free	= txx9aclc_pcm_free_dma_buffers,
};
EXPORT_SYMBOL_GPL(txx9aclc_soc_platform);

static int __init txx9aclc_soc_platform_init(void)
{
        return snd_soc_register_platform(&txx9aclc_soc_platform);
}

static void __exit txx9aclc_soc_platform_exit(void)
{
        snd_soc_unregister_platform(&txx9aclc_soc_platform);
}

module_init(txx9aclc_soc_platform_init);
module_exit(txx9aclc_soc_platform_exit);

MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("TXx9 ACLC Audio DMA driver");
MODULE_LICENSE("GPL");