/*
 * Provenance: linux/fpc-iii.git (WIP FPC-III support branch)
 * Path: sound/soc/mediatek/common/mtk-afe-fe-dai.c
 * Blob: 3cb2adf420bbf783a936cca05620ea07dd9f33f7
 */
// SPDX-License-Identifier: GPL-2.0
/*
 * mtk-afe-fe-dai.c -- MediaTek AFE FE DAI operator
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <garlic.tseng@mediatek.com>
 */
9 #include <linux/io.h>
10 #include <linux/module.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/regmap.h>
13 #include <sound/soc.h>
14 #include "mtk-afe-platform-driver.h"
15 #include <sound/pcm_params.h>
16 #include "mtk-afe-fe-dai.h"
17 #include "mtk-base-afe.h"
19 #define AFE_BASE_END_OFFSET 8
/*
 * Update masked bits of an AFE register, shifting mask and value into
 * place.  A negative @reg marks a register that does not exist on this
 * chip variant and is silently ignored; a negative @shift is a driver
 * bug and is warned about once.
 */
static int mtk_regmap_update_bits(struct regmap *map, int reg,
				  unsigned int mask,
				  unsigned int val, int shift)
{
	if (reg < 0 || WARN_ON_ONCE(shift < 0))
		return 0;
	return regmap_update_bits(map, reg, mask << shift, val << shift);
}
/*
 * Write @val to an AFE register.  A negative @reg marks an optional
 * register that is absent on this chip variant; the write becomes a no-op.
 */
static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_write(map, reg, val);
}
37 int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
38 struct snd_soc_dai *dai)
40 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
41 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
42 struct snd_pcm_runtime *runtime = substream->runtime;
43 int memif_num = asoc_rtd_to_cpu(rtd, 0)->id;
44 struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
45 const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
46 int ret;
48 memif->substream = substream;
50 snd_pcm_hw_constraint_step(substream->runtime, 0,
51 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
52 /* enable agent */
53 mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
54 1, 0, memif->data->agent_disable_shift);
56 snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);
59 * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
60 * smaller than period_size due to AFE's internal buffer.
61 * This easily leads to overrun when avail_min is period_size.
62 * One more period can hold the possible unread buffer.
64 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
65 int periods_max = mtk_afe_hardware->periods_max;
67 ret = snd_pcm_hw_constraint_minmax(runtime,
68 SNDRV_PCM_HW_PARAM_PERIODS,
69 3, periods_max);
70 if (ret < 0) {
71 dev_err(afe->dev, "hw_constraint_minmax failed\n");
72 return ret;
76 ret = snd_pcm_hw_constraint_integer(runtime,
77 SNDRV_PCM_HW_PARAM_PERIODS);
78 if (ret < 0)
79 dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
81 /* dynamic allocate irq to memif */
82 if (memif->irq_usage < 0) {
83 int irq_id = mtk_dynamic_irq_acquire(afe);
85 if (irq_id != afe->irqs_size) {
86 /* link */
87 memif->irq_usage = irq_id;
88 } else {
89 dev_err(afe->dev, "%s() error: no more asys irq\n",
90 __func__);
91 ret = -EBUSY;
94 return ret;
96 EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);
98 void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
99 struct snd_soc_dai *dai)
101 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
102 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
103 struct mtk_base_afe_memif *memif = &afe->memif[asoc_rtd_to_cpu(rtd, 0)->id];
104 int irq_id;
106 irq_id = memif->irq_usage;
108 mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
109 1, 1, memif->data->agent_disable_shift);
111 if (!memif->const_irq) {
112 mtk_dynamic_irq_release(afe, irq_id);
113 memif->irq_usage = -1;
114 memif->substream = NULL;
117 EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);
119 int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
120 struct snd_pcm_hw_params *params,
121 struct snd_soc_dai *dai)
123 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
124 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
125 int id = asoc_rtd_to_cpu(rtd, 0)->id;
126 struct mtk_base_afe_memif *memif = &afe->memif[id];
127 int ret;
128 unsigned int channels = params_channels(params);
129 unsigned int rate = params_rate(params);
130 snd_pcm_format_t format = params_format(params);
132 if (afe->request_dram_resource)
133 afe->request_dram_resource(afe->dev);
135 dev_dbg(afe->dev, "%s(), %s, ch %d, rate %d, fmt %d, dma_addr %pad, dma_area %p, dma_bytes 0x%zx\n",
136 __func__, memif->data->name,
137 channels, rate, format,
138 &substream->runtime->dma_addr,
139 substream->runtime->dma_area,
140 substream->runtime->dma_bytes);
142 memset_io(substream->runtime->dma_area, 0,
143 substream->runtime->dma_bytes);
145 /* set addr */
146 ret = mtk_memif_set_addr(afe, id,
147 substream->runtime->dma_area,
148 substream->runtime->dma_addr,
149 substream->runtime->dma_bytes);
150 if (ret) {
151 dev_err(afe->dev, "%s(), error, id %d, set addr, ret %d\n",
152 __func__, id, ret);
153 return ret;
156 /* set channel */
157 ret = mtk_memif_set_channel(afe, id, channels);
158 if (ret) {
159 dev_err(afe->dev, "%s(), error, id %d, set channel %d, ret %d\n",
160 __func__, id, channels, ret);
161 return ret;
164 /* set rate */
165 ret = mtk_memif_set_rate_substream(substream, id, rate);
166 if (ret) {
167 dev_err(afe->dev, "%s(), error, id %d, set rate %d, ret %d\n",
168 __func__, id, rate, ret);
169 return ret;
172 /* set format */
173 ret = mtk_memif_set_format(afe, id, format);
174 if (ret) {
175 dev_err(afe->dev, "%s(), error, id %d, set format %d, ret %d\n",
176 __func__, id, format, ret);
177 return ret;
180 return 0;
182 EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);
184 int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
185 struct snd_soc_dai *dai)
187 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
189 if (afe->release_dram_resource)
190 afe->release_dram_resource(afe->dev);
192 return 0;
194 EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);
196 int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
197 struct snd_soc_dai *dai)
199 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
200 struct snd_pcm_runtime * const runtime = substream->runtime;
201 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
202 int id = asoc_rtd_to_cpu(rtd, 0)->id;
203 struct mtk_base_afe_memif *memif = &afe->memif[id];
204 struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
205 const struct mtk_base_irq_data *irq_data = irqs->irq_data;
206 unsigned int counter = runtime->period_size;
207 int fs;
208 int ret;
210 dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);
212 switch (cmd) {
213 case SNDRV_PCM_TRIGGER_START:
214 case SNDRV_PCM_TRIGGER_RESUME:
215 ret = mtk_memif_set_enable(afe, id);
216 if (ret) {
217 dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
218 __func__, id, ret);
219 return ret;
222 /* set irq counter */
223 mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
224 irq_data->irq_cnt_maskbit, counter,
225 irq_data->irq_cnt_shift);
227 /* set irq fs */
228 fs = afe->irq_fs(substream, runtime->rate);
230 if (fs < 0)
231 return -EINVAL;
233 mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
234 irq_data->irq_fs_maskbit, fs,
235 irq_data->irq_fs_shift);
237 /* enable interrupt */
238 mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
239 1, 1, irq_data->irq_en_shift);
241 return 0;
242 case SNDRV_PCM_TRIGGER_STOP:
243 case SNDRV_PCM_TRIGGER_SUSPEND:
244 ret = mtk_memif_set_disable(afe, id);
245 if (ret) {
246 dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
247 __func__, id, ret);
250 /* disable interrupt */
251 mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
252 1, 0, irq_data->irq_en_shift);
253 /* and clear pending IRQ */
254 mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
255 1 << irq_data->irq_clr_shift);
256 return ret;
257 default:
258 return -EINVAL;
261 EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);
263 int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
264 struct snd_soc_dai *dai)
266 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
267 struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
268 int id = asoc_rtd_to_cpu(rtd, 0)->id;
269 int pbuf_size;
271 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
272 if (afe->get_memif_pbuf_size) {
273 pbuf_size = afe->get_memif_pbuf_size(substream);
274 mtk_memif_set_pbuf_size(afe, id, pbuf_size);
277 return 0;
279 EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);
281 const struct snd_soc_dai_ops mtk_afe_fe_ops = {
282 .startup = mtk_afe_fe_startup,
283 .shutdown = mtk_afe_fe_shutdown,
284 .hw_params = mtk_afe_fe_hw_params,
285 .hw_free = mtk_afe_fe_hw_free,
286 .prepare = mtk_afe_fe_prepare,
287 .trigger = mtk_afe_fe_trigger,
289 EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);
291 static DEFINE_MUTEX(irqs_lock);
292 int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
294 int i;
296 mutex_lock(&afe->irq_alloc_lock);
297 for (i = 0; i < afe->irqs_size; ++i) {
298 if (afe->irqs[i].irq_occupyed == 0) {
299 afe->irqs[i].irq_occupyed = 1;
300 mutex_unlock(&afe->irq_alloc_lock);
301 return i;
304 mutex_unlock(&afe->irq_alloc_lock);
305 return afe->irqs_size;
307 EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);
309 int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
311 mutex_lock(&afe->irq_alloc_lock);
312 if (irq_id >= 0 && irq_id < afe->irqs_size) {
313 afe->irqs[irq_id].irq_occupyed = 0;
314 mutex_unlock(&afe->irq_alloc_lock);
315 return 0;
317 mutex_unlock(&afe->irq_alloc_lock);
318 return -EINVAL;
320 EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);
322 int mtk_afe_suspend(struct snd_soc_component *component)
324 struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
325 struct device *dev = afe->dev;
326 struct regmap *regmap = afe->regmap;
327 int i;
329 if (pm_runtime_status_suspended(dev) || afe->suspended)
330 return 0;
332 if (!afe->reg_back_up)
333 afe->reg_back_up =
334 devm_kcalloc(dev, afe->reg_back_up_list_num,
335 sizeof(unsigned int), GFP_KERNEL);
337 for (i = 0; i < afe->reg_back_up_list_num; i++)
338 regmap_read(regmap, afe->reg_back_up_list[i],
339 &afe->reg_back_up[i]);
341 afe->suspended = true;
342 afe->runtime_suspend(dev);
343 return 0;
345 EXPORT_SYMBOL_GPL(mtk_afe_suspend);
347 int mtk_afe_resume(struct snd_soc_component *component)
349 struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
350 struct device *dev = afe->dev;
351 struct regmap *regmap = afe->regmap;
352 int i = 0;
354 if (pm_runtime_status_suspended(dev) || !afe->suspended)
355 return 0;
357 afe->runtime_resume(dev);
359 if (!afe->reg_back_up)
360 dev_dbg(dev, "%s no reg_backup\n", __func__);
362 for (i = 0; i < afe->reg_back_up_list_num; i++)
363 mtk_regmap_write(regmap, afe->reg_back_up_list[i],
364 afe->reg_back_up[i]);
366 afe->suspended = false;
367 return 0;
369 EXPORT_SYMBOL_GPL(mtk_afe_resume);
371 int mtk_memif_set_enable(struct mtk_base_afe *afe, int id)
373 struct mtk_base_afe_memif *memif = &afe->memif[id];
375 if (memif->data->enable_shift < 0) {
376 dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
377 __func__, id);
378 return 0;
380 return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
381 1, 1, memif->data->enable_shift);
383 EXPORT_SYMBOL_GPL(mtk_memif_set_enable);
385 int mtk_memif_set_disable(struct mtk_base_afe *afe, int id)
387 struct mtk_base_afe_memif *memif = &afe->memif[id];
389 if (memif->data->enable_shift < 0) {
390 dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
391 __func__, id);
392 return 0;
394 return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
395 1, 0, memif->data->enable_shift);
397 EXPORT_SYMBOL_GPL(mtk_memif_set_disable);
399 int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
400 unsigned char *dma_area,
401 dma_addr_t dma_addr,
402 size_t dma_bytes)
404 struct mtk_base_afe_memif *memif = &afe->memif[id];
405 int msb_at_bit33 = upper_32_bits(dma_addr) ? 1 : 0;
406 unsigned int phys_buf_addr = lower_32_bits(dma_addr);
407 unsigned int phys_buf_addr_upper_32 = upper_32_bits(dma_addr);
409 memif->dma_area = dma_area;
410 memif->dma_addr = dma_addr;
411 memif->dma_bytes = dma_bytes;
413 /* start */
414 mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
415 phys_buf_addr);
416 /* end */
417 if (memif->data->reg_ofs_end)
418 mtk_regmap_write(afe->regmap,
419 memif->data->reg_ofs_end,
420 phys_buf_addr + dma_bytes - 1);
421 else
422 mtk_regmap_write(afe->regmap,
423 memif->data->reg_ofs_base +
424 AFE_BASE_END_OFFSET,
425 phys_buf_addr + dma_bytes - 1);
427 /* set start, end, upper 32 bits */
428 if (memif->data->reg_ofs_base_msb) {
429 mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base_msb,
430 phys_buf_addr_upper_32);
431 mtk_regmap_write(afe->regmap,
432 memif->data->reg_ofs_end_msb,
433 phys_buf_addr_upper_32);
436 /* set MSB to 33-bit */
437 if (memif->data->msb_reg >= 0)
438 mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
439 1, msb_at_bit33, memif->data->msb_shift);
441 return 0;
443 EXPORT_SYMBOL_GPL(mtk_memif_set_addr);
445 int mtk_memif_set_channel(struct mtk_base_afe *afe,
446 int id, unsigned int channel)
448 struct mtk_base_afe_memif *memif = &afe->memif[id];
449 unsigned int mono;
451 if (memif->data->mono_shift < 0)
452 return 0;
454 if (memif->data->quad_ch_mask) {
455 unsigned int quad_ch = (channel == 4) ? 1 : 0;
457 mtk_regmap_update_bits(afe->regmap, memif->data->quad_ch_reg,
458 memif->data->quad_ch_mask,
459 quad_ch, memif->data->quad_ch_shift);
462 if (memif->data->mono_invert)
463 mono = (channel == 1) ? 0 : 1;
464 else
465 mono = (channel == 1) ? 1 : 0;
467 return mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
468 1, mono, memif->data->mono_shift);
470 EXPORT_SYMBOL_GPL(mtk_memif_set_channel);
472 static int mtk_memif_set_rate_fs(struct mtk_base_afe *afe,
473 int id, int fs)
475 struct mtk_base_afe_memif *memif = &afe->memif[id];
477 if (memif->data->fs_shift >= 0)
478 mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
479 memif->data->fs_maskbit,
480 fs, memif->data->fs_shift);
482 return 0;
485 int mtk_memif_set_rate(struct mtk_base_afe *afe,
486 int id, unsigned int rate)
488 int fs = 0;
490 if (!afe->get_dai_fs) {
491 dev_err(afe->dev, "%s(), error, afe->get_dai_fs == NULL\n",
492 __func__);
493 return -EINVAL;
496 fs = afe->get_dai_fs(afe, id, rate);
498 if (fs < 0)
499 return -EINVAL;
501 return mtk_memif_set_rate_fs(afe, id, fs);
503 EXPORT_SYMBOL_GPL(mtk_memif_set_rate);
505 int mtk_memif_set_rate_substream(struct snd_pcm_substream *substream,
506 int id, unsigned int rate)
508 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
509 struct snd_soc_component *component =
510 snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
511 struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
513 int fs = 0;
515 if (!afe->memif_fs) {
516 dev_err(afe->dev, "%s(), error, afe->memif_fs == NULL\n",
517 __func__);
518 return -EINVAL;
521 fs = afe->memif_fs(substream, rate);
523 if (fs < 0)
524 return -EINVAL;
526 return mtk_memif_set_rate_fs(afe, id, fs);
528 EXPORT_SYMBOL_GPL(mtk_memif_set_rate_substream);
530 int mtk_memif_set_format(struct mtk_base_afe *afe,
531 int id, snd_pcm_format_t format)
533 struct mtk_base_afe_memif *memif = &afe->memif[id];
534 int hd_audio = 0;
535 int hd_align = 0;
537 /* set hd mode */
538 switch (format) {
539 case SNDRV_PCM_FORMAT_S16_LE:
540 case SNDRV_PCM_FORMAT_U16_LE:
541 hd_audio = 0;
542 break;
543 case SNDRV_PCM_FORMAT_S32_LE:
544 case SNDRV_PCM_FORMAT_U32_LE:
545 if (afe->memif_32bit_supported) {
546 hd_audio = 2;
547 hd_align = 0;
548 } else {
549 hd_audio = 1;
550 hd_align = 1;
552 break;
553 case SNDRV_PCM_FORMAT_S24_LE:
554 case SNDRV_PCM_FORMAT_U24_LE:
555 hd_audio = 1;
556 break;
557 default:
558 dev_err(afe->dev, "%s() error: unsupported format %d\n",
559 __func__, format);
560 break;
563 mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
564 0x3, hd_audio, memif->data->hd_shift);
566 mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
567 0x1, hd_align, memif->data->hd_align_mshift);
569 return 0;
571 EXPORT_SYMBOL_GPL(mtk_memif_set_format);
573 int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
574 int id, int pbuf_size)
576 const struct mtk_base_memif_data *memif_data = afe->memif[id].data;
578 if (memif_data->pbuf_mask == 0 || memif_data->minlen_mask == 0)
579 return 0;
581 mtk_regmap_update_bits(afe->regmap, memif_data->pbuf_reg,
582 memif_data->pbuf_mask,
583 pbuf_size, memif_data->pbuf_shift);
585 mtk_regmap_update_bits(afe->regmap, memif_data->minlen_reg,
586 memif_data->minlen_mask,
587 pbuf_size, memif_data->minlen_shift);
588 return 0;
590 EXPORT_SYMBOL_GPL(mtk_memif_set_pbuf_size);
592 MODULE_DESCRIPTION("Mediatek simple fe dai operator");
593 MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
594 MODULE_LICENSE("GPL v2");