sound/pci/hda/hda_controller.c — ALSA HD Audio controller core (the "qlcnic" title previously shown here was an unrelated commit-subject artifact)
[linux/fpc-iii.git] / sound / pci / hda / hda_controller.c
blob 26ce990592a04d102dde33ac41ad3a9edfc870a5
1 /*
3 * Implementation of primary alsa driver code base for Intel HD Audio.
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <sound/core.h>
31 #include <sound/initval.h>
32 #include "hda_controller.h"
34 #define CREATE_TRACE_POINTS
35 #include "hda_intel_trace.h"
/*
 * DSP lock helpers.
 *
 * With CONFIG_SND_HDA_DSP_LOADER enabled, stream usage is serialized
 * between the normal PCM path and the DSP firmware loader via a
 * per-stream mutex; dsp_is_locked() reports whether the loader holds
 * the stream.  Without the loader these compile to no-ops so callers
 * need no #ifdefs of their own.
 */
#ifdef CONFIG_SND_HDA_DSP_LOADER
#define dsp_lock_init(dev)	mutex_init(&(dev)->dsp_mutex)
#define dsp_lock(dev)		mutex_lock(&(dev)->dsp_mutex)
#define dsp_unlock(dev)		mutex_unlock(&(dev)->dsp_mutex)
#define dsp_is_locked(dev)	((dev)->locked)
#else
#define dsp_lock_init(dev)	do {} while (0)
#define dsp_lock(dev)		do {} while (0)
#define dsp_unlock(dev)		do {} while (0)
#define dsp_is_locked(dev)	0
#endif
51 * AZX stream operations.
54 /* start a stream */
55 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
58 * Before stream start, initialize parameter
60 azx_dev->insufficient = 1;
62 /* enable SIE */
63 azx_writel(chip, INTCTL,
64 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
65 /* set DMA start and interrupt mask */
66 azx_sd_writeb(chip, azx_dev, SD_CTL,
67 azx_sd_readb(chip, azx_dev, SD_CTL) |
68 SD_CTL_DMA_START | SD_INT_MASK);
71 /* stop DMA */
72 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
74 azx_sd_writeb(chip, azx_dev, SD_CTL,
75 azx_sd_readb(chip, azx_dev, SD_CTL) &
76 ~(SD_CTL_DMA_START | SD_INT_MASK));
77 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
80 /* stop a stream */
81 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
83 azx_stream_clear(chip, azx_dev);
84 /* disable SIE */
85 azx_writel(chip, INTCTL,
86 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
88 EXPORT_SYMBOL_GPL(azx_stream_stop);
/*
 * reset stream
 *
 * Asserts SD_CTL_STREAM_RESET, polls (bounded) until the hardware
 * acknowledges the bit, deasserts it and polls again until the stream
 * leaves reset.  The udelay(3) settle times and the poll bounds follow
 * the HDA controller programming sequence.
 */
static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned char val;
	int timeout;

	/* DMA must be stopped before resetting the stream */
	azx_stream_clear(chip, azx_dev);

	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_STREAM_RESET);
	udelay(3);
	timeout = 300;
	/* wait until the hardware reports the reset bit as set */
	while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		 SD_CTL_STREAM_RESET) && --timeout)
		;
	val &= ~SD_CTL_STREAM_RESET;
	azx_sd_writeb(chip, azx_dev, SD_CTL, val);
	udelay(3);

	timeout = 300;
	/* waiting for hardware to report that the stream is out of reset */
	while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		SD_CTL_STREAM_RESET) && --timeout)
		;

	/* reset first position - may not be synced with hw at this time */
	*azx_dev->posbuf = 0;
}
/*
 * set up the SD for streaming
 *
 * Programs the stream descriptor registers from the values cached in
 * azx_dev (bufsize, format_val, frags, BDL address).  The run bit must
 * be clear while programming; interrupts are enabled last.
 * Always returns 0.
 */
static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned int val;

	/* make sure the run bit is zero for SD */
	azx_stream_clear(chip, azx_dev);
	/* program the stream_tag */
	val = azx_sd_readl(chip, azx_dev, SD_CTL);
	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
	if (!azx_snoop(chip))
		val |= SD_CTL_TRAFFIC_PRIO;
	azx_sd_writel(chip, azx_dev, SD_CTL, val);

	/* program the length of samples in cyclic buffer */
	azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);

	/* program the stream format */
	/* this value needs to be the same as the one programmed */
	azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);

	/* program the stream LVI (last valid index) of the BDL */
	azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);

	/* program the BDL address */
	/* lower BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
	/* upper BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPU,
		      upper_32_bits(azx_dev->bdl.addr));

	/* enable the position buffer unless LPIB is used for both streams */
	if (chip->get_position[0] != azx_get_pos_lpib ||
	    chip->get_position[1] != azx_get_pos_lpib) {
		if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
			azx_writel(chip, DPLBASE,
				(u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
	}

	/* set the interrupt enable bits in the descriptor control register */
	azx_sd_writel(chip, azx_dev, SD_CTL,
		      azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);

	return 0;
}
/* assign a stream for the PCM */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	int dev, i, nums;
	struct azx_dev *res = NULL;
	/* make a non-zero unique key for the substream */
	int key = (substream->pcm->device << 16) | (substream->number << 2) |
		(substream->stream + 1);

	/* search only within the index range of this stream direction */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dev = chip->playback_index_offset;
		nums = chip->playback_streams;
	} else {
		dev = chip->capture_index_offset;
		nums = chip->capture_streams;
	}
	for (i = 0; i < nums; i++, dev++) {
		struct azx_dev *azx_dev = &chip->azx_dev[dev];
		dsp_lock(azx_dev);
		/* skip streams in use or held by the DSP loader */
		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
			/* prefer the stream this substream used last time */
			if (azx_dev->assigned_key == key) {
				azx_dev->opened = 1;
				azx_dev->assigned_key = key;
				dsp_unlock(azx_dev);
				return azx_dev;
			}
			/* otherwise remember a candidate; with
			 * AZX_DCAPS_REVERSE_ASSIGN keep updating so the
			 * last (highest-index) free stream wins
			 */
			if (!res ||
			    (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
				res = azx_dev;
		}
		dsp_unlock(azx_dev);
	}
	if (res) {
		dsp_lock(res);
		res->opened = 1;
		res->assigned_key = key;
		dsp_unlock(res);
	}
	return res;
}
210 /* release the assigned stream */
211 static inline void azx_release_device(struct azx_dev *azx_dev)
213 azx_dev->opened = 0;
216 static cycle_t azx_cc_read(const struct cyclecounter *cc)
218 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
219 struct snd_pcm_substream *substream = azx_dev->substream;
220 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
221 struct azx *chip = apcm->chip;
223 return azx_readl(chip, WALLCLK);
/*
 * Initialize the per-stream timecounter on top of the WALLCLK cycle
 * counter.  When @force is set, cycle_last is overridden with @last so
 * that synchronized group starts share one start cycle.
 */
static void azx_timecounter_init(struct snd_pcm_substream *substream,
				bool force, cycle_t last)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct timecounter *tc = &azx_dev->azx_tc;
	struct cyclecounter *cc = &azx_dev->azx_cc;
	u64 nsec;

	cc->read = azx_cc_read;
	cc->mask = CLOCKSOURCE_MASK(32);

	/*
	 * Converting from 24 MHz to ns means applying a 125/3 factor.
	 * To avoid any saturation issues in intermediate operations,
	 * the 125 factor is applied first. The division is applied
	 * last after reading the timecounter value.
	 * Applying the 1/3 factor as part of the multiplication
	 * requires at least 20 bits for a decent precision, however
	 * overflows occur after about 4 hours or less, not a option.
	 */
	cc->mult = 125; /* saturation after 195 years */
	cc->shift = 0;

	nsec = 0; /* audio time is elapsed time since trigger */
	timecounter_init(tc, cc, nsec);
	if (force)
		/*
		 * force timecounter to use predefined value,
		 * used for synchronized starts
		 */
		tc->cycle_last = last;
}
260 static inline struct hda_pcm_stream *
261 to_hda_pcm_stream(struct snd_pcm_substream *substream)
263 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
264 return &apcm->info->stream[substream->stream];
267 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
268 u64 nsec)
270 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
271 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
272 u64 codec_frames, codec_nsecs;
274 if (!hinfo->ops.get_delay)
275 return nsec;
277 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
278 codec_nsecs = div_u64(codec_frames * 1000000000LL,
279 substream->runtime->rate);
281 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
282 return nsec + codec_nsecs;
284 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
/*
 * set up a BDL entry
 *
 * Fills buffer descriptor list entries (4 x u32 each: address lo/hi,
 * length, IOC flag) covering @size bytes of @dmab starting at @ofs.
 * Advances *bdlp past the entries written.  Returns the new buffer
 * offset, or -EINVAL when AZX_MAX_BDL_ENTRIES would be exceeded.
 */
static int setup_bdle(struct azx *chip,
		      struct snd_dma_buffer *dmab,
		      struct azx_dev *azx_dev, u32 **bdlp,
		      int ofs, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	while (size > 0) {
		dma_addr_t addr;
		int chunk;

		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
			return -EINVAL;

		addr = snd_sgbuf_get_addr(dmab, ofs);
		/* program the address field of the BDL entry */
		bdl[0] = cpu_to_le32((u32)addr);
		bdl[1] = cpu_to_le32(upper_32_bits(addr));
		/* program the size field of the BDL entry */
		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
		/* one BDLE cannot cross 4K boundary on CTHDA chips */
		if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
			u32 remain = 0x1000 - (ofs & 0xfff);
			if (chunk > remain)
				chunk = remain;
		}
		bdl[2] = cpu_to_le32(chunk);
		/* program the IOC to enable interrupt
		 * only when the whole fragment is processed
		 */
		size -= chunk;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
		bdl += 4;
		azx_dev->frags++;
		ofs += chunk;
	}
	*bdlp = bdl;
	return ofs;
}
/*
 * set up BDL entries
 *
 * Builds the full BDL for the stream: an optional small leading entry
 * (bdl_pos_adj, scaled from its 48 kHz reference to the actual rate) to
 * compensate position-reporting latency, followed by one entry per
 * period; the final period is shortened by the adjustment so the total
 * still equals the buffer size.  Returns 0 or -EINVAL on BDL overflow.
 */
static int azx_setup_periods(struct azx *chip,
			     struct snd_pcm_substream *substream,
			     struct azx_dev *azx_dev)
{
	u32 *bdl;
	int i, ofs, periods, period_bytes;
	int pos_adj = 0;

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	period_bytes = azx_dev->period_bytes;
	periods = azx_dev->bufsize / period_bytes;

	/* program the initial BDL entries */
	bdl = (u32 *)azx_dev->bdl.area;
	ofs = 0;
	azx_dev->frags = 0;

	if (chip->bdl_pos_adj)
		pos_adj = chip->bdl_pos_adj[chip->dev_index];
	if (!azx_dev->no_period_wakeup && pos_adj > 0) {
		struct snd_pcm_runtime *runtime = substream->runtime;
		int pos_align = pos_adj;
		/* scale from the 48 kHz reference, rounding up */
		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
		if (!pos_adj)
			pos_adj = pos_align;
		else
			/* round up to a multiple of the original alignment */
			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
				pos_align;
		pos_adj = frames_to_bytes(runtime, pos_adj);
		if (pos_adj >= period_bytes) {
			dev_warn(chip->card->dev, "Too big adjustment %d\n",
				 pos_adj);
			pos_adj = 0;
		} else {
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev,
					 &bdl, ofs, pos_adj, true);
			if (ofs < 0)
				goto error;
		}
	} else
		pos_adj = 0;

	for (i = 0; i < periods; i++) {
		if (i == periods - 1 && pos_adj)
			/* last entry shortened by the leading adjustment */
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev, &bdl, ofs,
					 period_bytes - pos_adj, 0);
		else
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev, &bdl, ofs,
					 period_bytes,
					 !azx_dev->no_period_wakeup);
		if (ofs < 0)
			goto error;
	}
	return 0;

 error:
	dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
		azx_dev->bufsize, period_bytes);
	return -EINVAL;
}
/*
 * PCM ops
 */

/*
 * PCM close callback: detach the stream under reg_lock (so the irq
 * handler no longer sees it), release it, call the codec close op and
 * drop the power/pcm references.  Lock order: open_mutex -> reg_lock.
 */
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	unsigned long flags;

	mutex_lock(&chip->open_mutex);
	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = NULL;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	azx_release_device(azx_dev);
	if (hinfo->ops.close)
		hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return 0;
}
426 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
427 struct snd_pcm_hw_params *hw_params)
429 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
430 struct azx *chip = apcm->chip;
431 int ret;
433 dsp_lock(get_azx_dev(substream));
434 if (dsp_is_locked(get_azx_dev(substream))) {
435 ret = -EBUSY;
436 goto unlock;
439 ret = chip->ops->substream_alloc_pages(chip, substream,
440 params_buffer_bytes(hw_params));
441 unlock:
442 dsp_unlock(get_azx_dev(substream));
443 return ret;
/*
 * hw_free callback: reset the stream registers and cached parameters
 * (skipped when the DSP loader holds the stream), clean up the codec
 * side and free the DMA buffer pages.  Returns the free_pages result.
 */
static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx *chip = apcm->chip;
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	int err;

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev)) {
		azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
		azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
		azx_sd_writel(chip, azx_dev, SD_CTL, 0);
		/* invalidate the cached setup so prepare reprograms it */
		azx_dev->bufsize = 0;
		azx_dev->period_bytes = 0;
		azx_dev->format_val = 0;
	}

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	err = chip->ops->substream_free_pages(chip, substream);
	azx_dev->prepared = 0;
	dsp_unlock(azx_dev);
	return err;
}
/*
 * PCM prepare callback: reset the stream, compute the HDA stream format
 * from the runtime parameters, (re)build the BDL when anything changed,
 * program the stream descriptor and finally prepare the codec side.
 * On success azx_dev->prepared is set, which azx_pcm_trigger requires.
 */
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int bufsize, period_bytes, format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	azx_stream_reset(chip, azx_dev);
	format_val = snd_hda_calc_stream_format(apcm->codec,
						runtime->rate,
						runtime->channels,
						runtime->format,
						hinfo->maxbps,
						ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	bufsize = snd_pcm_lib_buffer_bytes(substream);
	period_bytes = snd_pcm_lib_period_bytes(substream);

	dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
		bufsize, format_val);

	/* rebuild the BDL only when the setup actually changed */
	if (bufsize != azx_dev->bufsize ||
	    period_bytes != azx_dev->period_bytes ||
	    format_val != azx_dev->format_val ||
	    runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
		azx_dev->bufsize = bufsize;
		azx_dev->period_bytes = period_bytes;
		azx_dev->format_val = format_val;
		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
		err = azx_setup_periods(chip, substream, azx_dev);
		if (err < 0)
			goto unlock;
	}

	/* when LPIB delay correction gives a small negative value,
	 * we ignore it; currently set the threshold statically to
	 * 64 frames
	 */
	if (runtime->period_size > 64)
		azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
	else
		azx_dev->delay_negative_threshold = 0;

	/* wallclk has 24Mhz clock source */
	azx_dev->period_wallclk = (((runtime->period_size * 24000) /
				runtime->rate) * 1000);
	azx_setup_controller(chip, azx_dev);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		azx_dev->fifo_size =
			azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
	else
		azx_dev->fifo_size = 0;

	stream_tag = azx_dev->stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				    azx_dev->format_val, substream);

 unlock:
	if (!err)
		azx_dev->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}
560 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
562 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
563 struct azx *chip = apcm->chip;
564 struct azx_dev *azx_dev;
565 struct snd_pcm_substream *s;
566 int rstart = 0, start, nsync = 0, sbits = 0;
567 int nwait, timeout;
569 azx_dev = get_azx_dev(substream);
570 trace_azx_pcm_trigger(chip, azx_dev, cmd);
572 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
573 return -EPIPE;
575 switch (cmd) {
576 case SNDRV_PCM_TRIGGER_START:
577 rstart = 1;
578 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
579 case SNDRV_PCM_TRIGGER_RESUME:
580 start = 1;
581 break;
582 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
583 case SNDRV_PCM_TRIGGER_SUSPEND:
584 case SNDRV_PCM_TRIGGER_STOP:
585 start = 0;
586 break;
587 default:
588 return -EINVAL;
591 snd_pcm_group_for_each_entry(s, substream) {
592 if (s->pcm->card != substream->pcm->card)
593 continue;
594 azx_dev = get_azx_dev(s);
595 sbits |= 1 << azx_dev->index;
596 nsync++;
597 snd_pcm_trigger_done(s, substream);
600 spin_lock(&chip->reg_lock);
602 /* first, set SYNC bits of corresponding streams */
603 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
604 azx_writel(chip, OLD_SSYNC,
605 azx_readl(chip, OLD_SSYNC) | sbits);
606 else
607 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
609 snd_pcm_group_for_each_entry(s, substream) {
610 if (s->pcm->card != substream->pcm->card)
611 continue;
612 azx_dev = get_azx_dev(s);
613 if (start) {
614 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
615 if (!rstart)
616 azx_dev->start_wallclk -=
617 azx_dev->period_wallclk;
618 azx_stream_start(chip, azx_dev);
619 } else {
620 azx_stream_stop(chip, azx_dev);
622 azx_dev->running = start;
624 spin_unlock(&chip->reg_lock);
625 if (start) {
626 /* wait until all FIFOs get ready */
627 for (timeout = 5000; timeout; timeout--) {
628 nwait = 0;
629 snd_pcm_group_for_each_entry(s, substream) {
630 if (s->pcm->card != substream->pcm->card)
631 continue;
632 azx_dev = get_azx_dev(s);
633 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
634 SD_STS_FIFO_READY))
635 nwait++;
637 if (!nwait)
638 break;
639 cpu_relax();
641 } else {
642 /* wait until all RUN bits are cleared */
643 for (timeout = 5000; timeout; timeout--) {
644 nwait = 0;
645 snd_pcm_group_for_each_entry(s, substream) {
646 if (s->pcm->card != substream->pcm->card)
647 continue;
648 azx_dev = get_azx_dev(s);
649 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
650 SD_CTL_DMA_START)
651 nwait++;
653 if (!nwait)
654 break;
655 cpu_relax();
658 spin_lock(&chip->reg_lock);
659 /* reset SYNC bits */
660 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
661 azx_writel(chip, OLD_SSYNC,
662 azx_readl(chip, OLD_SSYNC) & ~sbits);
663 else
664 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
665 if (start) {
666 azx_timecounter_init(substream, 0, 0);
667 snd_pcm_gettime(substream->runtime, &substream->runtime->trigger_tstamp);
668 substream->runtime->trigger_tstamp_latched = true;
670 if (nsync > 1) {
671 cycle_t cycle_last;
673 /* same start cycle for master and group */
674 azx_dev = get_azx_dev(substream);
675 cycle_last = azx_dev->azx_tc.cycle_last;
677 snd_pcm_group_for_each_entry(s, substream) {
678 if (s->pcm->card != substream->pcm->card)
679 continue;
680 azx_timecounter_init(s, 1, cycle_last);
684 spin_unlock(&chip->reg_lock);
685 return 0;
688 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
690 return azx_sd_readl(chip, azx_dev, SD_LPIB);
692 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
694 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
696 return le32_to_cpu(*azx_dev->posbuf);
698 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
/*
 * Read the current byte position of a stream via the chip's preferred
 * method (get_position callback, position buffer by default), wrapping
 * to 0 at the buffer end, and update runtime->delay from the chip- and
 * codec-side delay callbacks.
 */
unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev)
{
	struct snd_pcm_substream *substream = azx_dev->substream;
	unsigned int pos;
	int stream = substream->stream;
	int delay = 0;

	if (chip->get_position[stream])
		pos = chip->get_position[stream](chip, azx_dev);
	else /* use the position buffer as default */
		pos = azx_get_pos_posbuf(chip, azx_dev);

	/* hardware may report one-past-the-end; wrap around */
	if (pos >= azx_dev->bufsize)
		pos = 0;

	if (substream->runtime) {
		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

		if (chip->get_delay[stream])
			delay += chip->get_delay[stream](chip, azx_dev, pos);
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);
733 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
735 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
736 struct azx *chip = apcm->chip;
737 struct azx_dev *azx_dev = get_azx_dev(substream);
738 return bytes_to_frames(substream->runtime,
739 azx_get_position(chip, azx_dev));
/*
 * get_time_info callback: when a link timestamp is available and
 * requested, report the wall-clock-derived audio timestamp (24 MHz
 * counter, mult 125 applied at read, divided by 3 here to reach ns)
 * together with a system timestamp; otherwise fall back to the default
 * timestamp type.  Always returns 0.
 */
static int azx_get_time_info(struct snd_pcm_substream *substream,
			struct timespec *system_ts, struct timespec *audio_ts,
			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	u64 nsec;

	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {

		snd_pcm_gettime(substream->runtime, system_ts);

		nsec = timecounter_read(&azx_dev->azx_tc);
		nsec = div_u64(nsec, 3); /* can be optimized */
		if (audio_tstamp_config->report_delay)
			nsec = azx_adjust_codec_delay(substream, nsec);

		*audio_ts = ns_to_timespec(nsec);

		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */

	} else
		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;

	return 0;
}
/*
 * Default hardware description; per-stream fields (formats, rates,
 * channel counts) are overridden from the codec's hda_pcm_stream in
 * azx_pcm_open().
 */
static struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};
/*
 * PCM open callback: assign a hardware stream, populate runtime->hw
 * from the codec's capabilities, apply buffer-size constraints, power
 * up the codec and call its open op.  On failure all acquired
 * resources (stream, power, pcm ref) are released in reverse order.
 */
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	int err;
	int buff_step;

	snd_hda_codec_pcm_get(apcm->info);
	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	if (azx_dev == NULL) {
		err = -EBUSY;
		goto unlock;
	}
	runtime->hw = azx_pcm_hw;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be multiple of 128
		   bytes. This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes. For example for 44.1kHz, a period size set
		   to 20ms will be rounded to 19.59ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes, still need to
		   be multiple of 4 bytes (HDA spec). Tested on Intel
		   HDA controllers, may not work on all devices where
		   option needs to be disabled */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	snd_hda_power_up(apcm->codec);
	if (hinfo->ops.open)
		err = hinfo->ops.open(hinfo, apcm->codec, substream);
	else
		err = -ENODEV;
	if (err < 0) {
		azx_release_device(azx_dev);
		goto powerdown;
	}
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		if (hinfo->ops.close)
			hinfo->ops.close(hinfo, apcm->codec, substream);
		err = -EINVAL;
		goto powerdown;
	}

	/* disable LINK_ATIME timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
	}

	/* publish the stream to the irq handler under reg_lock */
	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = substream;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);

	runtime->private_data = azx_dev;
	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;

 powerdown:
	snd_hda_power_down(apcm->codec);
 unlock:
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return err;
}
895 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
896 struct vm_area_struct *area)
898 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
899 struct azx *chip = apcm->chip;
900 if (chip->ops->pcm_mmap_prepare)
901 chip->ops->pcm_mmap_prepare(substream, area);
902 return snd_pcm_lib_default_mmap(substream, area);
/* PCM operation table shared by all azx-backed PCM streams */
static struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.get_time_info =  azx_get_time_info,
	.mmap = azx_pcm_mmap,
	.page = snd_pcm_sgbuf_ops_page,
};
919 static void azx_pcm_free(struct snd_pcm *pcm)
921 struct azx_pcm *apcm = pcm->private_data;
922 if (apcm) {
923 list_del(&apcm->list);
924 apcm->info->pcm = NULL;
925 kfree(apcm);
/* upper bound for the per-PCM preallocated DMA buffer */
#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)

/*
 * Create an ALSA PCM device for a codec PCM (hda_pcm), wire up the
 * azx_pcm_ops for each direction that has substreams, and preallocate
 * the DMA buffer (bounded by MAX_PREALLOC_SIZE).  Fails with -EBUSY
 * when a PCM with the same device number already exists on this chip.
 */
static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
				 struct hda_pcm *cpcm)
{
	struct azx *chip = bus->private_data;
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;

	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL)
		return -ENOMEM;
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	apcm->info = cpcm;
	pcm->private_data = apcm;
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      chip->card->dev,
					      size, MAX_PREALLOC_SIZE);
	return 0;
}
983 * CORB / RIRB interface
985 static int azx_alloc_cmd_io(struct azx *chip)
987 /* single page (at least 4096 bytes) must suffice for both ringbuffes */
988 return chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
989 PAGE_SIZE, &chip->rb);
/*
 * Program and start the CORB/RIRB ring buffers: CORB in the first half
 * of the shared page, RIRB at offset 2048.  The CORB read pointer is
 * reset via the handshake (set RST, wait, clear, wait) unless the chip
 * self-clears the bit (AZX_DCAPS_CORBRP_SELF_CLEAR).  Runs entirely
 * under reg_lock.
 */
static void azx_init_cmd_io(struct azx *chip)
{
	int timeout;

	spin_lock_irq(&chip->reg_lock);
	/* CORB set up */
	chip->corb.addr = chip->rb.addr;
	chip->corb.buf = (u32 *)chip->rb.area;
	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
	azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));

	/* set the corb size to 256 entries (ULI requires explicitly) */
	azx_writeb(chip, CORBSIZE, 0x02);
	/* set the corb write pointer to 0 */
	azx_writew(chip, CORBWP, 0);

	/* reset the corb hw read pointer */
	azx_writew(chip, CORBRP, AZX_CORBRP_RST);
	if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
		/* wait for the RST bit to read back as set... */
		for (timeout = 1000; timeout > 0; timeout--) {
			if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
				break;
			udelay(1);
		}
		if (timeout <= 0)
			dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
				azx_readw(chip, CORBRP));

		/* ...then clear it and wait for it to read back as zero */
		azx_writew(chip, CORBRP, 0);
		for (timeout = 1000; timeout > 0; timeout--) {
			if (azx_readw(chip, CORBRP) == 0)
				break;
			udelay(1);
		}
		if (timeout <= 0)
			dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
				azx_readw(chip, CORBRP));
	}

	/* enable corb dma */
	azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);

	/* RIRB set up */
	chip->rirb.addr = chip->rb.addr + 2048;
	chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
	chip->rirb.wp = chip->rirb.rp = 0;
	memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
	azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));

	/* set the rirb size to 256 entries (ULI requires explicitly) */
	azx_writeb(chip, RIRBSIZE, 0x02);
	/* reset the rirb hw write pointer */
	azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
	/* set N=1, get RIRB response interrupt for new entry */
	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
		azx_writew(chip, RINTCNT, 0xc0);
	else
		azx_writew(chip, RINTCNT, 1);
	/* enable rirb dma and response irq */
	azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
	spin_unlock_irq(&chip->reg_lock);
}
1056 static void azx_free_cmd_io(struct azx *chip)
1058 spin_lock_irq(&chip->reg_lock);
1059 /* disable ringbuffer DMAs */
1060 azx_writeb(chip, RIRBCTL, 0);
1061 azx_writeb(chip, CORBCTL, 0);
1062 spin_unlock_irq(&chip->reg_lock);
1065 static unsigned int azx_command_addr(u32 cmd)
1067 unsigned int addr = cmd >> 28;
1069 if (addr >= AZX_MAX_CODECS) {
1070 snd_BUG();
1071 addr = 0;
1074 return addr;
/* send a command */
/*
 * Queue one verb into the CORB under reg_lock.  Returns -EIO when the
 * write pointer reads as all-ones (controller likely in D3), -EAGAIN
 * when the ring is full, 0 on success.  The pending-command counter for
 * the codec is bumped before ringing the doorbell so the irq handler
 * can match the response.
 */
static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
{
	struct azx *chip = bus->private_data;
	unsigned int addr = azx_command_addr(val);
	unsigned int wp, rp;

	spin_lock_irq(&chip->reg_lock);

	/* add command to corb */
	wp = azx_readw(chip, CORBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		spin_unlock_irq(&chip->reg_lock);
		return -EIO;
	}
	wp++;
	wp %= AZX_MAX_CORB_ENTRIES;

	rp = azx_readw(chip, CORBRP);
	if (wp == rp) {
		/* oops, it's full */
		spin_unlock_irq(&chip->reg_lock);
		return -EAGAIN;
	}

	chip->rirb.cmds[addr]++;
	chip->corb.buf[wp] = cpu_to_le32(val);
	azx_writew(chip, CORBWP, wp);

	spin_unlock_irq(&chip->reg_lock);

	return 0;
}
#define AZX_RIRB_EX_UNSOL_EV	(1<<4)	/* response-ex: unsolicited event bit */

/* retrieve RIRB entry - called from interrupt handler
 * (also called directly in polling mode with chip->reg_lock held)
 */
static void azx_update_rirb(struct azx *chip)
{
	unsigned int rp, wp;
	unsigned int addr;
	u32 res, res_ex;

	wp = azx_readw(chip, RIRBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		return;
	}

	if (wp == chip->rirb.wp)
		return;		/* no new entries */
	chip->rirb.wp = wp;

	/* consume every entry between our read pointer and the hw wp */
	while (chip->rirb.rp != wp) {
		chip->rirb.rp++;
		chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;

		rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
		res = le32_to_cpu(chip->rirb.buf[rp]);
		addr = res_ex & 0xf;	/* codec address in low nibble */
		if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
				res, res_ex,
				chip->rirb.rp, wp);
			snd_BUG();
		} else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
			snd_hda_queue_unsol_event(chip->bus, res, res_ex);
		else if (chip->rirb.cmds[addr]) {
			chip->rirb.res[addr] = res;
			/* make res visible before decrementing cmds, which
			 * the waiter in azx_rirb_get_response() polls */
			smp_wmb();
			chip->rirb.cmds[addr]--;
		} else if (printk_ratelimit()) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
				res, res_ex,
				chip->last_cmd[addr]);
		}
	}
}
1158 /* receive a response */
1159 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1160 unsigned int addr)
1162 struct azx *chip = bus->private_data;
1163 unsigned long timeout;
1164 unsigned long loopcounter;
1165 int do_poll = 0;
1167 again:
1168 timeout = jiffies + msecs_to_jiffies(1000);
1170 for (loopcounter = 0;; loopcounter++) {
1171 if (chip->polling_mode || do_poll) {
1172 spin_lock_irq(&chip->reg_lock);
1173 azx_update_rirb(chip);
1174 spin_unlock_irq(&chip->reg_lock);
1176 if (!chip->rirb.cmds[addr]) {
1177 smp_rmb();
1178 bus->rirb_error = 0;
1180 if (!do_poll)
1181 chip->poll_count = 0;
1182 return chip->rirb.res[addr]; /* the last value */
1184 if (time_after(jiffies, timeout))
1185 break;
1186 if (bus->needs_damn_long_delay || loopcounter > 3000)
1187 msleep(2); /* temporary workaround */
1188 else {
1189 udelay(10);
1190 cond_resched();
1194 if (bus->no_response_fallback)
1195 return -1;
1197 if (!chip->polling_mode && chip->poll_count < 2) {
1198 dev_dbg(chip->card->dev,
1199 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1200 chip->last_cmd[addr]);
1201 do_poll = 1;
1202 chip->poll_count++;
1203 goto again;
1207 if (!chip->polling_mode) {
1208 dev_warn(chip->card->dev,
1209 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1210 chip->last_cmd[addr]);
1211 chip->polling_mode = 1;
1212 goto again;
1215 if (chip->msi) {
1216 dev_warn(chip->card->dev,
1217 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1218 chip->last_cmd[addr]);
1219 if (chip->ops->disable_msi_reset_irq(chip) &&
1220 chip->ops->disable_msi_reset_irq(chip) < 0) {
1221 bus->rirb_error = 1;
1222 return -1;
1224 goto again;
1227 if (chip->probing) {
1228 /* If this critical timeout happens during the codec probing
1229 * phase, this is likely an access to a non-existing codec
1230 * slot. Better to return an error and reset the system.
1232 return -1;
1235 /* a fatal communication error; need either to reset or to fallback
1236 * to the single_cmd mode
1238 bus->rirb_error = 1;
1239 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1240 bus->response_reset = 1;
1241 return -1; /* give a chance to retry */
1244 dev_err(chip->card->dev,
1245 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1246 chip->last_cmd[addr]);
1247 chip->single_cmd = 1;
1248 bus->response_reset = 0;
1249 /* release CORB/RIRB */
1250 azx_free_cmd_io(chip);
1251 /* disable unsolicited responses */
1252 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
1253 return -1;
1257 * Use the single immediate command instead of CORB/RIRB for simplicity
1259 * Note: according to Intel, this is not preferred use. The command was
1260 * intended for the BIOS only, and may get confused with unsolicited
1261 * responses. So, we shouldn't use it for normal operation from the
1262 * driver.
1263 * I left the codes, however, for debugging/testing purposes.
/* receive a response via the immediate-command interface
 * Polls the IRS valid bit for up to ~50us; stashes the result in
 * chip->rirb.res[addr] and returns 0, or -EIO on timeout.
 */
static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
{
	int timeout = 50;

	while (timeout--) {
		/* check IRV busy bit */
		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
			/* reuse rirb.res as the response return value */
			chip->rirb.res[addr] = azx_readl(chip, IR);
			return 0;
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
			azx_readw(chip, IRS));
	chip->rirb.res[addr] = -1;	/* mark as invalid */
	return -EIO;
}
/* send a command via the immediate-command registers
 * Waits up to ~50us for the ICB busy bit to clear, then writes the verb
 * and kicks it off; returns the wait result, or -EIO on busy timeout.
 */
static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
{
	struct azx *chip = bus->private_data;
	unsigned int addr = azx_command_addr(val);
	int timeout = 50;

	bus->rirb_error = 0;
	while (timeout--) {
		/* check ICB busy bit */
		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
			/* Clear IRV valid bit */
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_VALID);
			azx_writel(chip, IC, val);
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_BUSY);
			return azx_single_wait_for_response(chip, addr);
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev,
			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
			azx_readw(chip, IRS), val);
	return -EIO;
}
/* receive a response in single-command mode; the value was already
 * stored by azx_single_wait_for_response() */
static unsigned int azx_single_get_response(struct hda_bus *bus,
					    unsigned int addr)
{
	struct azx *chip = bus->private_data;
	return chip->rirb.res[addr];
}
1324 * The below are the main callbacks from hda_codec.
1326 * They are just the skeleton to call sub-callbacks according to the
1327 * current setting of chip->single_cmd.
1330 /* send a command */
1331 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1333 struct azx *chip = bus->private_data;
1335 if (chip->disabled)
1336 return 0;
1337 chip->last_cmd[azx_command_addr(val)] = val;
1338 if (chip->single_cmd)
1339 return azx_single_send_cmd(bus, val);
1340 else
1341 return azx_corb_send_cmd(bus, val);
1344 /* get a response */
1345 static unsigned int azx_get_response(struct hda_bus *bus,
1346 unsigned int addr)
1348 struct azx *chip = bus->private_data;
1349 if (chip->disabled)
1350 return 0;
1351 if (chip->single_cmd)
1352 return azx_single_get_response(bus, addr);
1353 else
1354 return azx_rirb_get_response(bus, addr);
1357 #ifdef CONFIG_SND_HDA_DSP_LOADER
1359 * DSP loading code (e.g. for CA0132)
/* use the first playback stream for loading DSP */
static struct azx_dev *
azx_get_dsp_loader_dev(struct azx *chip)
{
	return &chip->azx_dev[chip->playback_index_offset];
}
/* prepare the DSP-loader stream: borrow the first playback stream,
 * allocate a transfer buffer of byte_size, and program a single-fragment
 * BDL for it.
 * Returns the stream tag on success or a negative error code; the saved
 * stream state is restored on any failure path.
 */
static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
				unsigned int byte_size,
				struct snd_dma_buffer *bufp)
{
	u32 *bdl;
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);

	dsp_lock(azx_dev);
	spin_lock_irq(&chip->reg_lock);
	if (azx_dev->running || azx_dev->locked) {
		/* stream is in use by PCM or another DSP load */
		spin_unlock_irq(&chip->reg_lock);
		err = -EBUSY;
		goto unlock;
	}
	azx_dev->prepared = 0;
	chip->saved_azx_dev = *azx_dev;	/* snapshot for later restore */
	azx_dev->locked = 1;
	spin_unlock_irq(&chip->reg_lock);

	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
					 byte_size, bufp);
	if (err < 0)
		goto err_alloc;

	azx_dev->bufsize = byte_size;
	azx_dev->period_bytes = byte_size;
	azx_dev->format_val = format;

	azx_stream_reset(chip, azx_dev);

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	/* program a single BDL entry covering the whole buffer */
	azx_dev->frags = 0;
	bdl = (u32 *)azx_dev->bdl.area;
	err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
	if (err < 0)
		goto error;

	azx_setup_controller(chip, azx_dev);
	dsp_unlock(azx_dev);
	return azx_dev->stream_tag;

 error:
	chip->ops->dma_free_pages(chip, bufp);
 err_alloc:
	spin_lock_irq(&chip->reg_lock);
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
 unlock:
	dsp_unlock(azx_dev);
	return err;
}
/* start/stop the DSP-loader stream DMA */
static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
{
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	if (start)
		azx_stream_start(chip, azx_dev);
	else
		azx_stream_stop(chip, azx_dev);
	/* record the state after the hardware has been toggled */
	azx_dev->running = start;
}
/* release the DSP-loader buffer and restore the borrowed stream state */
static void azx_load_dsp_cleanup(struct hda_bus *bus,
				 struct snd_dma_buffer *dmab)
{
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	if (!dmab->area || !azx_dev->locked)
		return;		/* never prepared, or already cleaned up */

	dsp_lock(azx_dev);
	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
	azx_sd_writel(chip, azx_dev, SD_CTL, 0);
	azx_dev->bufsize = 0;
	azx_dev->period_bytes = 0;
	azx_dev->format_val = 0;

	chip->ops->dma_free_pages(chip, dmab);
	dmab->area = NULL;	/* mark as freed for callers */

	spin_lock_irq(&chip->reg_lock);
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;	/* restore the snapshot */
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
	dsp_unlock(azx_dev);
}
1470 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1472 int azx_alloc_stream_pages(struct azx *chip)
1474 int i, err;
1476 for (i = 0; i < chip->num_streams; i++) {
1477 dsp_lock_init(&chip->azx_dev[i]);
1478 /* allocate memory for the BDL for each stream */
1479 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1480 BDL_SIZE,
1481 &chip->azx_dev[i].bdl);
1482 if (err < 0)
1483 return -ENOMEM;
1485 /* allocate memory for the position buffer */
1486 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1487 chip->num_streams * 8, &chip->posbuf);
1488 if (err < 0)
1489 return -ENOMEM;
1491 /* allocate CORB/RIRB */
1492 err = azx_alloc_cmd_io(chip);
1493 if (err < 0)
1494 return err;
1495 return 0;
1497 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
/* free everything azx_alloc_stream_pages() allocated; each area is
 * checked so this is safe after a partial allocation failure */
void azx_free_stream_pages(struct azx *chip)
{
	int i;
	if (chip->azx_dev) {
		for (i = 0; i < chip->num_streams; i++)
			if (chip->azx_dev[i].bdl.area)
				chip->ops->dma_free_pages(
					chip, &chip->azx_dev[i].bdl);
	}
	if (chip->rb.area)
		chip->ops->dma_free_pages(chip, &chip->rb);
	if (chip->posbuf.area)
		chip->ops->dma_free_pages(chip, &chip->posbuf);
}
EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1516 * Lowlevel interface
/* enter link reset: clear GCTL.CRST and wait up to 100ms for the bit
 * to read back as zero */
void azx_enter_link_reset(struct azx *chip)
{
	unsigned long timeout;

	/* reset controller */
	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);

	timeout = jiffies + msecs_to_jiffies(100);
	while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
	       time_before(jiffies, timeout))
		usleep_range(500, 1000);
}
EXPORT_SYMBOL_GPL(azx_enter_link_reset);
/* exit link reset: set GCTL.CRST and wait up to 100ms for the register
 * to read back non-zero */
static void azx_exit_link_reset(struct azx *chip)
{
	unsigned long timeout;

	azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);

	timeout = jiffies + msecs_to_jiffies(100);
	while (!azx_readb(chip, GCTL) &&
	       time_before(jiffies, timeout))
		usleep_range(500, 1000);
}
/* reset codec link; when full_reset is false only the ready/unsol/codec
 * detection part runs.
 * Returns 0, or -EBUSY when the controller does not come out of reset.
 */
static int azx_reset(struct azx *chip, bool full_reset)
{
	if (!full_reset)
		goto __skip;

	/* clear STATESTS */
	azx_writew(chip, STATESTS, STATESTS_INT_MASK);

	/* reset controller */
	azx_enter_link_reset(chip);

	/* delay for >= 100us for codec PLL to settle per spec
	 * Rev 0.9 section 5.5.1
	 */
	usleep_range(500, 1000);

	/* Bring controller out of reset */
	azx_exit_link_reset(chip);

	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
	usleep_range(1000, 1200);

 __skip:
	/* check to see if controller is ready */
	if (!azx_readb(chip, GCTL)) {
		dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
		return -EBUSY;
	}

	/* Accept unsolicited responses */
	if (!chip->single_cmd)
		azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
			   AZX_GCTL_UNSOL);

	/* detect codecs: STATESTS latches which SDI lines signalled
	 * presence after reset; keep a previously detected mask */
	if (!chip->codec_mask) {
		chip->codec_mask = azx_readw(chip, STATESTS);
		dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
			chip->codec_mask);
	}

	return 0;
}
/* enable controller-level interrupts (per-stream SIE bits are set
 * separately when a stream starts) */
static void azx_int_enable(struct azx *chip)
{
	/* enable controller CIE and GIE */
	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
		   AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
}
/* disable all interrupt sources: per-stream, SIE, then CIE/GIE */
static void azx_int_disable(struct azx *chip)
{
	int i;

	/* disable interrupts in stream descriptor */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = &chip->azx_dev[i];
		azx_sd_writeb(chip, azx_dev, SD_CTL,
			      azx_sd_readb(chip, azx_dev, SD_CTL) &
			      ~SD_INT_MASK);
	}

	/* disable SIE for all streams */
	azx_writeb(chip, INTCTL, 0);

	/* disable controller CIE and GIE */
	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
		   ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
}
/* clear all pending interrupt status bits (write-1-to-clear registers) */
static void azx_int_clear(struct azx *chip)
{
	int i;

	/* clear stream status */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = &chip->azx_dev[i];
		azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
	}

	/* clear STATESTS */
	azx_writew(chip, STATESTS, STATESTS_INT_MASK);

	/* clear rirb status */
	azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);

	/* clear int status */
	azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
}
1643 * reset and start the controller registers
/* reset and start the controller registers; no-op when the chip is
 * already marked initialized */
void azx_init_chip(struct azx *chip, bool full_reset)
{
	if (chip->initialized)
		return;

	/* reset controller */
	azx_reset(chip, full_reset);

	/* initialize interrupts */
	azx_int_clear(chip);
	azx_int_enable(chip);

	/* initialize the codec command I/O */
	if (!chip->single_cmd)
		azx_init_cmd_io(chip);

	/* program the position buffer */
	azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
	azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));

	chip->initialized = 1;
}
EXPORT_SYMBOL_GPL(azx_init_chip);
/* stop the controller: inverse of azx_init_chip(); no-op when the chip
 * was never initialized */
void azx_stop_chip(struct azx *chip)
{
	if (!chip->initialized)
		return;

	/* disable interrupts */
	azx_int_disable(chip);
	azx_int_clear(chip);

	/* disable CORB/RIRB */
	azx_free_cmd_io(chip);

	/* disable position buffer */
	azx_writel(chip, DPLBASE, 0);
	azx_writel(chip, DPUBASE, 0);

	chip->initialized = 0;
}
EXPORT_SYMBOL_GPL(azx_stop_chip);
1690 * interrupt handler
/* interrupt handler: acknowledges per-stream completions (reporting
 * period elapse to the PCM core) and consumes RIRB responses.
 * Returns IRQ_NONE when the IRQ is not ours (runtime-suspended,
 * disabled, or no status bits set).
 */
irqreturn_t azx_interrupt(int irq, void *dev_id)
{
	struct azx *chip = dev_id;
	struct azx_dev *azx_dev;
	u32 status;
	u8 sd_status;
	int i;

#ifdef CONFIG_PM
	/* a runtime-suspended device cannot have raised this IRQ */
	if (azx_has_pm_runtime(chip))
		if (!pm_runtime_active(chip->card->dev))
			return IRQ_NONE;
#endif

	spin_lock(&chip->reg_lock);

	if (chip->disabled) {
		spin_unlock(&chip->reg_lock);
		return IRQ_NONE;
	}

	status = azx_readl(chip, INTSTS);
	if (status == 0 || status == 0xffffffff) {
		/* nothing pending, or the device is gone (all-ones read) */
		spin_unlock(&chip->reg_lock);
		return IRQ_NONE;
	}

	for (i = 0; i < chip->num_streams; i++) {
		azx_dev = &chip->azx_dev[i];
		if (status & azx_dev->sd_int_sta_mask) {
			sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
			azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
			if (!azx_dev->substream || !azx_dev->running ||
			    !(sd_status & SD_INT_COMPLETE))
				continue;
			/* check whether this IRQ is really acceptable */
			if (!chip->ops->position_check ||
			    chip->ops->position_check(chip, azx_dev)) {
				/* period_elapsed may sleep-free re-enter the
				 * PCM core; drop our lock around the call */
				spin_unlock(&chip->reg_lock);
				snd_pcm_period_elapsed(azx_dev->substream);
				spin_lock(&chip->reg_lock);
			}
		}
	}

	/* clear rirb int */
	status = azx_readb(chip, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE) {
			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
				udelay(80);
			azx_update_rirb(chip);
		}
		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
	}

	spin_unlock(&chip->reg_lock);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(azx_interrupt);
 1755 * Codec interface
1759 * Probe the given codec address
/*
 * Probe the given codec address by reading the vendor-id parameter of
 * the root node; returns 0 on success, -EIO when no response arrives.
 */
static int probe_codec(struct azx *chip, int addr)
{
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res;

	mutex_lock(&chip->bus->core.cmd_mutex);
	chip->probing = 1;	/* makes azx_rirb_get_response() fail fast */
	azx_send_cmd(chip->bus, cmd);
	res = azx_get_response(chip->bus, addr);
	chip->probing = 0;
	mutex_unlock(&chip->bus->core.cmd_mutex);
	if (res == -1)
		return -EIO;
	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
	return 0;
}
/* full bus reset callback: stop and re-init the controller, then let
 * the codec core re-sync its state */
static void azx_bus_reset(struct hda_bus *bus)
{
	struct azx *chip = bus->private_data;

	bus->in_reset = 1;	/* suppress a recursive reset attempt */
	azx_stop_chip(chip);
	azx_init_chip(chip, true);
	if (chip->initialized)
		snd_hda_bus_reset(chip->bus);
	bus->in_reset = 0;
}
1791 static int get_jackpoll_interval(struct azx *chip)
1793 int i;
1794 unsigned int j;
1796 if (!chip->jackpoll_ms)
1797 return 0;
1799 i = chip->jackpoll_ms[chip->dev_index];
1800 if (i == 0)
1801 return 0;
1802 if (i < 50 || i > 60000)
1803 j = 0;
1804 else
1805 j = msecs_to_jiffies(i);
1806 if (j == 0)
1807 dev_warn(chip->card->dev,
1808 "jackpoll_ms value out of range: %d\n", i);
1809 return j;
/* bus callbacks dispatched from the HDA codec core */
static struct hda_bus_ops bus_ops = {
	.command = azx_send_cmd,
	.get_response = azx_get_response,
	.attach_pcm = azx_attach_pcm_stream,
	.bus_reset = azx_bus_reset,
#ifdef CONFIG_SND_HDA_DSP_LOADER
	.load_dsp_prepare = azx_load_dsp_prepare,
	.load_dsp_trigger = azx_load_dsp_trigger,
	.load_dsp_cleanup = azx_load_dsp_cleanup,
#endif
};
1824 /* HD-audio bus initialization */
/* HD-audio bus initialization: create the hda_bus object, wire it up to
 * this controller, and apply chipset-specific quirk flags */
int azx_bus_create(struct azx *chip, const char *model)
{
	struct hda_bus *bus;
	int err;

	err = snd_hda_bus_new(chip->card, &bus);
	if (err < 0)
		return err;

	chip->bus = bus;
	bus->private_data = chip;
	bus->pci = chip->pci;
	bus->modelname = model;
	bus->ops = bus_ops;

	if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
		dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
		bus->needs_damn_long_delay = 1;
	}

	/* AMD chipsets often cause the communication stalls upon certain
	 * sequence like the pin-detection.  It seems that forcing the synced
	 * access works around the stall.  Grrr...
	 */
	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
		bus->core.sync_write = 1;
		bus->allow_bus_reset = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_bus_create);
1859 /* Probe codecs */
/* Probe codecs on up to max_slots addresses (default AZX_DEFAULT_CODECS)
 * and create a codec instance for each responding slot.
 * Returns 0, or -ENXIO when no codec could be initialized.
 */
int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
{
	struct hda_bus *bus = chip->bus;
	int c, codecs, err;

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSen give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				chip->codec_mask &= ~(1 << c);
				/* More badly, accessing to a non-existing
				 * codec often screws up the controller chip,
				 * and disturbs the further communications.
				 * Thus if an error occurs during probing,
				 * better to reset the controller chip to
				 * get back to the sanity state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(bus, bus->card, c, &codec);
			if (err < 0)
				continue;	/* skip this slot, keep going */
			codec->jackpoll_interval = get_jackpoll_interval(chip);
			codec->beep_mode = chip->beep_mode;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_probe_codecs);
/* configure each codec instance attached to this bus; always returns 0 */
int azx_codec_configure(struct azx *chip)
{
	struct hda_codec *codec;
	list_for_each_codec(codec, chip->bus) {
		snd_hda_codec_configure(codec);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);
1924 static bool is_input_stream(struct azx *chip, unsigned char index)
1926 return (index >= chip->capture_index_offset &&
1927 index < chip->capture_index_offset + chip->capture_streams);
1930 /* initialize SD streams */
/* initialize SD streams: set up per-stream register base, interrupt
 * mask, position-buffer slot and stream tag; always returns 0 */
int azx_init_stream(struct azx *chip)
{
	int i;
	int in_stream_tag = 0;
	int out_stream_tag = 0;

	/* initialize each stream (aka device)
	 * assign the starting bdl address to each stream (device)
	 * and initialize
	 */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = &chip->azx_dev[i];
		/* each stream owns 8 bytes of the shared position buffer */
		azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
		/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
		azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
		/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
		azx_dev->sd_int_sta_mask = 1 << i;
		azx_dev->index = i;

		/* stream tag must be unique throughout
		 * the stream direction group,
		 * valid values 1...15
		 * use separate stream tag if the flag
		 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
		 */
		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
			azx_dev->stream_tag =
				is_input_stream(chip, i) ?
				++in_stream_tag :
				++out_stream_tag;
		else
			azx_dev->stream_tag = i + 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_init_stream);
1969 MODULE_LICENSE("GPL");
1970 MODULE_DESCRIPTION("Common HDA driver functions");