/*
 * Implementation of primary alsa driver code base for Intel HD Audio.
 *
 * Copyright(c) 2004 Intel Corporation. All rights reserved.
 *
 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
 *                    PeiSen Hou <pshou@realtek.com.tw>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/initval.h>
#include "hda_controller.h"

#define CREATE_TRACE_POINTS
#include "hda_intel_trace.h"
/* DSP lock helpers */
#ifdef CONFIG_SND_HDA_DSP_LOADER
#define dsp_lock_init(dev)	mutex_init(&(dev)->dsp_mutex)
#define dsp_lock(dev)		mutex_lock(&(dev)->dsp_mutex)
#define dsp_unlock(dev)		mutex_unlock(&(dev)->dsp_mutex)
#define dsp_is_locked(dev)	((dev)->locked)
#else
#define dsp_lock_init(dev)	do {} while (0)
#define dsp_lock(dev)		do {} while (0)
#define dsp_unlock(dev)		do {} while (0)
#define dsp_is_locked(dev)	0
#endif
/*
 * AZX stream operations.
 */

/* start a stream */
static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
{
	/*
	 * Before stream start, initialize parameter
	 */
	azx_dev->insufficient = 1;

	/* enable SIE */
	azx_writel(chip, INTCTL,
		   azx_readl(chip, INTCTL) | (1 << azx_dev->index));
	/* set DMA start and interrupt mask */
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_DMA_START | SD_INT_MASK);
}
/* stop DMA */
static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
{
	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) &
		      ~(SD_CTL_DMA_START | SD_INT_MASK));
	azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
}
/* stop a stream */
void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
{
	azx_stream_clear(chip, azx_dev);
	/* disable SIE */
	azx_writel(chip, INTCTL,
		   azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
}
EXPORT_SYMBOL_GPL(azx_stream_stop);
/* reset a stream */
static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned char val;
	int timeout;

	azx_stream_clear(chip, azx_dev);

	azx_sd_writeb(chip, azx_dev, SD_CTL,
		      azx_sd_readb(chip, azx_dev, SD_CTL) |
		      SD_CTL_STREAM_RESET);
	udelay(3);
	timeout = 300;
	while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		 SD_CTL_STREAM_RESET) && --timeout)
		;
	val &= ~SD_CTL_STREAM_RESET;
	azx_sd_writeb(chip, azx_dev, SD_CTL, val);
	udelay(3);

	timeout = 300;
	/* waiting for hardware to report that the stream is out of reset */
	while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
		SD_CTL_STREAM_RESET) && --timeout)
		;

	/* reset first position - may not be synced with hw at this time */
	*azx_dev->posbuf = 0;
}
/*
 * set up the SD for streaming
 */
static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned int val;

	/* make sure the run bit is zero for SD */
	azx_stream_clear(chip, azx_dev);

	/* program the stream_tag */
	val = azx_sd_readl(chip, azx_dev, SD_CTL);
	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
	if (!azx_snoop(chip))
		val |= SD_CTL_TRAFFIC_PRIO;
	azx_sd_writel(chip, azx_dev, SD_CTL, val);

	/* program the length of samples in cyclic buffer */
	azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);

	/* program the stream format */
	/* this value needs to be the same as the one programmed */
	azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);

	/* program the stream LVI (last valid index) of the BDL */
	azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);

	/* program the BDL address */
	/* lower BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
	/* upper BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPU,
		      upper_32_bits(azx_dev->bdl.addr));

	/* enable the position buffer */
	if (chip->position_fix[0] != POS_FIX_LPIB ||
	    chip->position_fix[1] != POS_FIX_LPIB) {
		if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
			azx_writel(chip, DPLBASE,
				(u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
	}

	/* set the interrupt enable bits in the descriptor control register */
	azx_sd_writel(chip, azx_dev, SD_CTL,
		      azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);

	return 0;
}
/* assign a stream for the PCM */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	int dev, i, nums;
	struct azx_dev *res = NULL;
	/* make a non-zero unique key for the substream */
	int key = (substream->pcm->device << 16) | (substream->number << 2) |
		(substream->stream + 1);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dev = chip->playback_index_offset;
		nums = chip->playback_streams;
	} else {
		dev = chip->capture_index_offset;
		nums = chip->capture_streams;
	}
	for (i = 0; i < nums; i++, dev++) {
		struct azx_dev *azx_dev = &chip->azx_dev[dev];
		dsp_lock(azx_dev);
		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
			if (azx_dev->assigned_key == key) {
				azx_dev->opened = 1;
				azx_dev->assigned_key = key;
				dsp_unlock(azx_dev);
				return azx_dev;
			}
			if (!res)
				res = azx_dev;
		}
		dsp_unlock(azx_dev);
	}
	if (res) {
		dsp_lock(res);
		res->opened = 1;
		res->assigned_key = key;
		dsp_unlock(res);
	}
	return res;
}
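/*
 * Example of the assignment key above (hypothetical values, for
 * illustration only): PCM device 1, substream 0, playback direction
 * (SNDRV_PCM_STREAM_PLAYBACK == 0) gives
 *   key = (1 << 16) | (0 << 2) | (0 + 1) = 0x10001,
 * which is non-zero and distinct for each device/substream/direction
 * combination, so a substream tends to get back the azx_dev it used last.
 */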
/* release the assigned stream */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	azx_dev->opened = 0;
}
static cycle_t azx_cc_read(const struct cyclecounter *cc)
{
	struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
	struct snd_pcm_substream *substream = azx_dev->substream;
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;

	return azx_readl(chip, WALLCLK);
}
static void azx_timecounter_init(struct snd_pcm_substream *substream,
				 bool force, cycle_t last)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct timecounter *tc = &azx_dev->azx_tc;
	struct cyclecounter *cc = &azx_dev->azx_cc;
	u64 nsec;

	cc->read = azx_cc_read;
	cc->mask = CLOCKSOURCE_MASK(32);

	/*
	 * Converting from 24 MHz to ns means applying a 125/3 factor.
	 * To avoid any saturation issues in intermediate operations,
	 * the 125 factor is applied first. The division is applied
	 * last after reading the timecounter value.
	 * Applying the 1/3 factor as part of the multiplication
	 * requires at least 20 bits for a decent precision, however
	 * overflows occur after about 4 hours or less, which is not
	 * an option.
	 */
	cc->mult = 125; /* saturation after 195 years */
	cc->shift = 0;

	nsec = 0; /* audio time is elapsed time since trigger */
	timecounter_init(tc, cc, nsec);
	if (force)
		/*
		 * force timecounter to use predefined value,
		 * used for synchronized starts
		 */
		tc->cycle_last = last;
}
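/*
 * Worked example of the 24 MHz -> ns conversion above (illustrative
 * numbers only): a wallclock delta of 24,000 cycles (1 ms at 24 MHz) is
 * first scaled by cc->mult = 125, giving 3,000,000; the remaining 1/3
 * factor is applied after the timecounter is read (see the div_u64(nsec, 3)
 * in azx_get_wallclock_tstamp() below), yielding 1,000,000 ns = 1 ms.
 */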
static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
				  u64 nsec)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	u64 codec_frames, codec_nsecs;

	if (!hinfo->ops.get_delay)
		return nsec;

	codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
	codec_nsecs = div_u64(codec_frames * 1000000000LL,
			      substream->runtime->rate);

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		return nsec + codec_nsecs;

	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
}
/*
 * set up a BDL entry
 */
static int setup_bdle(struct azx *chip,
		      struct snd_dma_buffer *dmab,
		      struct azx_dev *azx_dev, u32 **bdlp,
		      int ofs, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	while (size > 0) {
		dma_addr_t addr;
		int chunk;

		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
			return -EINVAL;

		addr = snd_sgbuf_get_addr(dmab, ofs);
		/* program the address field of the BDL entry */
		bdl[0] = cpu_to_le32((u32)addr);
		bdl[1] = cpu_to_le32(upper_32_bits(addr));
		/* program the size field of the BDL entry */
		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
		/* one BDLE cannot cross 4K boundary on CTHDA chips */
		if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
			u32 remain = 0x1000 - (ofs & 0xfff);
			if (chunk > remain)
				chunk = remain;
		}
		bdl[2] = cpu_to_le32(chunk);
		/* program the IOC to enable interrupt
		 * only when the whole fragment is processed
		 */
		size -= chunk;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
		bdl += 4;
		azx_dev->frags++;
		ofs += chunk;
	}
	*bdlp = bdl;
	return ofs;
}
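/*
 * For reference, each buffer descriptor list entry written above is four
 * 32-bit little-endian words:
 *   bdl[0] = buffer address, low 32 bits
 *   bdl[1] = buffer address, high 32 bits
 *   bdl[2] = length of the chunk in bytes
 *   bdl[3] = IOC flag (bit 0): raise an interrupt when this entry completes
 */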
/*
 * set up BDL entries
 */
static int azx_setup_periods(struct azx *chip,
			     struct snd_pcm_substream *substream,
			     struct azx_dev *azx_dev)
{
	u32 *bdl;
	int i, ofs, periods, period_bytes;
	int pos_adj = 0;

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	period_bytes = azx_dev->period_bytes;
	periods = azx_dev->bufsize / period_bytes;

	/* program the initial BDL entries */
	bdl = (u32 *)azx_dev->bdl.area;
	ofs = 0;
	azx_dev->frags = 0;

	if (chip->bdl_pos_adj)
		pos_adj = chip->bdl_pos_adj[chip->dev_index];
	if (!azx_dev->no_period_wakeup && pos_adj > 0) {
		struct snd_pcm_runtime *runtime = substream->runtime;
		int pos_align = pos_adj;
		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
		if (!pos_adj)
			pos_adj = pos_align;
		else
			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
				pos_align;
		pos_adj = frames_to_bytes(runtime, pos_adj);
		if (pos_adj >= period_bytes) {
			dev_warn(chip->card->dev, "Too big adjustment %d\n",
				 pos_adj);
			pos_adj = 0;
		} else {
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev,
					 &bdl, ofs, pos_adj, true);
			if (ofs < 0)
				goto error;
		}
	} else
		pos_adj = 0;

	for (i = 0; i < periods; i++) {
		if (i == periods - 1 && pos_adj)
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev, &bdl, ofs,
					 period_bytes - pos_adj, 0);
		else
			ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
					 azx_dev, &bdl, ofs,
					 period_bytes,
					 !azx_dev->no_period_wakeup);
		if (ofs < 0)
			goto error;
	}
	return 0;

 error:
	dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
		azx_dev->bufsize, period_bytes);
	return -EINVAL;
}
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	unsigned long flags;

	mutex_lock(&chip->open_mutex);
	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = NULL;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	azx_release_device(azx_dev);
	hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	return 0;
}
static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	int ret;

	dsp_lock(get_azx_dev(substream));
	if (dsp_is_locked(get_azx_dev(substream))) {
		ret = -EBUSY;
		goto unlock;
	}

	ret = chip->ops->substream_alloc_pages(chip, substream,
					       params_buffer_bytes(hw_params));
 unlock:
	dsp_unlock(get_azx_dev(substream));
	return ret;
}
static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx *chip = apcm->chip;
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	int err;

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev)) {
		azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
		azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
		azx_sd_writel(chip, azx_dev, SD_CTL, 0);
		azx_dev->bufsize = 0;
		azx_dev->period_bytes = 0;
		azx_dev->format_val = 0;
	}

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	err = chip->ops->substream_free_pages(chip, substream);
	azx_dev->prepared = 0;
	dsp_unlock(azx_dev);
	return err;
}
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int bufsize, period_bytes, format_val, stream_tag;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	azx_stream_reset(chip, azx_dev);
	format_val = snd_hda_calc_stream_format(runtime->rate,
						runtime->channels,
						runtime->format,
						hinfo->maxbps,
						ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	bufsize = snd_pcm_lib_buffer_bytes(substream);
	period_bytes = snd_pcm_lib_period_bytes(substream);

	dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
		bufsize, format_val);

	if (bufsize != azx_dev->bufsize ||
	    period_bytes != azx_dev->period_bytes ||
	    format_val != azx_dev->format_val ||
	    runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
		azx_dev->bufsize = bufsize;
		azx_dev->period_bytes = period_bytes;
		azx_dev->format_val = format_val;
		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
		err = azx_setup_periods(chip, substream, azx_dev);
		if (err < 0)
			goto unlock;
	}

	/* when LPIB delay correction gives a small negative value,
	 * we ignore it; currently set the threshold statically to
	 * 64 frames
	 */
	if (runtime->period_size > 64)
		azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
	else
		azx_dev->delay_negative_threshold = 0;

	/* wallclk has 24 MHz clock source */
	azx_dev->period_wallclk = (((runtime->period_size * 24000) /
						runtime->rate) * 1000);
	azx_setup_controller(chip, azx_dev);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		azx_dev->fifo_size =
			azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
	else
		azx_dev->fifo_size = 0;

	stream_tag = azx_dev->stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				    azx_dev->format_val, substream);

 unlock:
	if (!err)
		azx_dev->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}
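/*
 * Illustration of the period_wallclk computation above (hypothetical
 * values): with a 1024-frame period at 48000 Hz,
 *   ((1024 * 24000) / 48000) * 1000 = 512 * 1000 = 512000
 * wallclock ticks, i.e. 512000 / 24 MHz ~= 21.33 ms, which matches the
 * period duration of 1024 / 48000 s.
 */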
static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	int rstart = 0, start, nsync = 0, sbits = 0;
	int nwait, timeout;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		rstart = 1;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = 1;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = 0;
		break;
	default:
		return -EINVAL;
	}

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->index;
		nsync++;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&chip->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			   azx_readl(chip, OLD_SSYNC) | sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
			if (!rstart)
				azx_dev->start_wallclk -=
						azx_dev->period_wallclk;
			azx_stream_start(chip, azx_dev);
		} else {
			azx_stream_stop(chip, azx_dev);
		}
		azx_dev->running = start;
	}
	spin_unlock(&chip->reg_lock);
	if (start) {
		/* wait until all FIFOs get ready */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
				      SD_STS_FIFO_READY))
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	} else {
		/* wait until all RUN bits are cleared */
		for (timeout = 5000; timeout; timeout--) {
			nwait = 0;
			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_dev = get_azx_dev(s);
				if (azx_sd_readb(chip, azx_dev, SD_CTL) &
				    SD_CTL_DMA_START)
					nwait++;
			}
			if (!nwait)
				break;
			cpu_relax();
		}
	}
	spin_lock(&chip->reg_lock);
	/* reset SYNC bits */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		azx_writel(chip, OLD_SSYNC,
			   azx_readl(chip, OLD_SSYNC) & ~sbits);
	else
		azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
	if (start) {
		azx_timecounter_init(substream, 0, 0);
		if (nsync > 1) {
			cycle_t cycle_last;

			/* same start cycle for master and group */
			azx_dev = get_azx_dev(substream);
			cycle_last = azx_dev->azx_tc.cycle_last;

			snd_pcm_group_for_each_entry(s, substream) {
				if (s->pcm->card != substream->pcm->card)
					continue;
				azx_timecounter_init(s, 1, cycle_last);
			}
		}
	}
	spin_unlock(&chip->reg_lock);
	return 0;
}
/* get the current DMA position with correction on VIA chips */
static unsigned int azx_via_get_position(struct azx *chip,
					 struct azx_dev *azx_dev)
{
	unsigned int link_pos, mini_pos, bound_pos;
	unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
	unsigned int fifo_size;

	link_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
	if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* Playback, no problem using link position */
		return link_pos;
	}

	/* Capture */
	/* For new chipset,
	 * use mod to get the DMA position just like old chipset
	 */
	mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
	mod_dma_pos %= azx_dev->period_bytes;

	/* azx_dev->fifo_size can't get FIFO size of in stream.
	 * Get from base address + offset.
	 */
	fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);

	if (azx_dev->insufficient) {
		/* Link position is never greater than FIFO size */
		if (link_pos <= fifo_size)
			return 0;

		azx_dev->insufficient = 0;
	}

	if (link_pos <= fifo_size)
		mini_pos = azx_dev->bufsize + link_pos - fifo_size;
	else
		mini_pos = link_pos - fifo_size;

	/* Find nearest previous boundary */
	mod_mini_pos = mini_pos % azx_dev->period_bytes;
	mod_link_pos = link_pos % azx_dev->period_bytes;
	if (mod_link_pos >= fifo_size)
		bound_pos = link_pos - mod_link_pos;
	else if (mod_dma_pos >= mod_mini_pos)
		bound_pos = mini_pos - mod_mini_pos;
	else {
		bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
		if (bound_pos >= azx_dev->bufsize)
			bound_pos = 0;
	}

	/* Calculate real DMA position we want */
	return bound_pos + mod_dma_pos;
}
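/*
 * Worked example for the capture-path correction above (hypothetical
 * numbers): bufsize = 16384, period_bytes = 4096, fifo_size = 256,
 * link_pos = 4200, *posbuf = 8000.  Then mod_dma_pos = 8000 % 4096 = 3904,
 * mini_pos = 4200 - 256 = 3944, mod_mini_pos = 3944,
 * mod_link_pos = 4200 % 4096 = 104.  Since mod_link_pos < fifo_size and
 * mod_dma_pos < mod_mini_pos, bound_pos = 3944 - 3944 + 4096 = 4096 and
 * the reported position is 4096 + 3904 = 8000.
 */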
unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev,
			      bool with_check)
{
	struct snd_pcm_substream *substream = azx_dev->substream;
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	unsigned int pos;
	int stream = substream->stream;
	struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
	int delay = 0;

	switch (chip->position_fix[stream]) {
	case POS_FIX_LPIB:
		/* read LPIB */
		pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
		break;
	case POS_FIX_VIACOMBO:
		pos = azx_via_get_position(chip, azx_dev);
		break;
	default:
		/* use the position buffer */
		pos = le32_to_cpu(*azx_dev->posbuf);
		if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
			if (!pos || pos == (u32)-1) {
				dev_info(chip->card->dev,
					 "Invalid position buffer, using LPIB read method instead.\n");
				chip->position_fix[stream] = POS_FIX_LPIB;
				pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
			} else
				chip->position_fix[stream] = POS_FIX_POSBUF;
		}
		break;
	}

	if (pos >= azx_dev->bufsize)
		pos = 0;

	/* calculate runtime delay from LPIB */
	if (substream->runtime &&
	    chip->position_fix[stream] == POS_FIX_POSBUF &&
	    (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
		unsigned int lpib_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
		if (stream == SNDRV_PCM_STREAM_PLAYBACK)
			delay = pos - lpib_pos;
		else
			delay = lpib_pos - pos;
		if (delay < 0) {
			if (delay >= azx_dev->delay_negative_threshold)
				delay = 0;
			else
				delay += azx_dev->bufsize;
		}
		if (delay >= azx_dev->period_bytes) {
			dev_info(chip->card->dev,
				 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
				 delay, azx_dev->period_bytes);
			delay = 0;
			chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
		}
		delay = bytes_to_frames(substream->runtime, delay);
	}

	if (substream->runtime) {
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);
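/*
 * Example of the LPIB delay correction above (hypothetical values): for a
 * playback stream with the position buffer reporting pos = 8192 bytes and
 * LPIB reporting lpib_pos = 7936 bytes, delay = 8192 - 7936 = 256 bytes of
 * audio still queued between the DMA position and the link, which is then
 * converted to frames and added to runtime->delay.
 */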
static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	return bytes_to_frames(substream->runtime,
			       azx_get_position(chip, azx_dev, false));
}
static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
				    struct timespec *ts)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	u64 nsec;

	nsec = timecounter_read(&azx_dev->azx_tc);
	nsec = div_u64(nsec, 3); /* can be optimized */
	nsec = azx_adjust_codec_delay(substream, nsec);

	*ts = ns_to_timespec(nsec);

	return 0;
}
static struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	int err;
	int buff_step;

	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	if (azx_dev == NULL) {
		mutex_unlock(&chip->open_mutex);
		return -EBUSY;
	}
	runtime->hw = azx_pcm_hw;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be multiple of 128
		   bytes. This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes. For example for 44.1kHz, a period size set
		   to 20ms will be rounded to 19.59ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes, still need to
		   be multiple of 4 bytes (HDA spec). Tested on Intel
		   HDA controllers, may not work on all devices where
		   option needs to be disabled */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	snd_hda_power_up_d3wait(apcm->codec);
	err = hinfo->ops.open(hinfo, apcm->codec, substream);
	if (err < 0) {
		azx_release_device(azx_dev);
		snd_hda_power_down(apcm->codec);
		mutex_unlock(&chip->open_mutex);
		return err;
	}
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		hinfo->ops.close(hinfo, apcm->codec, substream);
		snd_hda_power_down(apcm->codec);
		mutex_unlock(&chip->open_mutex);
		return -EINVAL;
	}

	/* disable WALLCLOCK timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;

	spin_lock_irqsave(&chip->reg_lock, flags);
	azx_dev->substream = substream;
	azx_dev->running = 0;
	spin_unlock_irqrestore(&chip->reg_lock, flags);

	runtime->private_data = azx_dev;
	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;
}
static int azx_pcm_mmap(struct snd_pcm_substream *substream,
			struct vm_area_struct *area)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	if (chip->ops->pcm_mmap_prepare)
		chip->ops->pcm_mmap_prepare(substream, area);
	return snd_pcm_lib_default_mmap(substream, area);
}
static struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.wall_clock = azx_get_wallclock_tstamp,
	.mmap = azx_pcm_mmap,
	.page = snd_pcm_sgbuf_ops_page,
};
static void azx_pcm_free(struct snd_pcm *pcm)
{
	struct azx_pcm *apcm = pcm->private_data;
	if (apcm) {
		list_del(&apcm->list);
		kfree(apcm);
	}
}
#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)

static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
				 struct hda_pcm *cpcm)
{
	struct azx *chip = bus->private_data;
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;

	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL)
		return -ENOMEM;
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	pcm->private_data = apcm;
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		apcm->hinfo[s] = &cpcm->stream[s];
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      chip->card->dev,
					      size, MAX_PREALLOC_SIZE);
	pcm->dev = &codec->dev;
	return 0;
}
/*
 * CORB / RIRB interface
 */
static int azx_alloc_cmd_io(struct azx *chip)
{
	int err;

	/* single page (at least 4096 bytes) must suffice for both ring buffers */
	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
					 PAGE_SIZE, &chip->rb);
	if (err < 0)
		dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
	return err;
}
EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
static void azx_init_cmd_io(struct azx *chip)
{
	int timeout;

	spin_lock_irq(&chip->reg_lock);
	/* CORB set up */
	chip->corb.addr = chip->rb.addr;
	chip->corb.buf = (u32 *)chip->rb.area;
	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
	azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));

	/* set the corb size to 256 entries (ULI requires explicitly) */
	azx_writeb(chip, CORBSIZE, 0x02);
	/* set the corb write pointer to 0 */
	azx_writew(chip, CORBWP, 0);

	/* reset the corb hw read pointer */
	azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
	if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
		for (timeout = 1000; timeout > 0; timeout--) {
			if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
				break;
			udelay(1);
		}
		if (timeout <= 0)
			dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
				azx_readw(chip, CORBRP));

		azx_writew(chip, CORBRP, 0);
		for (timeout = 1000; timeout > 0; timeout--) {
			if (azx_readw(chip, CORBRP) == 0)
				break;
			udelay(1);
		}
		if (timeout <= 0)
			dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
				azx_readw(chip, CORBRP));
	}

	/* enable corb dma */
	azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);

	/* RIRB set up */
	chip->rirb.addr = chip->rb.addr + 2048;
	chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
	chip->rirb.wp = chip->rirb.rp = 0;
	memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
	azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));

	/* set the rirb size to 256 entries (ULI requires explicitly) */
	azx_writeb(chip, RIRBSIZE, 0x02);
	/* reset the rirb hw write pointer */
	azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
	/* set N=1, get RIRB response interrupt for new entry */
	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
		azx_writew(chip, RINTCNT, 0xc0);
	else
		azx_writew(chip, RINTCNT, 1);
	/* enable rirb dma and response irq */
	azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
	spin_unlock_irq(&chip->reg_lock);
}
EXPORT_SYMBOL_GPL(azx_init_cmd_io);
static void azx_free_cmd_io(struct azx *chip)
{
	spin_lock_irq(&chip->reg_lock);
	/* disable ringbuffer DMAs */
	azx_writeb(chip, RIRBCTL, 0);
	azx_writeb(chip, CORBCTL, 0);
	spin_unlock_irq(&chip->reg_lock);
}
EXPORT_SYMBOL_GPL(azx_free_cmd_io);
static unsigned int azx_command_addr(u32 cmd)
{
	unsigned int addr = cmd >> 28;

	if (addr >= AZX_MAX_CODECS) {
		snd_BUG();
		addr = 0;
	}

	return addr;
}
1132 static int azx_corb_send_cmd(struct hda_bus
*bus
, u32 val
)
1134 struct azx
*chip
= bus
->private_data
;
1135 unsigned int addr
= azx_command_addr(val
);
1136 unsigned int wp
, rp
;
1138 spin_lock_irq(&chip
->reg_lock
);
1140 /* add command to corb */
1141 wp
= azx_readw(chip
, CORBWP
);
1143 /* something wrong, controller likely turned to D3 */
1144 spin_unlock_irq(&chip
->reg_lock
);
1148 wp
%= ICH6_MAX_CORB_ENTRIES
;
1150 rp
= azx_readw(chip
, CORBRP
);
1152 /* oops, it's full */
1153 spin_unlock_irq(&chip
->reg_lock
);
1157 chip
->rirb
.cmds
[addr
]++;
1158 chip
->corb
.buf
[wp
] = cpu_to_le32(val
);
1159 azx_writew(chip
, CORBWP
, wp
);
1161 spin_unlock_irq(&chip
->reg_lock
);
#define ICH6_RIRB_EX_UNSOL_EV	(1<<4)

/* retrieve RIRB entry - called from interrupt handler */
static void azx_update_rirb(struct azx *chip)
{
	unsigned int rp, wp;
	unsigned int addr;
	u32 res, res_ex;

	wp = azx_readw(chip, RIRBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		return;
	}

	if (wp == chip->rirb.wp)
		return;
	chip->rirb.wp = wp;

	while (chip->rirb.rp != wp) {
		chip->rirb.rp++;
		chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;

		rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
		res = le32_to_cpu(chip->rirb.buf[rp]);
		addr = res_ex & 0xf;
		if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
				res, res_ex,
				chip->rirb.rp, wp);
			snd_BUG();
		}
		else if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
			snd_hda_queue_unsol_event(chip->bus, res, res_ex);
		else if (chip->rirb.cmds[addr]) {
			chip->rirb.res[addr] = res;
			smp_wmb();
			chip->rirb.cmds[addr]--;
		} else if (printk_ratelimit()) {
			dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
				res, res_ex,
				chip->last_cmd[addr]);
		}
	}
}
/* receive a response */
static unsigned int azx_rirb_get_response(struct hda_bus *bus,
					  unsigned int addr)
{
	struct azx *chip = bus->private_data;
	unsigned long timeout;
	unsigned long loopcounter;
	int do_poll = 0;

 again:
	timeout = jiffies + msecs_to_jiffies(1000);

	for (loopcounter = 0;; loopcounter++) {
		if (chip->polling_mode || do_poll) {
			spin_lock_irq(&chip->reg_lock);
			azx_update_rirb(chip);
			spin_unlock_irq(&chip->reg_lock);
		}
		if (!chip->rirb.cmds[addr]) {
			smp_rmb();
			bus->rirb_error = 0;

			if (!do_poll)
				chip->poll_count = 0;
			return chip->rirb.res[addr]; /* the last value */
		}
		if (time_after(jiffies, timeout))
			break;
		if (bus->needs_damn_long_delay || loopcounter > 3000)
			msleep(2); /* temporary workaround */
		else {
			udelay(10);
			cond_resched();
		}
	}

	if (!bus->no_response_fallback)
		return -1;

	if (!chip->polling_mode && chip->poll_count < 2) {
		dev_dbg(chip->card->dev,
			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
			chip->last_cmd[addr]);
		do_poll = 1;
		chip->poll_count++;
		goto again;
	}

	if (!chip->polling_mode) {
		dev_warn(chip->card->dev,
			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
			 chip->last_cmd[addr]);
		chip->polling_mode = 1;
		goto again;
	}

	if (chip->msi) {
		dev_warn(chip->card->dev,
			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
			 chip->last_cmd[addr]);
		if (chip->ops->disable_msi_reset_irq(chip) &&
		    chip->ops->disable_msi_reset_irq(chip) < 0) {
			bus->rirb_error = 1;
			return -1;
		}
		goto again;
	}

	if (chip->probing) {
		/* If this critical timeout happens during the codec probing
		 * phase, this is likely an access to a non-existing codec
		 * slot. Better to return an error and reset the system.
		 */
		return -1;
	}

	/* a fatal communication error; need either to reset or to fallback
	 * to the single_cmd mode
	 */
	bus->rirb_error = 1;
	if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
		bus->response_reset = 1;
		return -1; /* give a chance to retry */
	}

	dev_err(chip->card->dev,
		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
		chip->last_cmd[addr]);
	chip->single_cmd = 1;
	bus->response_reset = 0;
	/* release CORB/RIRB */
	azx_free_cmd_io(chip);
	/* disable unsolicited responses */
	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
	return -1;
}
/*
 * Use the single immediate command instead of CORB/RIRB for simplicity
 *
 * Note: according to Intel, this is not preferred use. The command was
 *       intended for the BIOS only, and may get confused with unsolicited
 *       responses. So, we shouldn't use it for normal operation from the
 *       driver.
 *       I left the codes, however, for debugging/testing purposes.
 */
/* receive a response */
static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
{
	int timeout = 50;

	while (timeout--) {
		/* check IRV busy bit */
		if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
			/* reuse rirb.res as the response return value */
			chip->rirb.res[addr] = azx_readl(chip, IR);
			return 0;
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
			azx_readw(chip, IRS));
	chip->rirb.res[addr] = -1;
	return -EIO;
}
/* send a command */
static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
{
	struct azx *chip = bus->private_data;
	unsigned int addr = azx_command_addr(val);
	int timeout = 50;

	bus->rirb_error = 0;
	while (timeout--) {
		/* check ICB busy bit */
		if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
			/* Clear IRV valid bit */
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   ICH6_IRS_VALID);
			azx_writel(chip, IC, val);
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   ICH6_IRS_BUSY);
			return azx_single_wait_for_response(chip, addr);
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev,
			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
			azx_readw(chip, IRS), val);
	return -EIO;
}
/* receive a response */
static unsigned int azx_single_get_response(struct hda_bus *bus,
					    unsigned int addr)
{
	struct azx *chip = bus->private_data;
	return chip->rirb.res[addr];
}
/*
 * The below are the main callbacks from hda_codec.
 *
 * They are just the skeleton to call sub-callbacks according to the
 * current setting of chip->single_cmd.
 */

/* send a command */
static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
{
	struct azx *chip = bus->private_data;

	chip->last_cmd[azx_command_addr(val)] = val;
	if (chip->single_cmd)
		return azx_single_send_cmd(bus, val);
	else
		return azx_corb_send_cmd(bus, val);
}
EXPORT_SYMBOL_GPL(azx_send_cmd);

/* get a response */
static unsigned int azx_get_response(struct hda_bus *bus,
				     unsigned int addr)
{
	struct azx *chip = bus->private_data;
	if (chip->single_cmd)
		return azx_single_get_response(bus, addr);
	else
		return azx_rirb_get_response(bus, addr);
}
EXPORT_SYMBOL_GPL(azx_get_response);
#ifdef CONFIG_SND_HDA_DSP_LOADER
/*
 * DSP loading code (e.g. for CA0132)
 */

/* use the first stream for loading DSP */
static struct azx_dev *
azx_get_dsp_loader_dev(struct azx *chip)
{
	return &chip->azx_dev[chip->playback_index_offset];
}
static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
				unsigned int byte_size,
				struct snd_dma_buffer *bufp)
{
	u32 *bdl;
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);

	dsp_lock(azx_dev);
	spin_lock_irq(&chip->reg_lock);
	if (azx_dev->running || azx_dev->locked) {
		spin_unlock_irq(&chip->reg_lock);
		err = -EBUSY;
		goto unlock;
	}
	azx_dev->prepared = 0;
	chip->saved_azx_dev = *azx_dev;
	azx_dev->locked = 1;
	spin_unlock_irq(&chip->reg_lock);

	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
					 byte_size, bufp);
	if (err < 0)
		goto err_alloc;

	azx_dev->bufsize = byte_size;
	azx_dev->period_bytes = byte_size;
	azx_dev->format_val = format;

	azx_stream_reset(chip, azx_dev);

	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);

	azx_dev->frags = 0;
	bdl = (u32 *)azx_dev->bdl.area;
	err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
	if (err < 0)
		goto error;

	azx_setup_controller(chip, azx_dev);
	dsp_unlock(azx_dev);
	return azx_dev->stream_tag;

 error:
	chip->ops->dma_free_pages(chip, bufp);
 err_alloc:
	spin_lock_irq(&chip->reg_lock);
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
 unlock:
	dsp_unlock(azx_dev);
	return err;
}
static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
{
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	if (start)
		azx_stream_start(chip, azx_dev);
	else
		azx_stream_stop(chip, azx_dev);
	azx_dev->running = start;
}
static void azx_load_dsp_cleanup(struct hda_bus *bus,
				 struct snd_dma_buffer *dmab)
{
	struct azx *chip = bus->private_data;
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	if (!dmab->area || !azx_dev->locked)
		return;

	dsp_lock(azx_dev);
	/* reset BDL address */
	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
	azx_sd_writel(chip, azx_dev, SD_CTL, 0);
	azx_dev->bufsize = 0;
	azx_dev->period_bytes = 0;
	azx_dev->format_val = 0;

	chip->ops->dma_free_pages(chip, dmab);

	spin_lock_irq(&chip->reg_lock);
	if (azx_dev->opened)
		*azx_dev = chip->saved_azx_dev;
	azx_dev->locked = 0;
	spin_unlock_irq(&chip->reg_lock);
	dsp_unlock(azx_dev);
}
#endif /* CONFIG_SND_HDA_DSP_LOADER */
int azx_alloc_stream_pages(struct azx *chip)
{
	int i, err;
	struct snd_card *card = chip->card;

	for (i = 0; i < chip->num_streams; i++) {
		dsp_lock_init(&chip->azx_dev[i]);
		/* allocate memory for the BDL for each stream */
		err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
						 BDL_SIZE,
						 &chip->azx_dev[i].bdl);
		if (err < 0) {
			dev_err(card->dev, "cannot allocate BDL\n");
			return -ENOMEM;
		}
	}
	/* allocate memory for the position buffer */
	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
					 chip->num_streams * 8, &chip->posbuf);
	if (err < 0) {
		dev_err(card->dev, "cannot allocate posbuf\n");
		return -ENOMEM;
	}

	/* allocate CORB/RIRB */
	err = azx_alloc_cmd_io(chip);
	if (err < 0)
		return err;
	return 0;
}
EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
void azx_free_stream_pages(struct azx *chip)
{
	int i;
	if (chip->azx_dev) {
		for (i = 0; i < chip->num_streams; i++)
			if (chip->azx_dev[i].bdl.area)
				chip->ops->dma_free_pages(
					chip, &chip->azx_dev[i].bdl);
	}
	if (chip->rb.area)
		chip->ops->dma_free_pages(chip, &chip->rb);
	if (chip->posbuf.area)
		chip->ops->dma_free_pages(chip, &chip->posbuf);
}
EXPORT_SYMBOL_GPL(azx_free_stream_pages);
/*
 * Lowlevel interface
 */

/* enter link reset */
void azx_enter_link_reset(struct azx *chip)
{
	unsigned long timeout;

	/* reset controller */
	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);

	timeout = jiffies + msecs_to_jiffies(100);
	while ((azx_readb(chip, GCTL) & ICH6_GCTL_RESET) &&
			time_before(jiffies, timeout))
		usleep_range(500, 1000);
}
EXPORT_SYMBOL_GPL(azx_enter_link_reset);
/* exit link reset */
static void azx_exit_link_reset(struct azx *chip)
{
	unsigned long timeout;

	azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);

	timeout = jiffies + msecs_to_jiffies(100);
	while (!azx_readb(chip, GCTL) &&
			time_before(jiffies, timeout))
		usleep_range(500, 1000);
}
/* reset codec link */
static int azx_reset(struct azx *chip, bool full_reset)
{
	if (!full_reset)
		goto skip_reset;

	/* clear STATESTS */
	azx_writew(chip, STATESTS, STATESTS_INT_MASK);

	/* reset controller */
	azx_enter_link_reset(chip);

	/* delay for >= 100us for codec PLL to settle per spec
	 * Rev 0.9 section 5.5.1
	 */
	usleep_range(500, 1000);

	/* Bring controller out of reset */
	azx_exit_link_reset(chip);

	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
	usleep_range(1000, 1200);

 skip_reset:
	/* check to see if controller is ready */
	if (!azx_readb(chip, GCTL)) {
		dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
		return -EBUSY;
	}

	/* Accept unsolicited responses */
	if (!chip->single_cmd)
		azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
			   ICH6_GCTL_UNSOL);

	/* detect codecs */
	if (!chip->codec_mask) {
		chip->codec_mask = azx_readw(chip, STATESTS);
		dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
			chip->codec_mask);
	}

	return 0;
}
/* enable interrupts */
static void azx_int_enable(struct azx *chip)
{
	/* enable controller CIE and GIE */
	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
		   ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
}

/* disable interrupts */
static void azx_int_disable(struct azx *chip)
{
	int i;

	/* disable interrupts in stream descriptor */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = &chip->azx_dev[i];
		azx_sd_writeb(chip, azx_dev, SD_CTL,
			      azx_sd_readb(chip, azx_dev, SD_CTL) &
			      ~SD_INT_MASK);
	}

	/* disable SIE for all streams */
	azx_writeb(chip, INTCTL, 0);

	/* disable controller CIE and GIE */
	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
		   ~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
}

/* clear interrupts */
static void azx_int_clear(struct azx *chip)
{
	int i;

	/* clear stream status */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = &chip->azx_dev[i];
		azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
	}

	/* clear STATESTS */
	azx_writew(chip, STATESTS, STATESTS_INT_MASK);

	/* clear rirb status */
	azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);

	/* clear int status */
	azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
}
/*
 * reset and start the controller registers
 */
void azx_init_chip(struct azx *chip, bool full_reset)
{
	if (chip->initialized)
		return;

	/* reset controller */
	azx_reset(chip, full_reset);

	/* initialize interrupts */
	azx_int_clear(chip);
	azx_int_enable(chip);

	/* initialize the codec command I/O */
	if (!chip->single_cmd)
		azx_init_cmd_io(chip);

	/* program the position buffer */
	azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
	azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));

	chip->initialized = 1;
}
EXPORT_SYMBOL_GPL(azx_init_chip);

void azx_stop_chip(struct azx *chip)
{
	if (!chip->initialized)
		return;

	/* disable interrupts */
	azx_int_disable(chip);
	azx_int_clear(chip);

	/* disable CORB/RIRB */
	azx_free_cmd_io(chip);

	/* disable position buffer */
	azx_writel(chip, DPLBASE, 0);
	azx_writel(chip, DPUBASE, 0);

	chip->initialized = 0;
}
EXPORT_SYMBOL_GPL(azx_stop_chip);
/*
 * interrupt handler
 */
irqreturn_t azx_interrupt(int irq, void *dev_id)
{
	struct azx *chip = dev_id;
	struct azx_dev *azx_dev;
	u32 status;
	u8 sd_status;
	int i;

#ifdef CONFIG_PM_RUNTIME
	if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
		if (!pm_runtime_active(chip->card->dev))
			return IRQ_NONE;
#endif

	spin_lock(&chip->reg_lock);

	if (chip->disabled) {
		spin_unlock(&chip->reg_lock);
		return IRQ_NONE;
	}

	status = azx_readl(chip, INTSTS);
	if (status == 0 || status == 0xffffffff) {
		spin_unlock(&chip->reg_lock);
		return IRQ_NONE;
	}

	for (i = 0; i < chip->num_streams; i++) {
		azx_dev = &chip->azx_dev[i];
		if (status & azx_dev->sd_int_sta_mask) {
			sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
			azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
			if (!azx_dev->substream || !azx_dev->running ||
			    !(sd_status & SD_INT_COMPLETE))
				continue;
			/* check whether this IRQ is really acceptable */
			if (!chip->ops->position_check ||
			    chip->ops->position_check(chip, azx_dev)) {
				spin_unlock(&chip->reg_lock);
				snd_pcm_period_elapsed(azx_dev->substream);
				spin_lock(&chip->reg_lock);
			}
		}
	}

	/* clear rirb int */
	status = azx_readb(chip, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE) {
			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
				udelay(80);
			azx_update_rirb(chip);
		}
		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
	}

	spin_unlock(&chip->reg_lock);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(azx_interrupt);
/*
 * Probe the given codec address
 */
static int probe_codec(struct azx *chip, int addr)
{
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res;

	mutex_lock(&chip->bus->cmd_mutex);
	chip->probing = 1;
	azx_send_cmd(chip->bus, cmd);
	res = azx_get_response(chip->bus, addr);
	chip->probing = 0;
	mutex_unlock(&chip->bus->cmd_mutex);
	if (res == -1)
		return -EIO;
	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
	return 0;
}
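/*
 * The probe verb above is packed in the command layout used throughout
 * this file: bits [31:28] codec address, [27:20] node ID (AC_NODE_ROOT),
 * [19:8] verb (AC_VERB_PARAMETERS), [7:0] payload (AC_PAR_VENDOR_ID).
 * azx_command_addr() recovers the codec address from bits [31:28] of the
 * same 32-bit word.
 */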
static void azx_bus_reset(struct hda_bus *bus)
{
	struct azx *chip = bus->private_data;

	bus->in_reset = 1;
	azx_stop_chip(chip);
	azx_init_chip(chip, true);
	if (chip->initialized) {
		struct azx_pcm *p;
		list_for_each_entry(p, &chip->pcm_list, list)
			snd_pcm_suspend_all(p->pcm);
		snd_hda_suspend(chip->bus);
		snd_hda_resume(chip->bus);
	}
	bus->in_reset = 0;
}
/* power-up/down the controller */
static void azx_power_notify(struct hda_bus *bus, bool power_up)
{
	struct azx *chip = bus->private_data;

	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
		return;

	if (power_up)
		pm_runtime_get_sync(chip->card->dev);
	else
		pm_runtime_put_sync(chip->card->dev);
}
static int get_jackpoll_interval(struct azx *chip)
{
	int i;
	unsigned int j;

	if (!chip->jackpoll_ms)
		return 0;

	i = chip->jackpoll_ms[chip->dev_index];
	if (i == 0)
		return 0;
	if (i < 50 || i > 60000)
		j = 0;
	else
		j = msecs_to_jiffies(i);
	if (j == 0)
		dev_warn(chip->card->dev,
			 "jackpoll_ms value out of range: %d\n", i);
	return j;
}
/* Codec initialization */
int azx_codec_create(struct azx *chip, const char *model,
		     unsigned int max_slots,
		     int *power_save_to)
{
	struct hda_bus_template bus_temp;
	int c, codecs, err;

	memset(&bus_temp, 0, sizeof(bus_temp));
	bus_temp.private_data = chip;
	bus_temp.modelname = model;
	bus_temp.pci = chip->pci;
	bus_temp.ops.command = azx_send_cmd;
	bus_temp.ops.get_response = azx_get_response;
	bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
	bus_temp.ops.bus_reset = azx_bus_reset;
	bus_temp.power_save = power_save_to;
	bus_temp.ops.pm_notify = azx_power_notify;
#ifdef CONFIG_SND_HDA_DSP_LOADER
	bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
	bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
	bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
#endif

	err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
	if (err < 0)
		return err;

	if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
		dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
		chip->bus->needs_damn_long_delay = 1;
	}

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSen give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				chip->codec_mask &= ~(1 << c);
				/* Worse, accessing a non-existing
				 * codec often screws up the controller chip,
				 * and disturbs the further communications.
				 * Thus if an error occurs during probing,
				 * better to reset the controller chip to
				 * get back to the sanity state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* AMD chipsets often cause the communication stalls upon certain
	 * sequence like the pin-detection. It seems that forcing the synced
	 * access works around the stall. Grrr...
	 */
	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
		dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
		chip->bus->sync_write = 1;
		chip->bus->allow_bus_reset = 1;
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(chip->bus, c, &codec);
			if (err < 0)
				continue;
			codec->jackpoll_interval = get_jackpoll_interval(chip);
			codec->beep_mode = chip->beep_mode;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_create);
/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
	struct hda_codec *codec;
	list_for_each_entry(codec, &chip->bus->codec_list, list) {
		snd_hda_codec_configure(codec);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);

/* mixer creation - all stuff is implemented in hda module */
int azx_mixer_create(struct azx *chip)
{
	return snd_hda_build_controls(chip->bus);
}
EXPORT_SYMBOL_GPL(azx_mixer_create);
/* initialize SD streams */
int azx_init_stream(struct azx *chip)
{
	int i;

	/* initialize each stream (aka device)
	 * assign the starting bdl address to each stream (device)
	 * and initialize
	 */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = &chip->azx_dev[i];
		azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
		/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
		azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
		/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
		azx_dev->sd_int_sta_mask = 1 << i;
		/* stream tag: must be non-zero and unique */
		azx_dev->index = i;
		azx_dev->stream_tag = i + 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_init_stream);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common HDA driver functions");