/*
 * Digital Audio (PCM) abstract layer
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/module.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/pm_qos.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>
#include <sound/minors.h>
#include <linux/uio.h>
#include <linux/delay.h>

#include "pcm_local.h"
#ifdef CONFIG_SND_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_param_trace.h"
#else
#define trace_hw_mask_param_enabled()		0
#define trace_hw_interval_param_enabled()	0
#define trace_hw_mask_param(substream, type, index, prev, curr)
#define trace_hw_interval_param(substream, type, index, prev, curr)
#endif
struct snd_pcm_hw_params_old {
	unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
			   SNDRV_PCM_HW_PARAM_ACCESS + 1];
	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
				      SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
	unsigned int rate_num;
	unsigned int rate_den;
	snd_pcm_uframes_t fifo_size;
	unsigned char reserved[64];
};
#ifdef CONFIG_SND_SUPPORT_OLD_API
#define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
#define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)

static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams);
static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams);
#endif
static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);

/* Writer in rwsem may block readers even during its waiting in queue,
 * and this may lead to a deadlock when the code path takes the read sem
 * twice (e.g. once in snd_pcm_action_nonatomic() and again in
 * snd_pcm_stream_lock()).  As a (suboptimal) workaround, let the writer
 * sleep until all readers have completed, without blocking them.
 */
static inline void down_write_nonfifo(struct rw_semaphore *lock)
{
	while (!down_write_trylock(lock))
		msleep(1);
}
#define PCM_LOCK_DEFAULT	0
#define PCM_LOCK_IRQ		1
#define PCM_LOCK_IRQSAVE	2

static unsigned long __snd_pcm_stream_lock_mode(struct snd_pcm_substream *substream,
						unsigned int mode)
{
	unsigned long flags = 0;

	if (substream->pcm->nonatomic) {
		down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
		mutex_lock(&substream->self_group.mutex);
	} else {
		switch (mode) {
		case PCM_LOCK_DEFAULT:
			read_lock(&snd_pcm_link_rwlock);
			break;
		case PCM_LOCK_IRQ:
			read_lock_irq(&snd_pcm_link_rwlock);
			break;
		case PCM_LOCK_IRQSAVE:
			read_lock_irqsave(&snd_pcm_link_rwlock, flags);
			break;
		}
		spin_lock(&substream->self_group.lock);
	}
	return flags;
}
static void __snd_pcm_stream_unlock_mode(struct snd_pcm_substream *substream,
					 unsigned int mode, unsigned long flags)
{
	if (substream->pcm->nonatomic) {
		mutex_unlock(&substream->self_group.mutex);
		up_read(&snd_pcm_link_rwsem);
	} else {
		spin_unlock(&substream->self_group.lock);
		switch (mode) {
		case PCM_LOCK_DEFAULT:
			read_unlock(&snd_pcm_link_rwlock);
			break;
		case PCM_LOCK_IRQ:
			read_unlock_irq(&snd_pcm_link_rwlock);
			break;
		case PCM_LOCK_IRQSAVE:
			read_unlock_irqrestore(&snd_pcm_link_rwlock, flags);
			break;
		}
	}
}
/**
 * snd_pcm_stream_lock - Lock the PCM stream
 * @substream: PCM substream
 *
 * This locks the PCM stream's spinlock or mutex depending on the nonatomic
 * flag of the given substream.  It also takes the global link rw lock
 * (or rw sem) to avoid a race with linked streams.
 */
void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
{
	__snd_pcm_stream_lock_mode(substream, PCM_LOCK_DEFAULT);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
/**
 * snd_pcm_stream_unlock - Unlock the PCM stream
 * @substream: PCM substream
 *
 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
 */
void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
{
	__snd_pcm_stream_unlock_mode(substream, PCM_LOCK_DEFAULT, 0);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
/**
 * snd_pcm_stream_lock_irq - Lock the PCM stream
 * @substream: PCM substream
 *
 * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
 * IRQ (only when nonatomic is false).  In the nonatomic case, this is
 * identical to snd_pcm_stream_lock().
 */
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
	__snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQ);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
/**
 * snd_pcm_stream_unlock_irq - Unlock the PCM stream
 * @substream: PCM substream
 *
 * This is a counterpart of snd_pcm_stream_lock_irq().
 */
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
{
	__snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQ, 0);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
{
	return __snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQSAVE);
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
/**
 * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
 * @substream: PCM substream
 * @flags: irq flags
 *
 * This is a counterpart of snd_pcm_stream_lock_irqsave().
 */
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
				      unsigned long flags)
{
	__snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQSAVE, flags);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
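
/*
 * Illustrative only (not part of the original file): a caller that does not
 * know the current IRQ state typically brackets runtime accesses with the
 * irqsave pair; "my_substream" is a hypothetical pointer owned by the caller:
 *
 *	unsigned long flags;
 *
 *	snd_pcm_stream_lock_irqsave(my_substream, flags);
 *	... inspect or update my_substream->runtime here ...
 *	snd_pcm_stream_unlock_irqrestore(my_substream, flags);
 */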
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
	struct snd_pcm *pcm = substream->pcm;
	struct snd_pcm_str *pstr = substream->pstr;

	memset(info, 0, sizeof(*info));
	info->card = pcm->card->number;
	info->device = pcm->device;
	info->stream = substream->stream;
	info->subdevice = substream->number;
	strlcpy(info->id, pcm->id, sizeof(info->id));
	strlcpy(info->name, pcm->name, sizeof(info->name));
	info->dev_class = pcm->dev_class;
	info->dev_subclass = pcm->dev_subclass;
	info->subdevices_count = pstr->substream_count;
	info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
	strlcpy(info->subname, substream->name, sizeof(info->subname));

	return 0;
}
int snd_pcm_info_user(struct snd_pcm_substream *substream,
		      struct snd_pcm_info __user * _info)
{
	struct snd_pcm_info *info;
	int err;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	err = snd_pcm_info(substream, info);
	if (err >= 0) {
		if (copy_to_user(_info, info, sizeof(*info)))
			err = -EFAULT;
	}
	kfree(info);
	return err;
}
static bool hw_support_mmap(struct snd_pcm_substream *substream)
{
	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
		return false;
	/* architecture supports dma_mmap_coherent()? */
#if defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) || !defined(CONFIG_HAS_DMA)
	if (!substream->ops->mmap &&
	    substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
		return false;
#endif
	return true;
}
static int constrain_mask_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_pcm_hw_constraints *constrs =
					&substream->runtime->hw_constraints;
	struct snd_mask *m;
	unsigned int k;
	struct snd_mask old_mask;
	int changed;

	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
		m = hw_param_mask(params, k);
		if (snd_mask_empty(m))
			return -EINVAL;

		/* This parameter is not requested to change by a caller. */
		if (!(params->rmask & (1 << k)))
			continue;

		if (trace_hw_mask_param_enabled())
			old_mask = *m;

		changed = snd_mask_refine(m, constrs_mask(constrs, k));
		if (changed < 0)
			return changed;
		if (changed == 0)
			continue;

		/* Set corresponding flag so that the caller gets it. */
		trace_hw_mask_param(substream, k, 0, &old_mask, m);
		params->cmask |= 1 << k;
	}

	return 0;
}
static int constrain_interval_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_hw_constraints *constrs =
					&substream->runtime->hw_constraints;
	struct snd_interval *i;
	unsigned int k;
	struct snd_interval old_interval;
	int changed;

	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
		i = hw_param_interval(params, k);
		if (snd_interval_empty(i))
			return -EINVAL;

		/* This parameter is not requested to change by a caller. */
		if (!(params->rmask & (1 << k)))
			continue;

		if (trace_hw_interval_param_enabled())
			old_interval = *i;

		changed = snd_interval_refine(i, constrs_interval(constrs, k));
		if (changed < 0)
			return changed;
		if (changed == 0)
			continue;

		/* Set corresponding flag so that the caller gets it. */
		trace_hw_interval_param(substream, k, 0, &old_interval, i);
		params->cmask |= 1 << k;
	}

	return 0;
}
static int constrain_params_by_rules(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_hw_constraints *constrs =
					&substream->runtime->hw_constraints;
	unsigned int k;
	unsigned int *rstamps;
	unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
	unsigned int stamp;
	struct snd_pcm_hw_rule *r;
	unsigned int d;
	struct snd_mask old_mask;
	struct snd_interval old_interval;
	bool again;
	int changed, err = 0;

	/*
	 * Each application of a rule has its own sequence number.
	 *
	 * Each member of the 'rstamps' array represents the sequence number of
	 * the most recent application of the corresponding rule.
	 */
	rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
	if (!rstamps)
		return -ENOMEM;

	/*
	 * Each member of the 'vstamps' array represents the sequence number of
	 * the most recent rule application in which the corresponding
	 * parameters were changed.
	 *
	 * In the initial state, elements corresponding to parameters requested
	 * by a caller are set to 1.  For unrequested parameters, the
	 * corresponding members stay at 0 so that those parameters are never
	 * changed.
	 */
	for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
		vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;

	/* Due to the above design, actual sequence number starts at 2. */
	stamp = 2;
retry:
	/* Apply all rules in order. */
	again = false;
	for (k = 0; k < constrs->rules_num; k++) {
		r = &constrs->rules[k];

		/*
		 * Check condition bits of this rule. When the rule has
		 * some condition bits, a parameter without the bits is
		 * never processed. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
		 * is an example of the condition bits.
		 */
		if (r->cond && !(r->cond & params->flags))
			continue;

		/*
		 * The 'deps' array includes maximum three dependencies
		 * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
		 * member of this array is a sentinel and should be
		 * a negative value.
		 *
		 * This rule should be processed this time when dependent
		 * parameters were changed at former applications of the other
		 * rules.
		 */
		for (d = 0; r->deps[d] >= 0; d++) {
			if (vstamps[r->deps[d]] > rstamps[k])
				break;
		}
		if (r->deps[d] < 0)
			continue;

		if (trace_hw_mask_param_enabled()) {
			if (hw_is_mask(r->var))
				old_mask = *hw_param_mask(params, r->var);
		}
		if (trace_hw_interval_param_enabled()) {
			if (hw_is_interval(r->var))
				old_interval = *hw_param_interval(params, r->var);
		}

		changed = r->func(params, r);
		if (changed < 0) {
			err = changed;
			goto error;
		}

		/*
		 * When the parameter is changed, notify the caller via the
		 * corresponding returned bit, then prepare for the next
		 * iteration.
		 */
		if (changed && r->var >= 0) {
			if (hw_is_mask(r->var)) {
				trace_hw_mask_param(substream, r->var,
					k + 1, &old_mask,
					hw_param_mask(params, r->var));
			}
			if (hw_is_interval(r->var)) {
				trace_hw_interval_param(substream, r->var,
					k + 1, &old_interval,
					hw_param_interval(params, r->var));
			}

			params->cmask |= (1 << r->var);
			vstamps[r->var] = stamp;
			again = true;
		}

		rstamps[k] = stamp++;
	}

	/* Iterate to evaluate all rules till no parameters are changed. */
	if (again)
		goto retry;

error:
	kfree(rstamps);
	return err;
}
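
/*
 * Illustrative note (not in the original source): the stamp bookkeeping above
 * is what lets the solver converge without re-running every rule.  Suppose
 * only RATE was requested (vstamps[RATE] = 1, everything else 0) and rule k
 * depends on RATE with rstamps[k] = 0: the rule runs because vstamps[RATE] is
 * newer than rstamps[k]; if it then narrows PERIOD_TIME, vstamps[PERIOD_TIME]
 * is bumped to the current stamp, so on the next pass only rules depending on
 * PERIOD_TIME (or other freshly changed parameters) are evaluated again.
 */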
static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	const struct snd_interval *i;
	const struct snd_mask *m;
	int err;

	if (!params->msbits) {
		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
		if (snd_interval_single(i))
			params->msbits = snd_interval_value(i);
	}

	if (!params->rate_den) {
		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
		if (snd_interval_single(i)) {
			params->rate_num = snd_interval_value(i);
			params->rate_den = 1;
		}
	}

	if (!params->fifo_size) {
		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
		if (snd_mask_single(m) && snd_interval_single(i)) {
			err = substream->ops->ioctl(substream,
					SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
			if (err < 0)
				return err;
		}
	}

	params->info = substream->runtime->hw.info;
	params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
			  SNDRV_PCM_INFO_DRAIN_TRIGGER);
	if (!hw_support_mmap(substream))
		params->info &= ~(SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID);

	return 0;
}
int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
		      struct snd_pcm_hw_params *params)
{
	int err;

	params->fifo_size = 0;
	if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
		params->msbits = 0;
	if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
		params->rate_num = 0;
		params->rate_den = 0;
	}

	err = constrain_mask_params(substream, params);
	if (err < 0)
		return err;

	err = constrain_interval_params(substream, params);
	if (err < 0)
		return err;

	err = constrain_params_by_rules(substream, params);
	if (err < 0)
		return err;

	params->rmask = 0;

	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_refine);
static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);

	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto end;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto end;

	if (copy_to_user(_params, params, sizeof(*params)))
		err = -EFAULT;
end:
	kfree(params);
	return err;
}
static int period_to_usecs(struct snd_pcm_runtime *runtime)
{
	int usecs;

	if (!runtime->rate)
		return -1; /* invalid */

	/* take 75% of period time as the deadline */
	usecs = (750000 / runtime->rate) * runtime->period_size;
	usecs += ((750000 % runtime->rate) * runtime->period_size) /
		runtime->rate;

	return usecs;
}
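
/*
 * Worked example (illustrative, not in the original source): with a 48000 Hz
 * stream and a 1024-frame period, the period is about 21333 us long, so the
 * PM QoS deadline computed above is 75% of that:
 * (750000 / 48000) * 1024 + ((750000 % 48000) * 1024) / 48000
 * = 15 * 1024 + (30000 * 1024) / 48000 = 15360 + 640 = 16000 us.
 */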
static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
{
	snd_pcm_stream_lock_irq(substream);
	if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
		substream->runtime->status->state = state;
	snd_pcm_stream_unlock_irq(substream);
}
static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
					int event)
{
#ifdef CONFIG_SND_PCM_TIMER
	if (substream->timer)
		snd_timer_notify(substream->timer, event,
				 &substream->runtime->trigger_tstamp);
#endif
}
/**
 * snd_pcm_hw_param_choose - choose a configuration defined by @params
 * @pcm: PCM instance
 * @params: the hw_params instance
 *
 * Choose one configuration from configuration space defined by @params.
 * The configuration chosen is that obtained fixing in this order:
 * first access, first format, first subformat, min channels,
 * min rate, min period time, max buffer size, min tick time
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
				    struct snd_pcm_hw_params *params)
{
	static const int vars[] = {
		SNDRV_PCM_HW_PARAM_ACCESS,
		SNDRV_PCM_HW_PARAM_FORMAT,
		SNDRV_PCM_HW_PARAM_SUBFORMAT,
		SNDRV_PCM_HW_PARAM_CHANNELS,
		SNDRV_PCM_HW_PARAM_RATE,
		SNDRV_PCM_HW_PARAM_PERIOD_TIME,
		SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
		SNDRV_PCM_HW_PARAM_TICK_TIME,
		-1
	};
	const int *v;
	struct snd_mask old_mask;
	struct snd_interval old_interval;
	int changed;

	for (v = vars; *v != -1; v++) {
		/* Keep old parameter to trace. */
		if (trace_hw_mask_param_enabled()) {
			if (hw_is_mask(*v))
				old_mask = *hw_param_mask(params, *v);
		}
		if (trace_hw_interval_param_enabled()) {
			if (hw_is_interval(*v))
				old_interval = *hw_param_interval(params, *v);
		}

		if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
			changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
		else
			changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
		if (changed < 0)
			return changed;
		if (changed == 0)
			continue;

		/* Trace the changed parameter. */
		if (hw_is_mask(*v)) {
			trace_hw_mask_param(pcm, *v, 0, &old_mask,
					    hw_param_mask(params, *v));
		}
		if (hw_is_interval(*v)) {
			trace_hw_interval_param(pcm, *v, 0, &old_interval,
						hw_param_interval(params, *v));
		}
	}

	return 0;
}
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err, usecs;
	unsigned int bits;
	snd_pcm_uframes_t frames;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
	if (!substream->oss.oss)
#endif
		if (atomic_read(&substream->mmap_count))
			return -EBADFD;

	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto _error;

	err = snd_pcm_hw_params_choose(substream, params);
	if (err < 0)
		goto _error;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto _error;

	if (substream->ops->hw_params != NULL) {
		err = substream->ops->hw_params(substream, params);
		if (err < 0)
			goto _error;
	}

	runtime->access = params_access(params);
	runtime->format = params_format(params);
	runtime->subformat = params_subformat(params);
	runtime->channels = params_channels(params);
	runtime->rate = params_rate(params);
	runtime->period_size = params_period_size(params);
	runtime->periods = params_periods(params);
	runtime->buffer_size = params_buffer_size(params);
	runtime->info = params->info;
	runtime->rate_num = params->rate_num;
	runtime->rate_den = params->rate_den;
	runtime->no_period_wakeup =
			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);

	bits = snd_pcm_format_physical_width(runtime->format);
	runtime->sample_bits = bits;
	bits *= runtime->channels;
	runtime->frame_bits = bits;
	frames = 1;
	while (bits % 8 != 0) {
		bits *= 2;
		frames *= 2;
	}
	runtime->byte_align = bits / 8;
	runtime->min_align = frames;

	/* Default sw params */
	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
	runtime->period_step = 1;
	runtime->control->avail_min = runtime->period_size;
	runtime->start_threshold = 1;
	runtime->stop_threshold = runtime->buffer_size;
	runtime->silence_threshold = 0;
	runtime->silence_size = 0;
	runtime->boundary = runtime->buffer_size;
	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
		runtime->boundary *= 2;
	/* clear the buffer for avoiding possible kernel info leaks */
	if (runtime->dma_area && !substream->ops->copy_user) {
		size_t size = runtime->dma_bytes;

		if (runtime->info & SNDRV_PCM_INFO_MMAP)
			size = PAGE_ALIGN(size);
		memset(runtime->dma_area, 0, size);
	}

	snd_pcm_timer_resolution_change(substream);
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);

	if (pm_qos_request_active(&substream->latency_pm_qos_req))
		pm_qos_remove_request(&substream->latency_pm_qos_req);
	if ((usecs = period_to_usecs(runtime)) >= 0)
		pm_qos_add_request(&substream->latency_pm_qos_req,
				   PM_QOS_CPU_DMA_LATENCY, usecs);
	return 0;
 _error:
	/* hardware might be unusable from this time,
	 * so we force the application to retry setting
	 * the correct hardware parameters
	 */
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
	if (substream->ops->hw_free != NULL)
		substream->ops->hw_free(substream);
	return err;
}
static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);

	err = snd_pcm_hw_params(substream, params);
	if (err < 0)
		goto end;

	if (copy_to_user(_params, params, sizeof(*params)))
		err = -EFAULT;
end:
	kfree(params);
	return err;
}
static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (atomic_read(&substream->mmap_count))
		return -EBADFD;
	if (substream->ops->hw_free)
		result = substream->ops->hw_free(substream);
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
	pm_qos_remove_request(&substream->latency_pm_qos_req);
	return result;
}
static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_sw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);

	if (params->tstamp_mode < 0 ||
	    params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
		return -EINVAL;
	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
	    params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
		return -EINVAL;
	if (params->avail_min == 0)
		return -EINVAL;
	if (params->silence_size >= runtime->boundary) {
		if (params->silence_threshold != 0)
			return -EINVAL;
	} else {
		if (params->silence_size > params->silence_threshold)
			return -EINVAL;
		if (params->silence_threshold > runtime->buffer_size)
			return -EINVAL;
	}
	err = 0;
	snd_pcm_stream_lock_irq(substream);
	runtime->tstamp_mode = params->tstamp_mode;
	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
		runtime->tstamp_type = params->tstamp_type;
	runtime->period_step = params->period_step;
	runtime->control->avail_min = params->avail_min;
	runtime->start_threshold = params->start_threshold;
	runtime->stop_threshold = params->stop_threshold;
	runtime->silence_threshold = params->silence_threshold;
	runtime->silence_size = params->silence_size;
	params->boundary = runtime->boundary;
	if (snd_pcm_running(substream)) {
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
		    runtime->silence_size > 0)
			snd_pcm_playback_silence(substream, ULONG_MAX);
		err = snd_pcm_update_state(substream, runtime);
	}
	snd_pcm_stream_unlock_irq(substream);
	return err;
}
static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_sw_params __user * _params)
{
	struct snd_pcm_sw_params params;
	int err;

	if (copy_from_user(&params, _params, sizeof(params)))
		return -EFAULT;
	err = snd_pcm_sw_params(substream, &params);
	if (copy_to_user(_params, &params, sizeof(params)))
		return -EFAULT;
	return err;
}
static inline snd_pcm_uframes_t
snd_pcm_calc_delay(struct snd_pcm_substream *substream)
{
	snd_pcm_uframes_t delay;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		delay = snd_pcm_playback_hw_avail(substream->runtime);
	else
		delay = snd_pcm_capture_avail(substream->runtime);
	return delay + substream->runtime->delay;
}
int snd_pcm_status(struct snd_pcm_substream *substream,
		   struct snd_pcm_status *status)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_pcm_stream_lock_irq(substream);

	snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
					   &runtime->audio_tstamp_config);

	/* backwards compatible behavior */
	if (runtime->audio_tstamp_config.type_requested ==
		SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
		if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
			runtime->audio_tstamp_config.type_requested =
				SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		else
			runtime->audio_tstamp_config.type_requested =
				SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
		runtime->audio_tstamp_report.valid = 0;
	} else
		runtime->audio_tstamp_report.valid = 1;

	status->state = runtime->status->state;
	status->suspended_state = runtime->status->suspended_state;
	if (status->state == SNDRV_PCM_STATE_OPEN)
		goto _end;
	status->trigger_tstamp = runtime->trigger_tstamp;
	if (snd_pcm_running(substream)) {
		snd_pcm_update_hw_ptr(substream);
		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
			status->tstamp = runtime->status->tstamp;
			status->driver_tstamp = runtime->driver_tstamp;
			status->audio_tstamp =
				runtime->status->audio_tstamp;
			if (runtime->audio_tstamp_report.valid == 1)
				/* backwards compatibility, no report provided in COMPAT mode */
				snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
								 &status->audio_tstamp_accuracy,
								 &runtime->audio_tstamp_report);

			goto _tstamp_end;
		}
	} else {
		/* get tstamp only in fallback mode and only if enabled */
		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
			snd_pcm_gettime(runtime, &status->tstamp);
	}
 _tstamp_end:
	status->appl_ptr = runtime->control->appl_ptr;
	status->hw_ptr = runtime->status->hw_ptr;
	status->avail = snd_pcm_avail(substream);
	status->delay = snd_pcm_running(substream) ?
		snd_pcm_calc_delay(substream) : 0;
	status->avail_max = runtime->avail_max;
	status->overrange = runtime->overrange;
	runtime->avail_max = 0;
	runtime->overrange = 0;
 _end:
	snd_pcm_stream_unlock_irq(substream);
	return 0;
}
static int snd_pcm_status_user(struct snd_pcm_substream *substream,
			       struct snd_pcm_status __user * _status,
			       bool ext)
{
	struct snd_pcm_status status;
	int res;

	memset(&status, 0, sizeof(status));
	/*
	 * with extension, parameters are read/write,
	 * get audio_tstamp_data from user,
	 * ignore rest of status structure
	 */
	if (ext && get_user(status.audio_tstamp_data,
			    (u32 __user *)(&_status->audio_tstamp_data)))
		return -EFAULT;
	res = snd_pcm_status(substream, &status);
	if (res < 0)
		return res;
	if (copy_to_user(_status, &status, sizeof(status)))
		return -EFAULT;
	return 0;
}
static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
				struct snd_pcm_channel_info * info)
{
	struct snd_pcm_runtime *runtime;
	unsigned int channel;

	channel = info->channel;
	runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (channel >= runtime->channels)
		return -EINVAL;
	memset(info, 0, sizeof(*info));
	info->channel = channel;
	return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
}
static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
				     struct snd_pcm_channel_info __user * _info)
{
	struct snd_pcm_channel_info info;
	int res;

	if (copy_from_user(&info, _info, sizeof(info)))
		return -EFAULT;
	res = snd_pcm_channel_info(substream, &info);
	if (res < 0)
		return res;
	if (copy_to_user(_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->trigger_master == NULL)
		return;
	if (runtime->trigger_master == substream) {
		if (!runtime->trigger_tstamp_latched)
			snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
	} else {
		snd_pcm_trigger_tstamp(runtime->trigger_master);
		runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
	}
	runtime->trigger_master = NULL;
}
struct action_ops {
	int (*pre_action)(struct snd_pcm_substream *substream, int state);
	int (*do_action)(struct snd_pcm_substream *substream, int state);
	void (*undo_action)(struct snd_pcm_substream *substream, int state);
	void (*post_action)(struct snd_pcm_substream *substream, int state);
};
/*
 * This function is the core for handling linked streams.
 * Note: the stream state might be changed also on failure
 * Note2: call with calling stream lock + link lock
 */
static int snd_pcm_action_group(const struct action_ops *ops,
				struct snd_pcm_substream *substream,
				int state, int do_lock)
{
	struct snd_pcm_substream *s = NULL;
	struct snd_pcm_substream *s1;
	int res = 0, depth = 1;

	snd_pcm_group_for_each_entry(s, substream) {
		if (do_lock && s != substream) {
			if (s->pcm->nonatomic)
				mutex_lock_nested(&s->self_group.mutex, depth);
			else
				spin_lock_nested(&s->self_group.lock, depth);
			depth++;
		}
		res = ops->pre_action(s, state);
		if (res < 0)
			goto _unlock;
	}
	snd_pcm_group_for_each_entry(s, substream) {
		res = ops->do_action(s, state);
		if (res < 0) {
			if (ops->undo_action) {
				snd_pcm_group_for_each_entry(s1, substream) {
					if (s1 == s) /* failed stream */
						break;
					ops->undo_action(s1, state);
				}
			}
			s = NULL; /* unlock all */
			goto _unlock;
		}
	}
	snd_pcm_group_for_each_entry(s, substream) {
		ops->post_action(s, state);
	}
 _unlock:
	if (do_lock) {
		/* unlock streams */
		snd_pcm_group_for_each_entry(s1, substream) {
			if (s1 != substream) {
				if (s1->pcm->nonatomic)
					mutex_unlock(&s1->self_group.mutex);
				else
					spin_unlock(&s1->self_group.lock);
			}
			if (s1 == s)	/* end */
				break;
		}
	}
	return res;
}
/*
 * Note: call with stream lock
 */
static int snd_pcm_action_single(const struct action_ops *ops,
				 struct snd_pcm_substream *substream,
				 int state)
{
	int res;

	res = ops->pre_action(substream, state);
	if (res < 0)
		return res;
	res = ops->do_action(substream, state);
	if (res == 0)
		ops->post_action(substream, state);
	else if (ops->undo_action)
		ops->undo_action(substream, state);
	return res;
}
/*
 * Note: call with stream lock
 */
static int snd_pcm_action(const struct action_ops *ops,
			  struct snd_pcm_substream *substream,
			  int state)
{
	int res;

	if (!snd_pcm_stream_linked(substream))
		return snd_pcm_action_single(ops, substream, state);

	if (substream->pcm->nonatomic) {
		if (!mutex_trylock(&substream->group->mutex)) {
			mutex_unlock(&substream->self_group.mutex);
			mutex_lock(&substream->group->mutex);
			mutex_lock(&substream->self_group.mutex);
		}
		res = snd_pcm_action_group(ops, substream, state, 1);
		mutex_unlock(&substream->group->mutex);
	} else {
		if (!spin_trylock(&substream->group->lock)) {
			spin_unlock(&substream->self_group.lock);
			spin_lock(&substream->group->lock);
			spin_lock(&substream->self_group.lock);
		}
		res = snd_pcm_action_group(ops, substream, state, 1);
		spin_unlock(&substream->group->lock);
	}
	return res;
}
/*
 * Note: don't use any locks before
 */
static int snd_pcm_action_lock_irq(const struct action_ops *ops,
				   struct snd_pcm_substream *substream,
				   int state)
{
	int res;

	snd_pcm_stream_lock_irq(substream);
	res = snd_pcm_action(ops, substream, state);
	snd_pcm_stream_unlock_irq(substream);
	return res;
}
static int snd_pcm_action_nonatomic(const struct action_ops *ops,
				    struct snd_pcm_substream *substream,
				    int state)
{
	int res;

	down_read(&snd_pcm_link_rwsem);
	if (snd_pcm_stream_linked(substream))
		res = snd_pcm_action_group(ops, substream, state, 0);
	else
		res = snd_pcm_action_single(ops, substream, state);
	up_read(&snd_pcm_link_rwsem);
	return res;
}
static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
		return -EBADFD;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    !snd_pcm_playback_data(substream))
		return -EPIPE;
	runtime->trigger_tstamp_latched = false;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
}

static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream)
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
}

static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	runtime->hw_ptr_jiffies = jiffies;
	runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
							    runtime->rate;
	runtime->status->state = state;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, ULONG_MAX);
	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
}

static const struct action_ops snd_pcm_action_start = {
	.pre_action = snd_pcm_pre_start,
	.do_action = snd_pcm_do_start,
	.undo_action = snd_pcm_undo_start,
	.post_action = snd_pcm_post_start
};
/**
 * snd_pcm_start - start all linked streams
 * @substream: the PCM substream instance
 *
 * Return: Zero if successful, or a negative error code.
 * The stream lock must be acquired before calling this function.
 */
int snd_pcm_start(struct snd_pcm_substream *substream)
{
	return snd_pcm_action(&snd_pcm_action_start, substream,
			      SNDRV_PCM_STATE_RUNNING);
}
/* take the stream lock and start the streams */
static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
				       SNDRV_PCM_STATE_RUNNING);
}
static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream &&
	    snd_pcm_running(substream))
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
	return 0; /* unconditionally stop all substreams */
}

static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state != state) {
		snd_pcm_trigger_tstamp(substream);
		runtime->status->state = state;
		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
	}
	wake_up(&runtime->sleep);
	wake_up(&runtime->tsleep);
}

static const struct action_ops snd_pcm_action_stop = {
	.pre_action = snd_pcm_pre_stop,
	.do_action = snd_pcm_do_stop,
	.post_action = snd_pcm_post_stop
};

/**
 * snd_pcm_stop - try to stop all running streams in the substream group
 * @substream: the PCM substream instance
 * @state: PCM state after stopping the stream
 *
 * The state of each stream is then changed to the given state unconditionally.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
{
	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
}
EXPORT_SYMBOL(snd_pcm_stop);
/**
 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
 * @substream: the PCM substream
 *
 * After stopping, the state is changed to SETUP.
 * Unlike snd_pcm_stop(), this affects only the given stream.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_drain_done(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_single(&snd_pcm_action_stop, substream,
				     SNDRV_PCM_STATE_SETUP);
}
/**
 * snd_pcm_stop_xrun - stop the running streams as XRUN
 * @substream: the PCM substream instance
 *
 * This stops the given running substream (and all linked substreams) as XRUN.
 * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
{
	unsigned long flags;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (substream->runtime && snd_pcm_running(substream))
		__snd_pcm_xrun(substream);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
		return -ENOSYS;
	if (push) {
		if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
			return -EBADFD;
	} else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	/* some drivers might use hw_ptr to recover from the pause -
	   update the hw_ptr now */
	if (push)
		snd_pcm_update_hw_ptr(substream);
	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
	 * a delta from the current jiffies; this gives a large enough
	 * delta, effectively skipping the check once.
	 */
	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
	return substream->ops->trigger(substream,
				       push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
					      SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
}

static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
{
	if (substream->runtime->trigger_master == substream)
		substream->ops->trigger(substream,
					push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
					       SNDRV_PCM_TRIGGER_PAUSE_PUSH);
}

static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	if (push) {
		runtime->status->state = SNDRV_PCM_STATE_PAUSED;
		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
		wake_up(&runtime->sleep);
		wake_up(&runtime->tsleep);
	} else {
		runtime->status->state = SNDRV_PCM_STATE_RUNNING;
		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
	}
}

static const struct action_ops snd_pcm_action_pause = {
	.pre_action = snd_pcm_pre_pause,
	.do_action = snd_pcm_do_pause,
	.undo_action = snd_pcm_undo_pause,
	.post_action = snd_pcm_post_pause
};

/*
 * Push/release the pause for all linked streams.
 */
static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
{
	return snd_pcm_action(&snd_pcm_action_pause, substream, push);
}
#ifdef CONFIG_PM
static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_SUSPENDED:
		return -EBUSY;
	/* unresumable PCM state; return -EBUSY for skipping suspend */
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_DISCONNECTED:
		return -EBUSY;
	}
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->trigger_master != substream)
		return 0;
	if (! snd_pcm_running(substream))
		return 0;
	substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
	return 0; /* suspend unconditionally */
}

static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	runtime->status->suspended_state = runtime->status->state;
	runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
	wake_up(&runtime->sleep);
	wake_up(&runtime->tsleep);
}

static const struct action_ops snd_pcm_action_suspend = {
	.pre_action = snd_pcm_pre_suspend,
	.do_action = snd_pcm_do_suspend,
	.post_action = snd_pcm_post_suspend
};

/**
 * snd_pcm_suspend - trigger SUSPEND to all linked streams
 * @substream: the PCM substream
 *
 * After this call, all streams are changed to SUSPENDED state.
 *
 * Return: Zero if successful (or @substream is %NULL), or a negative error
 * code.
 */
int snd_pcm_suspend(struct snd_pcm_substream *substream)
{
	int err;
	unsigned long flags;

	if (!substream)
		return 0;

	snd_pcm_stream_lock_irqsave(substream, flags);
	err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return err;
}
EXPORT_SYMBOL(snd_pcm_suspend);
/**
 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
 * @pcm: the PCM instance
 *
 * After this call, all streams are changed to SUSPENDED state.
 *
 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
 */
int snd_pcm_suspend_all(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	int stream, err = 0;

	if (!pcm)
		return 0;

	for (stream = 0; stream < 2; stream++) {
		for (substream = pcm->streams[stream].substream;
		     substream; substream = substream->next) {
			/* FIXME: the open/close code should lock this as well */
			if (substream->runtime == NULL)
				continue;

			/*
			 * Skip BE dai link PCM's that are internal and may
			 * not have their substream ops set.
			 */
			if (!substream->ops)
				continue;

			err = snd_pcm_suspend(substream);
			if (err < 0 && err != -EBUSY)
				return err;
		}
	}
	return 0;
}
EXPORT_SYMBOL(snd_pcm_suspend_all);
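
/*
 * Illustrative only (not part of the original file): a typical driver PM hook
 * suspends all PCM substreams of its card before powering down the codec,
 * along these lines ("my_chip" and the callback name are hypothetical):
 *
 *	static int my_driver_suspend(struct device *dev)
 *	{
 *		struct my_chip *chip = dev_get_drvdata(dev);
 *
 *		snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
 *		snd_pcm_suspend_all(chip->pcm);
 *		return 0;
 *	}
 */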
static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
		return -ENOSYS;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->trigger_master != substream)
		return 0;
	/* DMA not running previously? */
	if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
	    (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
	     substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
		return 0;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
}

static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream &&
	    snd_pcm_running(substream))
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
}

static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	runtime->status->state = runtime->status->suspended_state;
	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
}

static const struct action_ops snd_pcm_action_resume = {
	.pre_action = snd_pcm_pre_resume,
	.do_action = snd_pcm_do_resume,
	.undo_action = snd_pcm_undo_resume,
	.post_action = snd_pcm_post_resume
};

static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
}

#else /* !CONFIG_PM */

static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
	return -ENOSYS;
}

#endif /* CONFIG_PM */
/*
 * Change the RUNNING stream(s) to XRUN state.
 */
static int snd_pcm_xrun(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int result;

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_XRUN:
		result = 0;	/* already there */
		break;
	case SNDRV_PCM_STATE_RUNNING:
		__snd_pcm_xrun(substream);
		result = 0;
		break;
	default:
		result = -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	return result;
}
static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
	case SNDRV_PCM_STATE_SUSPENDED:
		return 0;
	default:
		return -EBADFD;
	}
}

static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
	if (err < 0)
		return err;
	runtime->hw_ptr_base = 0;
	runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
		runtime->status->hw_ptr % runtime->period_size;
	runtime->silence_start = runtime->status->hw_ptr;
	runtime->silence_filled = 0;
	return 0;
}

static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	runtime->control->appl_ptr = runtime->status->hw_ptr;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, ULONG_MAX);
}

static const struct action_ops snd_pcm_action_reset = {
	.pre_action = snd_pcm_pre_reset,
	.do_action = snd_pcm_do_reset,
	.post_action = snd_pcm_post_reset
};

static int snd_pcm_reset(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
}
/* we use the second argument for updating f_flags */
static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
			       int f_flags)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
		return -EBADFD;
	if (snd_pcm_running(substream))
		return -EBUSY;
	substream->f_flags = f_flags;
	return 0;
}

static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
{
	int err;
	err = substream->ops->prepare(substream);
	if (err < 0)
		return err;
	return snd_pcm_do_reset(substream, 0);
}

static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	runtime->control->appl_ptr = runtime->status->hw_ptr;
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
}

static const struct action_ops snd_pcm_action_prepare = {
	.pre_action = snd_pcm_pre_prepare,
	.do_action = snd_pcm_do_prepare,
	.post_action = snd_pcm_post_prepare
};

/**
 * snd_pcm_prepare - prepare the PCM substream to be triggerable
 * @substream: the PCM substream instance
 * @file: file to refer f_flags
 *
 * Return: Zero if successful, or a negative error code.
 */
static int snd_pcm_prepare(struct snd_pcm_substream *substream,
			   struct file *file)
{
	int f_flags;

	if (file)
		f_flags = file->f_flags;
	else
		f_flags = substream->f_flags;

	snd_pcm_stream_lock_irq(substream);
	switch (substream->runtime->status->state) {
	case SNDRV_PCM_STATE_PAUSED:
		snd_pcm_pause(substream, 0);
		/* fallthru */
	case SNDRV_PCM_STATE_SUSPENDED:
		snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
		break;
	}
	snd_pcm_stream_unlock_irq(substream);

	return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
					substream, f_flags);
}
static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_DISCONNECTED:
	case SNDRV_PCM_STATE_SUSPENDED:
		return -EBADFD;
	}
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_PREPARED:
			/* start playback stream if possible */
			if (! snd_pcm_playback_empty(substream)) {
				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
			} else {
				runtime->status->state = SNDRV_PCM_STATE_SETUP;
			}
			break;
		case SNDRV_PCM_STATE_RUNNING:
			runtime->status->state = SNDRV_PCM_STATE_DRAINING;
			break;
		case SNDRV_PCM_STATE_XRUN:
			runtime->status->state = SNDRV_PCM_STATE_SETUP;
			break;
		default:
			break;
		}
	} else {
		/* stop running stream */
		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
			int new_state = snd_pcm_capture_avail(runtime) > 0 ?
				SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
			snd_pcm_do_stop(substream, new_state);
			snd_pcm_post_stop(substream, new_state);
		}
	}

	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
	    runtime->trigger_master == substream &&
	    (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
		return substream->ops->trigger(substream,
					       SNDRV_PCM_TRIGGER_DRAIN);

	return 0;
}

static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
{
}

static const struct action_ops snd_pcm_action_drain_init = {
	.pre_action = snd_pcm_pre_drain_init,
	.do_action = snd_pcm_do_drain_init,
	.post_action = snd_pcm_post_drain_init
};
static int snd_pcm_drop(struct snd_pcm_substream *substream);

/*
 * Drain the stream(s).
 * When the substream is linked, sync until the draining of all playback
 * streams is finished.
 * After this call, all streams are supposed to be either SETUP or DRAINING
 * (capture only) state.
 */
static int snd_pcm_drain(struct snd_pcm_substream *substream,
			 struct file *file)
{
	struct snd_card *card;
	struct snd_pcm_runtime *runtime;
	struct snd_pcm_substream *s;
	wait_queue_entry_t wait;
	int result = 0;
	int nonblock = 0;

	card = substream->pcm->card;
	runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;

	if (file) {
		if (file->f_flags & O_NONBLOCK)
			nonblock = 1;
	} else if (substream->f_flags & O_NONBLOCK)
		nonblock = 1;

	down_read(&snd_pcm_link_rwsem);
	snd_pcm_stream_lock_irq(substream);
	/* resume pause */
	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
		snd_pcm_pause(substream, 0);

	/* pre-start/stop - all running streams are changed to DRAINING state */
	result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
	if (result < 0)
		goto unlock;
	/* in non-blocking, we don't wait in ioctl but let caller poll */
	if (nonblock) {
		result = -EAGAIN;
		goto unlock;
	}

	for (;;) {
		long tout;
		struct snd_pcm_runtime *to_check;
		if (signal_pending(current)) {
			result = -ERESTARTSYS;
			break;
		}
		/* find a substream to drain */
		to_check = NULL;
		snd_pcm_group_for_each_entry(s, substream) {
			if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
				continue;
			runtime = s->runtime;
			if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				to_check = runtime;
				break;
			}
		}
		if (!to_check)
			break; /* all drained */
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&to_check->sleep, &wait);
		snd_pcm_stream_unlock_irq(substream);
		up_read(&snd_pcm_link_rwsem);
		if (runtime->no_period_wakeup)
			tout = MAX_SCHEDULE_TIMEOUT;
		else {
			tout = 10;
			if (runtime->rate) {
				long t = runtime->period_size * 2 / runtime->rate;
				tout = max(t, tout);
			}
			tout = msecs_to_jiffies(tout * 1000);
		}
		tout = schedule_timeout_interruptible(tout);
		down_read(&snd_pcm_link_rwsem);
		snd_pcm_stream_lock_irq(substream);
		remove_wait_queue(&to_check->sleep, &wait);
		if (card->shutdown) {
			result = -ENODEV;
			break;
		}
		if (tout == 0) {
			if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
				result = -ESTRPIPE;
			else {
				dev_dbg(substream->pcm->card->dev,
					"playback drain error (DMA or IRQ trouble?)\n");
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				result = -EIO;
			}
			break;
		}
	}

 unlock:
	snd_pcm_stream_unlock_irq(substream);
	up_read(&snd_pcm_link_rwsem);

	return result;
}
/*
 * Immediately put all linked substreams into SETUP state.
 */
static int snd_pcm_drop(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
		return -EBADFD;

	snd_pcm_stream_lock_irq(substream);
	/* resume pause */
	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
		snd_pcm_pause(substream, 0);

	snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
	/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
	snd_pcm_stream_unlock_irq(substream);

	return result;
}
static bool is_pcm_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	unsigned int minor;

	if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
		return false;
	minor = iminor(inode);
	return snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) ||
		snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
}
static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
{
	int res = 0;
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream1;
	struct snd_pcm_group *group;
	struct fd f = fdget(fd);

	if (!f.file)
		return -EBADFD;
	if (!is_pcm_file(f.file)) {
		res = -EBADFD;
		goto _badf;
	}
	pcm_file = f.file->private_data;
	substream1 = pcm_file->substream;
	if (substream == substream1) {
		res = -EINVAL;
		goto _badf;
	}

	group = kmalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		res = -ENOMEM;
		goto _nolock;
	}
	down_write_nonfifo(&snd_pcm_link_rwsem);
	write_lock_irq(&snd_pcm_link_rwlock);
	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    substream->runtime->status->state != substream1->runtime->status->state ||
	    substream->pcm->nonatomic != substream1->pcm->nonatomic) {
		res = -EBADFD;
		goto _end;
	}
	if (snd_pcm_stream_linked(substream1)) {
		res = -EALREADY;
		goto _end;
	}
	if (!snd_pcm_stream_linked(substream)) {
		substream->group = group;
		group = NULL;
		spin_lock_init(&substream->group->lock);
		mutex_init(&substream->group->mutex);
		INIT_LIST_HEAD(&substream->group->substreams);
		list_add_tail(&substream->link_list, &substream->group->substreams);
		substream->group->count = 1;
	}
	list_add_tail(&substream1->link_list, &substream->group->substreams);
	substream->group->count++;
	substream1->group = substream->group;
 _end:
	write_unlock_irq(&snd_pcm_link_rwlock);
	up_write(&snd_pcm_link_rwsem);
 _nolock:
	snd_card_unref(substream1->pcm->card);
	kfree(group);
 _badf:
	fdput(f);
	return res;
}
static void relink_to_local(struct snd_pcm_substream *substream)
{
	substream->group = &substream->self_group;
	INIT_LIST_HEAD(&substream->self_group.substreams);
	list_add_tail(&substream->link_list, &substream->self_group.substreams);
}

static int snd_pcm_unlink(struct snd_pcm_substream *substream)
{
	struct snd_pcm_substream *s;
	int res = 0;

	down_write_nonfifo(&snd_pcm_link_rwsem);
	write_lock_irq(&snd_pcm_link_rwlock);
	if (!snd_pcm_stream_linked(substream)) {
		res = -EALREADY;
		goto _end;
	}
	list_del(&substream->link_list);
	substream->group->count--;
	if (substream->group->count == 1) {	/* detach the last stream, too */
		snd_pcm_group_for_each_entry(s, substream) {
			relink_to_local(s);
			break;
		}
		kfree(substream->group);
	}
	relink_to_local(substream);
 _end:
	write_unlock_irq(&snd_pcm_link_rwlock);
	up_write(&snd_pcm_link_rwsem);
	return res;
}
static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
			       struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
			 hw_param_interval_c(params, rule->deps[1]), &t);
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
			       struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
			 hw_param_interval_c(params, rule->deps[1]), &t);
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
			     hw_param_interval_c(params, rule->deps[1]),
			     (unsigned long) rule->private, &t);
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
			     (unsigned long) rule->private,
			     hw_param_interval_c(params, rule->deps[1]), &t);
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int k;
	const struct snd_interval *i =
		hw_param_interval_c(params, rule->deps[0]);
	struct snd_mask m;
	struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
	snd_mask_any(&m);
	for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
		int bits;
		if (! snd_mask_test(mask, k))
			continue;
		bits = snd_pcm_format_physical_width(k);
		if (bits <= 0)
			continue; /* ignore invalid formats */
		if ((unsigned)bits < i->min || (unsigned)bits > i->max)
			snd_mask_reset(&m, k);
	}
	return snd_mask_refine(mask, &m);
}
static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
				       struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	unsigned int k;
	t.min = UINT_MAX;
	t.max = 0;
	t.openmin = 0;
	t.openmax = 0;
	for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
		int bits;
		if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
			continue;
		bits = snd_pcm_format_physical_width(k);
		if (bits <= 0)
			continue; /* ignore invalid formats */
		if (t.min > (unsigned)bits)
			t.min = bits;
		if (t.max < (unsigned)bits)
			t.max = bits;
	}
	t.integer = 1;
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
#if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
#error "Change this table"
#endif

static const unsigned int rates[] = {
	5512, 8000, 11025, 16000, 22050, 32000, 44100,
	48000, 64000, 88200, 96000, 176400, 192000
};

const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
};

static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hardware *hw = rule->private;
	return snd_interval_list(hw_param_interval(params, rule->var),
				 snd_pcm_known_rates.count,
				 snd_pcm_known_rates.list, hw->rates);
}
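
/*
 * How the rate rule applies in practice (a sketch; the driver side is an
 * assumption, not taken from this file): a driver advertising
 *
 *	hw->rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000;
 *
 * ends up with snd_interval_list() keeping only those entries of
 * snd_pcm_known_rates whose bit is set in hw->rates, i.e. the RATE interval
 * is narrowed to { 44100, 48000 }.
 */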
static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
					    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	struct snd_pcm_substream *substream = rule->private;

	t.min = 0;
	t.max = substream->buffer_bytes_max;
	t.openmin = 0;
	t.openmax = 0;
	t.integer = 1;
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	int k, err;

	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
		snd_mask_any(constrs_mask(constrs, k));
	}

	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
		snd_interval_any(constrs_interval(constrs, k));
	}

	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));

	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
				  snd_pcm_hw_rule_format, NULL,
				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
				  snd_pcm_hw_rule_sample_bits, NULL,
				  SNDRV_PCM_HW_PARAM_FORMAT,
				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
				  snd_pcm_hw_rule_div, NULL,
				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
				  snd_pcm_hw_rule_mul, NULL,
				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
				  snd_pcm_hw_rule_div, NULL,
				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
				  snd_pcm_hw_rule_div, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  snd_pcm_hw_rule_div, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  snd_pcm_hw_rule_mul, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				  snd_pcm_hw_rule_muldivk, (void*) 8,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				  snd_pcm_hw_rule_muldivk, (void*) 8,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		return err;
	return 0;
}
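
/*
 * The rule set built above only encodes the generic arithmetic between the
 * hw_params fields; a driver can stack its own restrictions on top of it,
 * typically from its open callback.  A minimal sketch (the callback name and
 * the specific restriction are assumptions, not taken from this file):
 *
 *	static int foo_pcm_open(struct snd_pcm_substream *substream)
 *	{
 *		struct snd_pcm_runtime *runtime = substream->runtime;
 *		int err;
 *
 *		// period size must be a multiple of 32 frames on this chip
 *		err = snd_pcm_hw_constraint_step(runtime, 0,
 *						 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
 *		if (err < 0)
 *			return err;
 *		return 0;
 *	}
 */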
int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hardware *hw = &runtime->hw;
	int err;
	unsigned int mask = 0;

	if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
		mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
	if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
		mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
	if (hw_support_mmap(substream)) {
		if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
			mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
		if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
			mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
		if (hw->info & SNDRV_PCM_INFO_COMPLEX)
			mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
	}
	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
					   hw->channels_min, hw->channels_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
					   hw->rate_min, hw->rate_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					   hw->period_bytes_min, hw->period_bytes_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
					   hw->periods_min, hw->periods_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					   hw->period_bytes_min, hw->buffer_bytes_max);
	if (err < 0)
		return err;

	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				  snd_pcm_hw_rule_buffer_bytes_max, substream,
				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
	if (err < 0)
		return err;

	if (runtime->dma_bytes) {
		err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
		if (err < 0)
			return err;
	}

	if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
		err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
					  snd_pcm_hw_rule_rate, hw,
					  SNDRV_PCM_HW_PARAM_RATE, -1);
		if (err < 0)
			return err;
	}

	/* FIXME: this belongs to lowlevel */
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);

	return 0;
}
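
/*
 * The constraints above are all seeded from the snd_pcm_hardware descriptor
 * the driver fills in before this function runs.  A minimal sketch of such a
 * descriptor (the field values are made up purely for illustration):
 *
 *	static const struct snd_pcm_hardware foo_pcm_hardware = {
 *		.info			= SNDRV_PCM_INFO_MMAP |
 *					  SNDRV_PCM_INFO_INTERLEAVED,
 *		.formats		= SNDRV_PCM_FMTBIT_S16_LE,
 *		.rates			= SNDRV_PCM_RATE_48000,
 *		.rate_min		= 48000,
 *		.rate_max		= 48000,
 *		.channels_min		= 2,
 *		.channels_max		= 2,
 *		.buffer_bytes_max	= 64 * 1024,
 *		.period_bytes_min	= 256,
 *		.period_bytes_max	= 16 * 1024,
 *		.periods_min		= 2,
 *		.periods_max		= 32,
 *	};
 *
 * which hw_constraints_complete() translates into the access, format, rate,
 * channel and period/buffer constraints seen above.
 */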
static void pcm_release_private(struct snd_pcm_substream *substream)
{
	if (snd_pcm_stream_linked(substream))
		snd_pcm_unlink(substream);
}
void snd_pcm_release_substream(struct snd_pcm_substream *substream)
{
	substream->ref_count--;
	if (substream->ref_count > 0)
		return;

	snd_pcm_drop(substream);
	if (substream->hw_opened) {
		if (substream->ops->hw_free &&
		    substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
			substream->ops->hw_free(substream);
		substream->ops->close(substream);
		substream->hw_opened = 0;
	}
	if (pm_qos_request_active(&substream->latency_pm_qos_req))
		pm_qos_remove_request(&substream->latency_pm_qos_req);
	if (substream->pcm_release) {
		substream->pcm_release(substream);
		substream->pcm_release = NULL;
	}
	snd_pcm_detach_substream(substream);
}
EXPORT_SYMBOL(snd_pcm_release_substream);
int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
			   struct file *file,
			   struct snd_pcm_substream **rsubstream)
{
	struct snd_pcm_substream *substream;
	int err;

	err = snd_pcm_attach_substream(pcm, stream, file, &substream);
	if (err < 0)
		return err;
	if (substream->ref_count > 1) {
		*rsubstream = substream;
		return 0;
	}

	err = snd_pcm_hw_constraints_init(substream);
	if (err < 0) {
		pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
		goto error;
	}

	if ((err = substream->ops->open(substream)) < 0)
		goto error;

	substream->hw_opened = 1;

	err = snd_pcm_hw_constraints_complete(substream);
	if (err < 0) {
		pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
		goto error;
	}

	*rsubstream = substream;
	return 0;

 error:
	snd_pcm_release_substream(substream);
	return err;
}
EXPORT_SYMBOL(snd_pcm_open_substream);
static int snd_pcm_open_file(struct file *file,
			     struct snd_pcm *pcm,
			     int stream)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	int err;

	err = snd_pcm_open_substream(pcm, stream, file, &substream);
	if (err < 0)
		return err;

	pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
	if (pcm_file == NULL) {
		snd_pcm_release_substream(substream);
		return -ENOMEM;
	}
	pcm_file->substream = substream;
	if (substream->ref_count == 1) {
		substream->file = pcm_file;
		substream->pcm_release = pcm_release_private;
	}
	file->private_data = pcm_file;

	return 0;
}
static int snd_pcm_playback_open(struct inode *inode, struct file *file)
{
	struct snd_pcm *pcm;
	int err = nonseekable_open(inode, file);

	if (err < 0)
		return err;
	pcm = snd_lookup_minor_data(iminor(inode),
				    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
	if (pcm)
		snd_card_unref(pcm->card);
	return err;
}

static int snd_pcm_capture_open(struct inode *inode, struct file *file)
{
	struct snd_pcm *pcm;
	int err = nonseekable_open(inode, file);

	if (err < 0)
		return err;
	pcm = snd_lookup_minor_data(iminor(inode),
				    SNDRV_DEVICE_TYPE_PCM_CAPTURE);
	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
	if (pcm)
		snd_card_unref(pcm->card);
	return err;
}
static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
{
	int err;
	wait_queue_entry_t wait;

	if (pcm == NULL) {
		err = -ENODEV;
		goto __error1;
	}
	err = snd_card_file_add(pcm->card, file);
	if (err < 0)
		goto __error1;
	if (!try_module_get(pcm->card->module)) {
		err = -EFAULT;
		goto __error2;
	}
	init_waitqueue_entry(&wait, current);
	add_wait_queue(&pcm->open_wait, &wait);
	mutex_lock(&pcm->open_mutex);
	while (1) {
		err = snd_pcm_open_file(file, pcm, stream);
		if (err >= 0)
			break;
		if (err == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK) {
				err = -EBUSY;
				break;
			}
		} else
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&pcm->open_mutex);
		schedule();
		mutex_lock(&pcm->open_mutex);
		if (pcm->card->shutdown) {
			err = -ENODEV;
			break;
		}
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
	}
	remove_wait_queue(&pcm->open_wait, &wait);
	mutex_unlock(&pcm->open_mutex);
	if (err < 0)
		goto __error;
	return err;

      __error:
	module_put(pcm->card->module);
      __error2:
	snd_card_file_remove(pcm->card, file);
      __error1:
	return err;
}
static int snd_pcm_release(struct inode *inode, struct file *file)
{
	struct snd_pcm *pcm;
	struct snd_pcm_substream *substream;
	struct snd_pcm_file *pcm_file;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (snd_BUG_ON(!substream))
		return -ENXIO;
	pcm = substream->pcm;
	mutex_lock(&pcm->open_mutex);
	snd_pcm_release_substream(substream);
	kfree(pcm_file);
	mutex_unlock(&pcm->open_mutex);
	wake_up(&pcm->open_wait);
	module_put(pcm->card->module);
	snd_card_file_remove(pcm->card, file);
	return 0;
}
/* check and update PCM state; return 0 or a negative error
 * call this inside PCM lock
 */
static int do_pcm_hwsync(struct snd_pcm_substream *substream)
{
	switch (substream->runtime->status->state) {
	case SNDRV_PCM_STATE_DRAINING:
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
			return -EBADFD;
		/* Fall through */
	case SNDRV_PCM_STATE_RUNNING:
		return snd_pcm_update_hw_ptr(substream);
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return 0;
	case SNDRV_PCM_STATE_SUSPENDED:
		return -ESTRPIPE;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		return -EBADFD;
	}
}
/* increase the appl_ptr; returns the processed frames or a negative error */
static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
					  snd_pcm_uframes_t frames,
					  snd_pcm_sframes_t avail)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t appl_ptr;
	int ret;

	if (avail <= 0)
		return 0;
	if (frames > (snd_pcm_uframes_t)avail)
		frames = avail;
	appl_ptr = runtime->control->appl_ptr + frames;
	if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
		appl_ptr -= runtime->boundary;
	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
	return ret < 0 ? ret : frames;
}

/* decrease the appl_ptr; returns the processed frames or zero for error */
static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
					 snd_pcm_uframes_t frames,
					 snd_pcm_sframes_t avail)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t appl_ptr;
	int ret;

	if (avail <= 0)
		return 0;
	if (frames > (snd_pcm_uframes_t)avail)
		frames = avail;
	appl_ptr = runtime->control->appl_ptr - frames;
	if (appl_ptr < 0)
		appl_ptr += runtime->boundary;
	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
	/* NOTE: we return zero for errors because PulseAudio gets depressed
	 * upon receiving an error from the rewind ioctl and stops processing
	 * any longer.  Returning zero means that no rewind is done, so
	 * it's not absolutely wrong to answer like that.
	 */
	return ret < 0 ? 0 : frames;
}
static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
					snd_pcm_uframes_t frames)
{
	snd_pcm_sframes_t ret;

	if (frames == 0)
		return 0;

	snd_pcm_stream_lock_irq(substream);
	ret = do_pcm_hwsync(substream);
	if (!ret)
		ret = rewind_appl_ptr(substream, frames,
				      snd_pcm_hw_avail(substream));
	snd_pcm_stream_unlock_irq(substream);
	return ret;
}

static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
					 snd_pcm_uframes_t frames)
{
	snd_pcm_sframes_t ret;

	if (frames == 0)
		return 0;

	snd_pcm_stream_lock_irq(substream);
	ret = do_pcm_hwsync(substream);
	if (!ret)
		ret = forward_appl_ptr(substream, frames,
				       snd_pcm_avail(substream));
	snd_pcm_stream_unlock_irq(substream);
	return ret;
}

static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
{
	int err;

	snd_pcm_stream_lock_irq(substream);
	err = do_pcm_hwsync(substream);
	snd_pcm_stream_unlock_irq(substream);
	return err;
}

static int snd_pcm_delay(struct snd_pcm_substream *substream,
			 snd_pcm_sframes_t *delay)
{
	int err;
	snd_pcm_sframes_t n = 0;

	snd_pcm_stream_lock_irq(substream);
	err = do_pcm_hwsync(substream);
	if (!err)
		n = snd_pcm_calc_delay(substream);
	snd_pcm_stream_unlock_irq(substream);
	if (!err)
		*delay = n;
	return err;
}
static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
			    struct snd_pcm_sync_ptr __user *_sync_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_sync_ptr sync_ptr;
	volatile struct snd_pcm_mmap_status *status;
	volatile struct snd_pcm_mmap_control *control;
	int err;

	memset(&sync_ptr, 0, sizeof(sync_ptr));
	if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
		return -EFAULT;
	if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
		return -EFAULT;
	status = runtime->status;
	control = runtime->control;
	if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
		err = snd_pcm_hwsync(substream);
		if (err < 0)
			return err;
	}
	snd_pcm_stream_lock_irq(substream);
	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
		err = pcm_lib_apply_appl_ptr(substream,
					     sync_ptr.c.control.appl_ptr);
		if (err < 0) {
			snd_pcm_stream_unlock_irq(substream);
			return err;
		}
	} else {
		sync_ptr.c.control.appl_ptr = control->appl_ptr;
	}
	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
		control->avail_min = sync_ptr.c.control.avail_min;
	else
		sync_ptr.c.control.avail_min = control->avail_min;
	sync_ptr.s.status.state = status->state;
	sync_ptr.s.status.hw_ptr = status->hw_ptr;
	sync_ptr.s.status.tstamp = status->tstamp;
	sync_ptr.s.status.suspended_state = status->suspended_state;
	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
	snd_pcm_stream_unlock_irq(substream);
	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
		return -EFAULT;
	return 0;
}
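
/*
 * A rough sketch of how user space drives SNDRV_PCM_IOCTL_SYNC_PTR when the
 * status/control pages are not mmapped (simplified, error handling omitted;
 * the variable names are illustrative):
 *
 *	struct snd_pcm_sync_ptr sp;
 *
 *	memset(&sp, 0, sizeof(sp));
 *	sp.flags = SNDRV_PCM_SYNC_PTR_HWSYNC;	// update hw_ptr first
 *	sp.c.control.appl_ptr = new_appl_ptr;	// pushed unless _APPL is set
 *	sp.c.control.avail_min = avail_min;	// pushed unless _AVAIL_MIN is set
 *	ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sp);
 *	// sp.s.status now holds the state, hw_ptr and timestamps copied above
 */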
static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int arg;

	if (get_user(arg, _arg))
		return -EFAULT;
	if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
		return -EINVAL;
	runtime->tstamp_type = arg;
	return 0;
}
static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
				      struct snd_xferi __user *_xferi)
{
	struct snd_xferi xferi;
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t result;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (put_user(0, &_xferi->result))
		return -EFAULT;
	if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
		return -EFAULT;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
	else
		result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
	__put_user(result, &_xferi->result);
	return result < 0 ? result : 0;
}

static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
				      struct snd_xfern __user *_xfern)
{
	struct snd_xfern xfern;
	struct snd_pcm_runtime *runtime = substream->runtime;
	void *bufs;
	snd_pcm_sframes_t result;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (runtime->channels > 128)
		return -EINVAL;
	if (put_user(0, &_xfern->result))
		return -EFAULT;
	if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
		return -EFAULT;

	bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
	if (IS_ERR(bufs))
		return PTR_ERR(bufs);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
	else
		result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
	kfree(bufs);
	__put_user(result, &_xfern->result);
	return result < 0 ? result : 0;
}
static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
				snd_pcm_uframes_t __user *_frames)
{
	snd_pcm_uframes_t frames;
	snd_pcm_sframes_t result;

	if (get_user(frames, _frames))
		return -EFAULT;
	if (put_user(0, _frames))
		return -EFAULT;
	result = snd_pcm_rewind(substream, frames);
	__put_user(result, _frames);
	return result < 0 ? result : 0;
}

static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
				 snd_pcm_uframes_t __user *_frames)
{
	snd_pcm_uframes_t frames;
	snd_pcm_sframes_t result;

	if (get_user(frames, _frames))
		return -EFAULT;
	if (put_user(0, _frames))
		return -EFAULT;
	result = snd_pcm_forward(substream, frames);
	__put_user(result, _frames);
	return result < 0 ? result : 0;
}
static int snd_pcm_common_ioctl(struct file *file,
				struct snd_pcm_substream *substream,
				unsigned int cmd, void __user *arg)
{
	struct snd_pcm_file *pcm_file = file->private_data;
	int res;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;

	res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
	if (res < 0)
		return res;

	switch (cmd) {
	case SNDRV_PCM_IOCTL_PVERSION:
		return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
	case SNDRV_PCM_IOCTL_INFO:
		return snd_pcm_info_user(substream, arg);
	case SNDRV_PCM_IOCTL_TSTAMP:	/* just for compatibility */
		return 0;
	case SNDRV_PCM_IOCTL_TTSTAMP:
		return snd_pcm_tstamp(substream, arg);
	case SNDRV_PCM_IOCTL_USER_PVERSION:
		if (get_user(pcm_file->user_pversion,
			     (unsigned int __user *)arg))
			return -EFAULT;
		return 0;
	case SNDRV_PCM_IOCTL_HW_REFINE:
		return snd_pcm_hw_refine_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_PARAMS:
		return snd_pcm_hw_params_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_FREE:
		return snd_pcm_hw_free(substream);
	case SNDRV_PCM_IOCTL_SW_PARAMS:
		return snd_pcm_sw_params_user(substream, arg);
	case SNDRV_PCM_IOCTL_STATUS:
		return snd_pcm_status_user(substream, arg, false);
	case SNDRV_PCM_IOCTL_STATUS_EXT:
		return snd_pcm_status_user(substream, arg, true);
	case SNDRV_PCM_IOCTL_CHANNEL_INFO:
		return snd_pcm_channel_info_user(substream, arg);
	case SNDRV_PCM_IOCTL_PREPARE:
		return snd_pcm_prepare(substream, file);
	case SNDRV_PCM_IOCTL_RESET:
		return snd_pcm_reset(substream);
	case SNDRV_PCM_IOCTL_START:
		return snd_pcm_start_lock_irq(substream);
	case SNDRV_PCM_IOCTL_LINK:
		return snd_pcm_link(substream, (int)(unsigned long) arg);
	case SNDRV_PCM_IOCTL_UNLINK:
		return snd_pcm_unlink(substream);
	case SNDRV_PCM_IOCTL_RESUME:
		return snd_pcm_resume(substream);
	case SNDRV_PCM_IOCTL_XRUN:
		return snd_pcm_xrun(substream);
	case SNDRV_PCM_IOCTL_HWSYNC:
		return snd_pcm_hwsync(substream);
	case SNDRV_PCM_IOCTL_DELAY:
	{
		snd_pcm_sframes_t delay;
		snd_pcm_sframes_t __user *res = arg;
		int err;

		err = snd_pcm_delay(substream, &delay);
		if (err)
			return err;
		if (put_user(delay, res))
			return -EFAULT;
		return 0;
	}
	case SNDRV_PCM_IOCTL_SYNC_PTR:
		return snd_pcm_sync_ptr(substream, arg);
#ifdef CONFIG_SND_SUPPORT_OLD_API
	case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
		return snd_pcm_hw_refine_old_user(substream, arg);
	case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
		return snd_pcm_hw_params_old_user(substream, arg);
#endif
	case SNDRV_PCM_IOCTL_DRAIN:
		return snd_pcm_drain(substream, file);
	case SNDRV_PCM_IOCTL_DROP:
		return snd_pcm_drop(substream);
	case SNDRV_PCM_IOCTL_PAUSE:
		return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
					       substream,
					       (int)(unsigned long)arg);
	case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
	case SNDRV_PCM_IOCTL_READI_FRAMES:
		return snd_pcm_xferi_frames_ioctl(substream, arg);
	case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
	case SNDRV_PCM_IOCTL_READN_FRAMES:
		return snd_pcm_xfern_frames_ioctl(substream, arg);
	case SNDRV_PCM_IOCTL_REWIND:
		return snd_pcm_rewind_ioctl(substream, arg);
	case SNDRV_PCM_IOCTL_FORWARD:
		return snd_pcm_forward_ioctl(substream, arg);
	}
	pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
	return -ENOTTY;
}
static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct snd_pcm_file *pcm_file;

	pcm_file = file->private_data;

	if (((cmd >> 8) & 0xff) != 'A')
		return -ENOTTY;

	return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
				    (void __user *)arg);
}
/**
 * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
 * @substream: PCM substream
 * @cmd: IOCTL command
 * @arg: IOCTL argument
 *
 * The function is provided primarily for OSS layer and USB gadget drivers,
 * and it allows only the limited set of ioctls (hw_params, sw_params,
 * prepare, start, drain, drop, forward).
 */
int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
			 unsigned int cmd, void *arg)
{
	snd_pcm_uframes_t *frames = arg;
	snd_pcm_sframes_t result;

	switch (cmd) {
	case SNDRV_PCM_IOCTL_FORWARD:
	{
		/* provided only for OSS; capture-only and no value returned */
		if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
			return -EINVAL;
		result = snd_pcm_forward(substream, *frames);
		return result < 0 ? result : 0;
	}
	case SNDRV_PCM_IOCTL_HW_PARAMS:
		return snd_pcm_hw_params(substream, arg);
	case SNDRV_PCM_IOCTL_SW_PARAMS:
		return snd_pcm_sw_params(substream, arg);
	case SNDRV_PCM_IOCTL_PREPARE:
		return snd_pcm_prepare(substream, NULL);
	case SNDRV_PCM_IOCTL_START:
		return snd_pcm_start_lock_irq(substream);
	case SNDRV_PCM_IOCTL_DRAIN:
		return snd_pcm_drain(substream, NULL);
	case SNDRV_PCM_IOCTL_DROP:
		return snd_pcm_drop(substream);
	case SNDRV_PCM_IOCTL_DELAY:
		return snd_pcm_delay(substream, frames);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
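
/*
 * A minimal sketch of an in-kernel caller (e.g. the OSS emulation layer or a
 * USB gadget driver); the substream pointer is assumed to come from
 * snd_pcm_open_substream() and sw_params is filled in by the caller:
 *
 *	struct snd_pcm_sw_params *sw_params;
 *	int err;
 *
 *	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_SW_PARAMS,
 *				   sw_params);
 *	if (!err)
 *		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE,
 *					   NULL);
 *
 * Anything outside the switch above (mmap, status, etc.) is rejected with
 * -EINVAL.
 */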
static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
			    loff_t *offset)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!frame_aligned(runtime, count))
		return -EINVAL;
	count = bytes_to_frames(runtime, count);
	result = snd_pcm_lib_read(substream, buf, count);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	return result;
}

static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
			     size_t count, loff_t * offset)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!frame_aligned(runtime, count))
		return -EINVAL;
	count = bytes_to_frames(runtime, count);
	result = snd_pcm_lib_write(substream, buf, count);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	return result;
}
static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;
	unsigned long i;
	void __user **bufs;
	snd_pcm_uframes_t frames;

	pcm_file = iocb->ki_filp->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!iter_is_iovec(to))
		return -EINVAL;
	if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
		return -EINVAL;
	if (!frame_aligned(runtime, to->iov->iov_len))
		return -EINVAL;
	frames = bytes_to_samples(runtime, to->iov->iov_len);
	bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
	if (bufs == NULL)
		return -ENOMEM;
	for (i = 0; i < to->nr_segs; ++i)
		bufs[i] = to->iov[i].iov_base;
	result = snd_pcm_lib_readv(substream, bufs, frames);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	kfree(bufs);
	return result;
}

static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t result;
	unsigned long i;
	void __user **bufs;
	snd_pcm_uframes_t frames;

	pcm_file = iocb->ki_filp->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!iter_is_iovec(from))
		return -EINVAL;
	if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
	    !frame_aligned(runtime, from->iov->iov_len))
		return -EINVAL;
	frames = bytes_to_samples(runtime, from->iov->iov_len);
	bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
	if (bufs == NULL)
		return -ENOMEM;
	for (i = 0; i < from->nr_segs; ++i)
		bufs[i] = from->iov[i].iov_base;
	result = snd_pcm_lib_writev(substream, bufs, frames);
	if (result > 0)
		result = frames_to_bytes(runtime, result);
	kfree(bufs);
	return result;
}
static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	__poll_t mask, ok;
	snd_pcm_uframes_t avail;

	pcm_file = file->private_data;

	substream = pcm_file->substream;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		ok = EPOLLOUT | EPOLLWRNORM;
	else
		ok = EPOLLIN | EPOLLRDNORM;
	if (PCM_RUNTIME_CHECK(substream))
		return ok | EPOLLERR;

	runtime = substream->runtime;
	poll_wait(file, &runtime->sleep, wait);

	mask = 0;
	snd_pcm_stream_lock_irq(substream);
	avail = snd_pcm_avail(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		if (avail >= runtime->control->avail_min)
			mask = ok;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
			mask = ok;
			if (!avail)
				mask |= EPOLLERR;
		}
		break;
	default:
		mask = ok | EPOLLERR;
		break;
	}
	snd_pcm_stream_unlock_irq(substream);
	return mask;
}
/*
 * Only on coherent architectures can we mmap the status and the control
 * records for efficient data transfer.  On others, we have to use the
 * HWSYNC ioctl...
 */
#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
/*
 * mmap status record
 */
static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
	struct snd_pcm_runtime *runtime;

	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	vmf->page = virt_to_page(runtime->status);
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct snd_pcm_vm_ops_status =
{
	.fault =	snd_pcm_mmap_status_fault,
};

static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
			       struct vm_area_struct *area)
{
	long size;

	if (!(area->vm_flags & VM_READ))
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
		return -EINVAL;
	area->vm_ops = &snd_pcm_vm_ops_status;
	area->vm_private_data = substream;
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
/*
 * mmap control record
 */
static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
	struct snd_pcm_runtime *runtime;

	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	vmf->page = virt_to_page(runtime->control);
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct snd_pcm_vm_ops_control =
{
	.fault =	snd_pcm_mmap_control_fault,
};

static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
				struct vm_area_struct *area)
{
	long size;

	if (!(area->vm_flags & VM_READ))
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
		return -EINVAL;
	area->vm_ops = &snd_pcm_vm_ops_control;
	area->vm_private_data = substream;
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
{
	if (pcm_file->no_compat_mmap)
		return false;
	/* See pcm_control_mmap_allowed() below.
	 * Since older alsa-lib requires both status and control mmaps to be
	 * coupled, we have to disable the status mmap for old alsa-lib, too.
	 */
	if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
	    (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
		return false;
	return true;
}

static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
{
	if (pcm_file->no_compat_mmap)
		return false;
	/* Disallow the control mmap when SYNC_APPLPTR flag is set;
	 * it enforces the user-space to fall back to snd_pcm_sync_ptr(),
	 * thus it effectively assures the manual update of appl_ptr.
	 */
	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
		return false;
	return true;
}
#else /* ! coherent mmap */
/*
 * don't support mmap for status and control records.
 */
#define pcm_status_mmap_allowed(pcm_file)	false
#define pcm_control_mmap_allowed(pcm_file)	false

static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
			       struct vm_area_struct *area)
{
	return -ENXIO;
}
static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
				struct vm_area_struct *area)
{
	return -ENXIO;
}
#endif /* coherent mmap */
static inline struct page *
snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
{
	void *vaddr = substream->runtime->dma_area + ofs;
	return virt_to_page(vaddr);
}

/*
 * fault callback for mmapping a RAM page
 */
static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
	struct snd_pcm_runtime *runtime;
	unsigned long offset;
	struct page *page;
	size_t dma_bytes;

	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	offset = vmf->pgoff << PAGE_SHIFT;
	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
	if (offset > dma_bytes - PAGE_SIZE)
		return VM_FAULT_SIGBUS;
	if (substream->ops->page)
		page = substream->ops->page(substream, offset);
	else
		page = snd_pcm_default_page_ops(substream, offset);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct snd_pcm_vm_ops_data = {
	.open =		snd_pcm_mmap_data_open,
	.close =	snd_pcm_mmap_data_close,
};

static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
	.open =		snd_pcm_mmap_data_open,
	.close =	snd_pcm_mmap_data_close,
	.fault =	snd_pcm_mmap_data_fault,
};
/*
 * mmap the DMA buffer on RAM
 */

/**
 * snd_pcm_lib_default_mmap - Default PCM data mmap function
 * @substream: PCM substream
 * @area: VMA
 *
 * This is the default mmap handler for PCM data.  When mmap pcm_ops is NULL,
 * this function is invoked implicitly.
 */
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
			     struct vm_area_struct *area)
{
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
#ifdef CONFIG_GENERIC_ALLOCATOR
	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
		return remap_pfn_range(area, area->vm_start,
				substream->dma_buffer.addr >> PAGE_SHIFT,
				area->vm_end - area->vm_start, area->vm_page_prot);
	}
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifndef CONFIG_X86	/* for avoiding warnings arch/x86/mm/pat.c */
	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
	    substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
		return dma_mmap_coherent(substream->dma_buffer.dev.dev,
					 area,
					 substream->runtime->dma_area,
					 substream->runtime->dma_addr,
					 substream->runtime->dma_bytes);
#endif /* CONFIG_X86 */
	/* mmap with fault handler */
	area->vm_ops = &snd_pcm_vm_ops_data_fault;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
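
/*
 * A driver whose buffer lives in ordinary kernel RAM can simply leave .mmap
 * unset in its snd_pcm_ops and the helper above is used implicitly.  A
 * minimal sketch of such an ops table (the foo_* callbacks are hypothetical
 * driver functions, not part of this file):
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		.open		= foo_pcm_open,
 *		.close		= foo_pcm_close,
 *		.ioctl		= snd_pcm_lib_ioctl,
 *		.hw_params	= foo_pcm_hw_params,
 *		.hw_free	= foo_pcm_hw_free,
 *		.prepare	= foo_pcm_prepare,
 *		.trigger	= foo_pcm_trigger,
 *		.pointer	= foo_pcm_pointer,
 *		// no .mmap / .page: snd_pcm_lib_default_mmap() is used
 *	};
 */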
/*
 * mmap the DMA buffer on I/O memory area
 */
#if SNDRV_PCM_INFO_MMAP_IOMEM
/**
 * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
 * @substream: PCM substream
 * @area: VMA
 *
 * When your hardware uses the iomapped pages as the hardware buffer and
 * wants to mmap it, pass this function as mmap pcm_ops.  Note that this
 * is supposed to work only on limited architectures.
 */
int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
			   struct vm_area_struct *area)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
}
EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
#endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
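
/*
 * A sketch of the intended use: a driver whose ring buffer is iomapped
 * device memory advertises SNDRV_PCM_INFO_MMAP_IOMEM in its hardware info
 * and wires the helper up as its mmap handler (the ops name is
 * hypothetical):
 *
 *	static const struct snd_pcm_ops bar_pcm_ops = {
 *		...
 *		.mmap	= snd_pcm_lib_mmap_iomem,
 *	};
 *
 * runtime->dma_addr and runtime->dma_bytes must then describe the physical
 * I/O region to be mapped.
 */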
int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
		      struct vm_area_struct *area)
{
	struct snd_pcm_runtime *runtime;
	long size;
	unsigned long offset;
	size_t dma_bytes;
	int err;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (!(area->vm_flags & (VM_WRITE|VM_READ)))
			return -EINVAL;
	} else {
		if (!(area->vm_flags & VM_READ))
			return -EINVAL;
	}
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
		return -ENXIO;
	if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
		return -EINVAL;
	size = area->vm_end - area->vm_start;
	offset = area->vm_pgoff << PAGE_SHIFT;
	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
	if ((size_t)size > dma_bytes)
		return -EINVAL;
	if (offset > dma_bytes - size)
		return -EINVAL;

	area->vm_ops = &snd_pcm_vm_ops_data;
	area->vm_private_data = substream;
	if (substream->ops->mmap)
		err = substream->ops->mmap(substream, area);
	else
		err = snd_pcm_lib_default_mmap(substream, area);
	if (!err)
		atomic_inc(&substream->mmap_count);
	return err;
}
EXPORT_SYMBOL(snd_pcm_mmap_data);
static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	unsigned long offset;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;

	offset = area->vm_pgoff << PAGE_SHIFT;
	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS:
		if (!pcm_status_mmap_allowed(pcm_file))
			return -ENXIO;
		return snd_pcm_mmap_status(substream, file, area);
	case SNDRV_PCM_MMAP_OFFSET_CONTROL:
		if (!pcm_control_mmap_allowed(pcm_file))
			return -ENXIO;
		return snd_pcm_mmap_control(substream, file, area);
	default:
		return snd_pcm_mmap_data(substream, file, area);
	}
	return 0;
}
static int snd_pcm_fasync(int fd, struct file *file, int on)
{
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	return fasync_helper(fd, file, on, &runtime->fasync);
}

#ifdef CONFIG_COMPAT
#include "pcm_compat.c"
#else
#define snd_pcm_ioctl_compat	NULL
#endif
/*
 *  To be removed helpers to keep binary compatibility
 */

#ifdef CONFIG_SND_SUPPORT_OLD_API
#define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
#define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))

static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
					       struct snd_pcm_hw_params_old *oparams)
{
	unsigned int i;

	memset(params, 0, sizeof(*params));
	params->flags = oparams->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		params->masks[i].bits[0] = oparams->masks[i];
	memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
	params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
	params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
	params->info = oparams->info;
	params->msbits = oparams->msbits;
	params->rate_num = oparams->rate_num;
	params->rate_den = oparams->rate_den;
	params->fifo_size = oparams->fifo_size;
}

static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
					     struct snd_pcm_hw_params *params)
{
	unsigned int i;

	memset(oparams, 0, sizeof(*oparams));
	oparams->flags = params->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		oparams->masks[i] = params->masks[i].bits[0];
	memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
	oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
	oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
	oparams->info = params->info;
	oparams->msbits = params->msbits;
	oparams->rate_num = params->rate_num;
	oparams->rate_den = params->rate_den;
	oparams->fifo_size = params->fifo_size;
}
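
/*
 * The mask remapping, worked through on one value (purely illustrative): the
 * old ABI packed the hw_params interval bits right after the three mask
 * bits, while the new ABI leaves a gap for additional parameters.
 * __OLD_TO_NEW_MASK() therefore keeps bits 0-2 and shifts the rest up by 5,
 * e.g. an old rmask of 0x0b (bits 0, 1 and 3) becomes
 *	(0x0b & 7) | ((0x0b & 0x07fffff8) << 5) = 0x03 | 0x100 = 0x103,
 * and __NEW_TO_OLD_MASK() performs the inverse shift.
 */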
static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}
	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto out_old;

	err = fixup_unreferenced_params(substream, params);
	if (err < 0)
		goto out_old;

	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
		err = -EFAULT;
out_old:
	kfree(oparams);
out:
	kfree(params);
	return err;
}

static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}

	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_params(substream, params);
	if (err < 0)
		goto out_old;

	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
		err = -EFAULT;
out_old:
	kfree(oparams);
out:
	kfree(params);
	return err;
}
#endif /* CONFIG_SND_SUPPORT_OLD_API */
#ifndef CONFIG_MMU
static unsigned long snd_pcm_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct snd_pcm_file *pcm_file = file->private_data;
	struct snd_pcm_substream *substream = pcm_file->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long offset = pgoff << PAGE_SHIFT;

	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS:
		return (unsigned long)runtime->status;
	case SNDRV_PCM_MMAP_OFFSET_CONTROL:
		return (unsigned long)runtime->control;
	default:
		return (unsigned long)runtime->dma_area + offset;
	}
}
#else
# define snd_pcm_get_unmapped_area NULL
#endif
const struct file_operations snd_pcm_f_ops[2] = {
	{
		.owner =		THIS_MODULE,
		.write =		snd_pcm_write,
		.write_iter =		snd_pcm_writev,
		.open =			snd_pcm_playback_open,
		.release =		snd_pcm_release,
		.llseek =		no_llseek,
		.poll =			snd_pcm_poll,
		.unlocked_ioctl =	snd_pcm_ioctl,
		.compat_ioctl = 	snd_pcm_ioctl_compat,
		.mmap =			snd_pcm_mmap,
		.fasync =		snd_pcm_fasync,
		.get_unmapped_area =	snd_pcm_get_unmapped_area,
	},
	{
		.owner =		THIS_MODULE,
		.read =			snd_pcm_read,
		.read_iter =		snd_pcm_readv,
		.open =			snd_pcm_capture_open,
		.release =		snd_pcm_release,
		.llseek =		no_llseek,
		.poll =			snd_pcm_poll,
		.unlocked_ioctl =	snd_pcm_ioctl,
		.compat_ioctl = 	snd_pcm_ioctl_compat,
		.mmap =			snd_pcm_mmap,
		.fasync =		snd_pcm_fasync,
		.get_unmapped_area =	snd_pcm_get_unmapped_area,
	}
};