// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

/* common isochronous packet header parameters */
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff
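
// Informative sketch (not part of the original file): how the two CIP
// quadlets built and parsed below lay out on the wire, following the
// shift/mask macros above. The FN/QPC bits are not used by this module and
// are assumed zero here.
//
//	quadlet 0: [31:30]=00 [29:24]=SID [23:16]=DBS [15:14]=FN [13:11]=QPC
//	           [10]=SPH [9:8]=reserved [7:0]=DBC
//	quadlet 1: [31]=EOH(1) [30]=0 [29:24]=FMT [23:16]=FDF [15:0]=SYT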

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

// For iso header, tstamp and 2 CIP header.
#define IR_CTX_HEADER_SIZE_CIP		16
// For iso header and tstamp.
#define IR_CTX_HEADER_SIZE_NO_CIP	8
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		8 // For 2 CIP header.
#define IT_PKT_HEADER_SIZE_NO_CIP	0 // Nothing.

static void pcm_period_tasklet(unsigned long data);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of stream
 * @flags: the packet transmission method to use
 * @fmt: the value of fmt field in CIP header
 * @process_ctx_payloads: callback handler to process payloads of isoc context
 * @protocol_size: the size to allocate newly for protocol
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
	s->packet_index = 0;

	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	if (dir == AMDTP_OUT_STREAM)
		s->ctx_data.rx.syt_override = -1;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s:		the AMDTP stream, which must be initialized.
 * @runtime:	the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating much pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// Linux driver for 1394 OHCI controller voluntarily flushes isoc
	// context when total size of accumulated context header reaches
	// PAGE_SIZE. This kicks tasklet for the isoc context and brings
	// callback in the middle of scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, the size of context header in IR context is used for both
	// contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;
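
	// Worked example for the cap computed above (informative, assuming a
	// 4 KiB PAGE_SIZE and the 16-byte IR context header used with CIP):
	// headers accumulate at 8000 per second, so a page fills after
	// 4096 / 16 = 256 packets, i.e. 1000000 * 4096 / 8000 / 16 = 32000
	// usec of stream time. The period time is thus capped at 32 msec.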

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of syt interval. This comes from the interval of isoc cycle. As 1394
	// OHCI controller can generate hardware IRQ per isoc packet, the
	// interval is 125 usec.
	// However, there are two ways of transmission in IEC 61883-6; blocking
	// and non-blocking modes. In blocking mode, the sequence of isoc packet
	// includes 'empty' or 'NODATA' packets which include no event. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double of the value of syt interval, thus it is
	// 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals to SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupt, it's
	 * preferable to align period/buffer sizes to current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	if (s->direction == AMDTP_OUT_STREAM) {
		s->ctx_data.rx.transfer_delay =
					TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

		if (s->flags & CIP_BLOCKING) {
			// additional buffering needed to adjust for no-data
			// packets.
			s->ctx_data.rx.transfer_delay +=
				TICKS_PER_SECOND * s->syt_interval / rate;
		}
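		// The addition above, worked through (informative, not in
		// the original source): at 48000 Hz with syt_interval 8 it
		// adds 24576000 * 8 / 48000 = 4096 ticks on top of the base
		// delay of 0x2e00 - 3072 = 8704 ticks.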
	}

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int multiplier = 1;
	unsigned int cip_header_size = 0;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;
	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = sizeof(__be32) * 2;
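
	// Informative example for the sum returned below (not in the
	// original source): at 48000 Hz (syt_interval 8) with 10 quadlets
	// per data block, a CIP header and no CIP_JUMBO_PAYLOAD, the result
	// is 8 + 8 * 10 * 4 = 328 bytes.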

	return cip_header_size +
		s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	tasklet_kill(&s->period_tasklet);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(struct amdtp_stream *s,
					  unsigned int syt)
{
	unsigned int phase, data_blocks;

	/* Blocking mode. */
	if (s->flags & CIP_BLOCKING) {
		/* This module generates empty packet for 'no data'. */
		if (syt == CIP_SYT_NO_INFO)
			data_blocks = 0;
		else
			data_blocks = s->syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(s->sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			data_blocks = s->ctx_data.rx.data_block_state;
		} else {
			phase = s->ctx_data.rx.data_block_state;

		/*
		 * This calculates the number of data blocks per packet so that
		 * 1) the overall rate is correct and exactly synchronized to
		 *    the bus clock, and
		 * 2) packets with a rounded-up number of blocks occur as early
		 *    as possible in the sequence (to prevent underruns of the
		 *    device's buffer).
		 */
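			// Informative note (not in the original source): at
			// 44100 Hz, 44100 / 8000 = 5.5125 events have to be
			// sent per cycle on average. The '6 6 5 6 5 ...'
			// pattern below distributes 441 events over each run
			// of 80 cycles: 41 six-block and 39 five-block
			// packets, and 41 * 6 + 39 * 5 = 441.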
			if (s->sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (s->sfc >> 1)))
				phase = 0;
			s->ctx_data.rx.data_block_state = phase;
		}
	}

	return data_blocks;
}

static unsigned int calculate_syt(struct amdtp_stream *s,
				  unsigned int cycle)
{
	unsigned int syt_offset, phase, index, syt;

	if (s->ctx_data.rx.last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(s->sfc))
			syt_offset = s->ctx_data.rx.last_syt_offset +
				     s->ctx_data.rx.syt_offset_state;
		else {
		/*
		 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
		 *   n * SYT_INTERVAL * 24576000 / sample_rate
		 * Modulo TICKS_PER_CYCLE, the difference between successive
		 * elements is about 1386.23.  Rounding the results of this
		 * formula to the SYT precision results in a sequence of
		 * differences that begins with:
		 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
		 * This code generates _exactly_ the same sequence.
		 */
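		// Informative derivation (not in the original source): with
		// SYT_INTERVAL 8 at 44100 Hz, successive SYT times advance by
		// 8 * 24576000 / 44100 = 4458.23 ticks, which is one whole
		// cycle (3072 ticks) plus the 1386.23 mentioned above.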
			phase = s->ctx_data.rx.syt_offset_state;
			index = phase % 13;
			syt_offset = s->ctx_data.rx.last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			s->ctx_data.rx.syt_offset_state = phase;
		}
	} else
		syt_offset = s->ctx_data.rx.last_syt_offset - TICKS_PER_CYCLE;
	s->ctx_data.rx.last_syt_offset = syt_offset;

	if (syt_offset < TICKS_PER_CYCLE) {
		syt_offset += s->ctx_data.rx.transfer_delay;
		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
		syt += syt_offset % TICKS_PER_CYCLE;

		return syt & CIP_SYT_MASK;
	} else {
		return CIP_SYT_NO_INFO;
	}
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		tasklet_hi_schedule(&s->period_tasklet);
	}
}

static void pcm_period_tasklet(unsigned long data)
{
	struct amdtp_stream *s = (void *)data;
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}

static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter,
				unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
			((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
			((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
			(syt & CIP_SYT_MASK));
}

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = 2 * sizeof(__be32);
		payload_length += params->header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
			   data_block_counter, index);
}

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				"Invalid CIP header for AMDTP: %08X:%08X\n",
				cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check valid protocol or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length < sizeof(__be32) * 2 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = (payload_length / sizeof(__be32) - 2) /
							data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *payload_length,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int index)
{
	const __be32 *cip_header;
	int err;

	*payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
	if (*payload_length > s->ctx_data.tx.ctx_header_size +
					s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			*payload_length, s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = ctx_header + 2;
		err = check_cip_header(s, cip_header, *payload_length,
				       data_blocks, data_block_counter, syt);
		if (err < 0)
			return err;
	} else {
		cip_header = NULL;
		err = 0;
		*data_blocks = *payload_length / sizeof(__be32) /
			       s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
			   *data_block_counter, index);

	return err;
}

// In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On
// the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
// it. Thus, via Linux firewire subsystem, we can get the 3 bits for second.
static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
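
	// Informative example (not in the original source): the 16-bit
	// tstamp packs a 3-bit second count above a 13-bit cycle count. For
	// tstamp 0x4e20, sec = 0x4e20 >> 13 = 2 and cycle = 0x4e20 & 0x1fff
	// = 3616, so the flattened count below is 2 * 8000 + 3616 = 19616.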

	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}

static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= 8 * CYCLES_PER_SECOND)
		cycle -= 8 * CYCLES_PER_SECOND;
	return cycle;
}

// Align to the actual cycle count for the packet which is going to be
// scheduled. This module queues the same number of packets as the queue size,
// so it is OK to just increment the cycle in the timestamp by the queue size
// to get the cycle in which the packet will be scheduled.
static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
				   unsigned int queue_size)
{
	u32 cycle = compute_cycle_count(ctx_header_tstamp);
	return increment_cycle_count(cycle, queue_size);
}

static int generate_device_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets)
{
	unsigned int dbc = s->data_block_counter;
	int i;
	int err;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		unsigned int cycle;
		unsigned int payload_length;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_cycle_count(ctx_header[1]);

		err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
					  &data_blocks, &dbc, &syt, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		ctx_header +=
			s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	}

	s->data_block_counter = dbc;

	return 0;
}

static void generate_ideal_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets)
{
	unsigned int dbc = s->data_block_counter;
	int i;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;

		desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
		desc->syt = calculate_syt(s, desc->cycle);
		desc->data_blocks = calculate_data_blocks(s, desc->syt);

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		++ctx_header;
	}

	s->data_block_counter = dbc;
}

static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (in_interrupt())
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *descs,
				 unsigned int packets)
{
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;

	pcm = READ_ONCE(s->pcm);
	pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
	if (pcm)
		update_pcm_pointers(s, pcm, pcm_frames);
}

static void amdtp_stream_master_callback(struct fw_iso_context *context,
					 u32 tstamp, size_t header_length,
					 void *header, void *private_data);

static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
					       u32 tstamp, size_t header_length,
					       void *header, void *private_data);

static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int events_per_period = s->ctx_data.rx.events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	unsigned int packets;
	bool is_irq_target;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_ideal_pkt_descs(s, s->pkt_descs, ctx_header, packets);

	process_ctx_payloads(s, s->pkt_descs, packets);

	is_irq_target =
		!!(context->callback.sc == amdtp_stream_master_callback ||
		   context->callback.sc == amdtp_stream_master_first_callback);

	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = s->pkt_descs + i;
		unsigned int syt;
		struct {
			struct fw_iso_packet params;
			__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
		} template = { {0}, {0} };
		bool sched_irq = false;

		if (s->ctx_data.rx.syt_override < 0)
			syt = desc->syt;
		else
			syt = s->ctx_data.rx.syt_override;

		build_it_pkt_header(s, desc->cycle, &template.params,
				    desc->data_blocks, desc->data_block_counter,
				    syt, i);

		if (is_irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = true;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}

	s->ctx_data.rx.event_count = event_count;
}

static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	unsigned int packets;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		process_ctx_payloads(s, s->pkt_descs, packets);
	}

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void amdtp_stream_master_callback(struct fw_iso_context *context,
					 u32 tstamp, size_t header_length,
					 void *header, void *private_data)
{
	struct amdtp_domain *d = private_data;
	struct amdtp_stream *irq_target = d->irq_target;
	struct amdtp_stream *s;

	out_stream_callback(context, tstamp, header_length, header, irq_target);
	if (amdtp_streaming_error(irq_target))
		goto error;

	list_for_each_entry(s, &d->streams, list) {
		if (s != irq_target && amdtp_stream_running(s)) {
			fw_iso_context_flush_completions(s->context);
			if (amdtp_streaming_error(s))
				goto error;
		}
	}

	return;
error:
	if (amdtp_stream_running(irq_target))
		cancel_stream(irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}

// This is executed one time.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	u32 cycle;

	/*
	 * For in-stream, first packet has come.
	 * For out-stream, prepared to transmit first packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM) {
		cycle = compute_cycle_count(ctx_header[1]);

		context->callback.sc = in_stream_callback;
	} else {
		cycle = compute_it_cycle(*ctx_header, s->queue_size);

		context->callback.sc = out_stream_callback;
	}

	s->start_cycle = cycle;

	context->callback.sc(context, tstamp, header_length, header, s);
}

static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
					       u32 tstamp, size_t header_length,
					       void *header, void *private_data)
{
	struct amdtp_domain *d = private_data;
	struct amdtp_stream *s = d->irq_target;
	const __be32 *ctx_header = header;

	s->callbacked = true;
	wake_up(&s->callback_wait);

	s->start_cycle = compute_it_cycle(*ctx_header, s->queue_size);

	context->callback.sc = amdtp_stream_master_callback;

	context->callback.sc(context, tstamp, header_length, header, d);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @d: the AMDTP domain to which the AMDTP stream belongs
 * @is_irq_target: whether isoc context for the AMDTP stream is used to generate
 *		   hardware IRQ.
 * @start_cycle: the isochronous cycle to start the context. Start immediately
 *		 if negative value is given.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      struct amdtp_domain *d, bool is_irq_target,
			      int start_cycle)
{
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} *entry, initial_state[] = {
		[CIP_SFC_32000]  = {  4, 3072 },
		[CIP_SFC_48000]  = {  6, 1024 },
		[CIP_SFC_96000]  = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100]  = {  0,   67 },
		[CIP_SFC_88200]  = {  0,   67 },
		[CIP_SFC_176400] = {  0,   67 },
	};
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int idle_irq_interval;
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;
	fw_iso_callback_t ctx_cb;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		entry = &initial_state[s->sfc];

		s->data_block_counter = 0;
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
	}

	/* initialize packet buffer */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;

		max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
				       ctx_header_size;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.

		max_ctx_payload_size = amdtp_stream_get_max_payload(s);
		if (!(s->flags & CIP_NO_HEADER))
			max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
	}

	// This is a case that AMDTP streams in domain run just for MIDI
	// substream. Use the number of events equivalent to 10 msec as
	// interval of hardware IRQ.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[s->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
					 amdtp_rate_table[s->sfc]);
	s->queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				     amdtp_rate_table[s->sfc]);
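
	// Worked example (informative, not in the original source): at
	// 48000 Hz with events_per_period 480 (10 msec of events) and
	// events_per_buffer 1440, the IRQ interval computed above is
	// 8000 * 480 / 48000 = 80 cycles and the queue holds
	// 8000 * 1440 / 48000 = 240 packets.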

	err = iso_packets_buffer_init(&s->buffer, s->unit, s->queue_size,
				      max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;

	if (is_irq_target) {
		s->ctx_data.rx.events_per_period = events_per_period;
		s->ctx_data.rx.event_count = 0;
		ctx_cb = amdtp_stream_master_first_callback;
	} else {
		ctx_cb = amdtp_stream_first_callback;
	}

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   ctx_cb, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
			       GFP_KERNEL);
	if (!s->pkt_descs) {
		err = -ENOMEM;
		goto err_context;
	}

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects in stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, start_cycle, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->pkt_descs);
err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// This function is called in software IRQ context of
		// period_tasklet or process context.
		//
		// When the software IRQ context was scheduled by software IRQ
		// context of IT contexts, queued packets were already handled.
		// Therefore, no need to flush the queue in buffer furthermore.
		//
		// When the process context reaches here, some packets will be
		// already queued in the buffer. These packets should be handled
		// immediately to keep better granularity of PCM pointer.
		//
		// Later, the process context will sometimes schedule software
		// IRQ context of the period_tasklet. Then, no need to flush
		// the queue for the same reason as described above.
		if (!in_interrupt()) {
			// Queued packet should be processed without any kernel
			// preemption to keep latency against bus cycle.
			preempt_disable();
			fw_iso_context_flush_completions(irq_target->context);
			preempt_enable();
		}
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);

/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for recent isochronous cycle to handle
	// queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target)) {
		// Queued packet should be processed without any kernel
		// preemption to keep latency against bus cycle.
		preempt_disable();
		fw_iso_context_flush_completions(irq_target->context);
		preempt_enable();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) &
								CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	tasklet_kill(&s->period_tasklet);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->pkt_descs);

	s->callbacked = false;

	mutex_unlock(&s->mutex);
}

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);

/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);

/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);

/**
 * amdtp_domain_add_stream - register isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);

static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
{
	int generation;
	int rcode;
	__be32 reg;
	u32 data;

	// This is a request to local 1394 OHCI controller and expected to
	// complete without any event waiting.
	generation = fw_card->generation;
	smp_rmb();	// node_id vs. generation.
	rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
				   fw_card->node_id, generation, SCODE_100,
				   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
				   &reg, sizeof(reg));
	if (rcode != RCODE_COMPLETE)
		return -EIO;

	data = be32_to_cpu(reg);
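
	// Informative note (not in the original source): CYCLE_TIME packs
	// 7 bits of second, 13 bits of cycle and 12 bits of cycle offset
	// from MSB to LSB, so the shift below drops the offset and keeps
	// 'sec << 13 | cycle' in the lower 20 bits.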
	*cur_cycle = data >> 12;

	return 0;
}

/**
 * amdtp_domain_start - start sending packets for isoc context in the domain.
 * @d: the AMDTP domain.
 * @ir_delay_cycle: the cycle delay to start all IR contexts.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
{
	struct amdtp_stream *s;
	int cycle;
	int err;

	// Select an IT context as IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM)
			break;
	}
	if (!s)
		return -ENXIO;
	d->irq_target = s;

	if (ir_delay_cycle > 0) {
		struct fw_card *fw_card = fw_parent_device(s->unit)->card;

		err = get_current_cycle_time(fw_card, &cycle);
		if (err < 0)
			return err;

		// No need to care overflow in cycle field because of enough
		// width.
		cycle += ir_delay_cycle;

		// Round up to sec field.
		if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
			unsigned int sec;

			// The sec field can overflow.
			sec = (cycle & 0xffffe000) >> 13;
			cycle = (++sec << 13) |
				((cycle & 0x00001fff) / CYCLES_PER_SECOND);
		}

		// In OHCI 1394 specification, lower 2 bits are available for
		// sec field.
		cycle &= 0x00007fff;
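		// Worked example of the round-up above (informative, not in
		// the original source): with the current time at sec 3,
		// cycle 7900 and a delay of 200 cycles, the cycle field
		// becomes 8100 >= 8000, so sec is bumped to 4 and the low
		// 13 bits become 8100 / 8000 = 1 as computed above.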
	} else {
		cycle = -1;
	}

	list_for_each_entry(s, &d->streams, list) {
		int cycle_match;

		if (s->direction == AMDTP_IN_STREAM) {
			cycle_match = cycle;
		} else {
			// IT context starts immediately.
			cycle_match = -1;
		}

		if (s != d->irq_target) {
			err = amdtp_stream_start(s, s->channel, s->speed, d,
						 false, cycle_match);
			if (err < 0)
				goto error;
		}
	}

	s = d->irq_target;
	err = amdtp_stream_start(s, s->channel, s->speed, d, true, -1);
	if (err < 0)
		goto error;

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);

/**
 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
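
/*
 * Informative usage sketch (not part of the original file): a unit driver
 * built on this API typically initializes each stream and a domain once,
 * groups the streams, then starts and stops them together. Error handling
 * and driver-specific details are elided; the names 'tx', 'rx' and the
 * process_ctx_payloads callback are hypothetical.
 *
 *	amdtp_stream_init(&tx, unit, AMDTP_OUT_STREAM, CIP_BLOCKING,
 *			  CIP_FMT_AM, process_ctx_payloads, protocol_size);
 *	amdtp_stream_set_parameters(&tx, 48000, data_block_quadlets);
 *	amdtp_domain_init(&domain);
 *	amdtp_domain_add_stream(&domain, &tx, channel, speed);
 *	amdtp_domain_add_stream(&domain, &rx, channel2, speed);
 *	amdtp_domain_start(&domain, 0);
 *	...
 *	amdtp_domain_stop(&domain);
 *	amdtp_stream_destroy(&tx);
 */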