1 // SPDX-License-Identifier: GPL-2.0-only
3 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
4 * with Common Isochronous Packet (IEC 61883-1) headers
6 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/firewire.h>
12 #include <linux/firewire-constants.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <sound/pcm.h>
16 #include <sound/pcm_params.h>
17 #include "amdtp-stream.h"
19 #define TICKS_PER_CYCLE 3072
20 #define CYCLES_PER_SECOND 8000
21 #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
23 #define OHCI_MAX_SECOND 8
25 /* Always support Linux tracing subsystem. */
26 #define CREATE_TRACE_POINTS
27 #include "amdtp-stream-trace.h"
29 #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */
31 /* isochronous header parameters */
32 #define ISO_DATA_LENGTH_SHIFT 16
33 #define TAG_NO_CIP_HEADER 0
36 /* common isochronous packet header parameters */
37 #define CIP_EOH_SHIFT 31
38 #define CIP_EOH (1u << CIP_EOH_SHIFT)
39 #define CIP_EOH_MASK 0x80000000
40 #define CIP_SID_SHIFT 24
41 #define CIP_SID_MASK 0x3f000000
42 #define CIP_DBS_MASK 0x00ff0000
43 #define CIP_DBS_SHIFT 16
44 #define CIP_SPH_MASK 0x00000400
45 #define CIP_SPH_SHIFT 10
46 #define CIP_DBC_MASK 0x000000ff
47 #define CIP_FMT_SHIFT 24
48 #define CIP_FMT_MASK 0x3f000000
49 #define CIP_FDF_MASK 0x00ff0000
50 #define CIP_FDF_SHIFT 16
51 #define CIP_SYT_MASK 0x0000ffff
52 #define CIP_SYT_NO_INFO 0xffff
54 /* Audio and Music transfer protocol specific parameters */
55 #define CIP_FMT_AM 0x10
56 #define AMDTP_FDF_NO_DATA 0xff
58 // For iso header, tstamp and 2 CIP header.
59 #define IR_CTX_HEADER_SIZE_CIP 16
60 // For iso header and tstamp.
61 #define IR_CTX_HEADER_SIZE_NO_CIP 8
62 #define HEADER_TSTAMP_MASK 0x0000ffff
64 #define IT_PKT_HEADER_SIZE_CIP 8 // For 2 CIP header.
65 #define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
67 static void pcm_period_work(struct work_struct
*work
);
70 * amdtp_stream_init - initialize an AMDTP stream structure
71 * @s: the AMDTP stream to initialize
72 * @unit: the target of the stream
73 * @dir: the direction of stream
74 * @flags: the packet transmission method to use
75 * @fmt: the value of fmt field in CIP header
76 * @process_ctx_payloads: callback handler to process payloads of isoc context
77 * @protocol_size: the size to allocate newly for protocol
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	/* A payload-processing callback is mandatory; refuse silently-broken streams. */
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	/* Protocol-private area, freed in amdtp_stream_destroy(). */
	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	/* Sentinel: a valid context is assigned in amdtp_stream_start(). */
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	INIT_WORK(&s->period_work, pcm_period_work);
	s->packet_index = 0;

	/* Used to wait for the first isochronous callback. */
	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	/* -1 means "use the SYT value computed per packet", no override. */
	if (dir == AMDTP_OUT_STREAM)
		s->ctx_data.rx.syt_override = -1;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
114 * amdtp_stream_destroy - free stream resources
115 * @s: the AMDTP stream to destroy
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	/* Destroying a running stream is a caller bug. */
	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);
/* SYT interval (events per SYT-tagged packet) for each sampling frequency code. */
const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);
/* Sampling rate in Hz for each IEC 61883-6 sampling frequency code (sfc). */
const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);
151 static int apply_constraint_to_size(struct snd_pcm_hw_params
*params
,
152 struct snd_pcm_hw_rule
*rule
)
154 struct snd_interval
*s
= hw_param_interval(params
, rule
->var
);
155 const struct snd_interval
*r
=
156 hw_param_interval_c(params
, SNDRV_PCM_HW_PARAM_RATE
);
157 struct snd_interval t
= {0};
158 unsigned int step
= 0;
161 for (i
= 0; i
< CIP_SFC_COUNT
; ++i
) {
162 if (snd_interval_test(r
, amdtp_rate_table
[i
]))
163 step
= max(step
, amdtp_syt_intervals
[i
]);
166 t
.min
= roundup(s
->min
, step
);
167 t
.max
= rounddown(s
->max
, step
);
170 return snd_interval_refine(s
, &t
);
174 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
175 * @s: the AMDTP stream, which must be initialized.
176 * @runtime: the PCM substream runtime
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating much pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// Linux driver for 1394 OHCI controller voluntarily flushes isoc
	// context when total size of accumulated context header reaches
	// PAGE_SIZE. This kicks work for the isoc context and brings
	// callback in the middle of scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, use the value of context header in IR context is for both
	// contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of syt interval. This comes from the interval of isoc cycle. As 1394
	// OHCI controller can generate hardware IRQ per isoc packet, the
	// interval is 125 usec.
	// However, there are two ways of transmission in IEC 61883-6; blocking
	// and non-blocking modes. In blocking mode, the sequence of isoc packet
	// includes 'empty' or 'NODATA' packets which include no event. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double of the value of syt interval, thus it is
	// 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals to SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupt, it's
	 * preferable to align period/buffer sizes to current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
265 * amdtp_stream_set_parameters - set stream parameters
266 * @s: the AMDTP stream to configure
267 * @rate: the sample rate
268 * @data_block_quadlets: the size of a data block in quadlet unit
270 * The parameters must be set before the stream is started, and must not be
271 * changed while the stream is running.
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	/* Map the nominal rate to its IEC 61883-6 sampling frequency code. */
	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	if (s->direction == AMDTP_OUT_STREAM) {
		s->ctx_data.rx.transfer_delay =
					TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

		if (s->flags & CIP_BLOCKING) {
			// additional buffering needed to adjust for no-data
			// packets.
			s->ctx_data.rx.transfer_delay +=
				TICKS_PER_SECOND * s->syt_interval / rate;
		}
	}

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);
307 * amdtp_stream_get_max_payload - get the stream's packet size
308 * @s: the AMDTP stream
310 * This function must not be called before the stream has been configured
311 * with amdtp_stream_set_parameters().
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int multiplier = 1;
	unsigned int cip_header_size = 0;

	/* Jumbo-payload devices may accumulate several cycles into one packet. */
	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;
	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = sizeof(__be32) * 2;

	return cip_header_size +
		s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);
329 * amdtp_stream_pcm_prepare - prepare PCM device for running
330 * @s: the AMDTP stream
332 * This function should be called from the PCM device's .prepare callback.
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	/* Make sure no period work races with the pointer reset below. */
	cancel_work_sync(&s->period_work);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
// Compute how many data blocks (events) the next packet carries, updating the
// caller's per-domain *data_block_state between calls.
static unsigned int calculate_data_blocks(unsigned int *data_block_state,
				bool is_blocking, bool is_no_info,
				unsigned int syt_interval, enum cip_sfc sfc)
{
	unsigned int data_blocks;

	/* Blocking mode. */
	if (is_blocking) {
		/* This module generate empty packet for 'no data'. */
		if (is_no_info)
			data_blocks = 0;
		else
			data_blocks = syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			data_blocks = *data_block_state;
		} else {
			unsigned int phase = *data_block_state;

		/*
		 * This calculates the number of data blocks per packet so that
		 * 1) the overall rate is correct and exactly synchronized to
		 *    the bus clock, and
		 * 2) packets with a rounded-up number of blocks occur as early
		 *    as possible in the sequence (to prevent underruns of the
		 *    device's buffer).
		 */
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			*data_block_state = phase;
		}
	}

	return data_blocks;
}
// Compute the SYT offset (in ticks within the cycle) for the next packet.
// *last_syt_offset and *syt_offset_state carry the sequence state between
// calls; a result >= TICKS_PER_CYCLE means "no timestamp" (CIP_SYT_NO_INFO).
static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
			unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
		/*
		 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
		 *   n * SYT_INTERVAL * 24576000 / sample_rate
		 * Modulo TICKS_PER_CYCLE, the difference between successive
		 * elements is about 1386.23.  Rounding the results of this
		 * formula to the SYT precision results in a sequence of
		 * differences that begins with:
		 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
		 * This code generates _exactly_ the same sequence.
		 */
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}
// Advance the PCM buffer/period pointers by the number of handled frames and
// schedule snd_pcm_period_elapsed() work when a period boundary is crossed.
static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	/* Wrap around the ring buffer. */
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		/* Period elapsed; notify ALSA core out of IRQ context. */
		queue_work(system_highpri_wq, &s->period_work);
	}
}
// Workqueue handler scheduled by update_pcm_pointers(): tell ALSA core that
// one PCM period elapsed, unless the PCM substream was detached meanwhile.
static void pcm_period_work(struct work_struct *work)
{
	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
					      period_work);
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}
// Queue one packet descriptor to the isochronous context at the current
// packet_index, advancing the index modulo queue_size on success.
static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err = 0;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}
// Queue a packet for the IT context; a packet with neither header nor payload
// is marked 'skip' so the controller sends an empty cycle.
static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}
static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}
// Fill the two CIP header quadlets (IEC 61883-1) for an outgoing packet.
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter,
				unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
			((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
			((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
			(syt & CIP_SYT_MASK));
}
// Prepare header/payload lengths (and optionally the CIP header) for one
// outgoing IT packet, then emit a tracepoint for it.
static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = 2 * sizeof(__be32);
		/* Trace the total on-wire size including the CIP header. */
		payload_length += params->header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
			   data_block_counter, index);
}
// Validate the CIP header of a received packet and derive the number of data
// blocks, the data block counter and the SYT field from it.
// Returns 0 on success, -EAGAIN for a packet to be skipped, -EPROTO/-EIO for
// protocol errors.
static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter,
			    unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				"Invalid CIP header for AMDTP: %08X:%08X\n",
				cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check valid protocol or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length < sizeof(__be32) * 2 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		/* Some devices report a wrong DBS; trust our own value. */
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = (payload_length / sizeof(__be32) - 2) /
							data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}
// Parse the context header of one received packet: extract the payload length,
// then either validate the CIP header or (CIP_NO_HEADER) derive the data-block
// count directly from the payload size.
static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *payload_length,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int index)
{
	const __be32 *cip_header;
	int err;

	*payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
	if (*payload_length > s->ctx_data.tx.ctx_header_size +
					s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			*payload_length, s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = ctx_header + 2;
		err = check_cip_header(s, cip_header, *payload_length,
				       data_blocks, data_block_counter, syt);
		if (err < 0)
			return err;
	} else {
		cip_header = NULL;
		err = 0;
		*data_blocks = *payload_length / sizeof(__be32) /
			       s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
			   *data_block_counter, index);

	return err;
}
670 // In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On
671 // the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
672 // it. Thus, via Linux firewire subsystem, we can get the 3 bits for second.
673 static inline u32
compute_cycle_count(__be32 ctx_header_tstamp
)
675 u32 tstamp
= be32_to_cpu(ctx_header_tstamp
) & HEADER_TSTAMP_MASK
;
676 return (((tstamp
>> 13) & 0x07) * 8000) + (tstamp
& 0x1fff);
679 static inline u32
increment_cycle_count(u32 cycle
, unsigned int addend
)
682 if (cycle
>= OHCI_MAX_SECOND
* CYCLES_PER_SECOND
)
683 cycle
-= OHCI_MAX_SECOND
* CYCLES_PER_SECOND
;
687 // Align to actual cycle count for the packet which is going to be scheduled.
688 // This module queued the same number of isochronous cycle as the size of queue
689 // to kip isochronous cycle, therefore it's OK to just increment the cycle by
690 // the size of queue for scheduled cycle.
691 static inline u32
compute_it_cycle(const __be32 ctx_header_tstamp
,
692 unsigned int queue_size
)
694 u32 cycle
= compute_cycle_count(ctx_header_tstamp
);
695 return increment_cycle_count(cycle
, queue_size
);
// Build packet descriptors for packets received from the device (IR context),
// parsing each context header and tracking the data block counter.
static int generate_device_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets)
{
	unsigned int dbc = s->data_block_counter;
	int i;
	int err;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		unsigned int cycle;
		unsigned int payload_length;
		unsigned int data_blocks;
		unsigned int syt;

		/* The second quadlet of the context header carries the tstamp. */
		cycle = compute_cycle_count(ctx_header[1]);

		err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
					  &data_blocks, &dbc, &syt, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		ctx_header +=
			s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	}

	s->data_block_counter = dbc;

	return 0;
}
740 static unsigned int compute_syt(unsigned int syt_offset
, unsigned int cycle
,
741 unsigned int transfer_delay
)
745 syt_offset
+= transfer_delay
;
746 syt
= ((cycle
+ syt_offset
/ TICKS_PER_CYCLE
) << 12) |
747 (syt_offset
% TICKS_PER_CYCLE
);
748 return syt
& CIP_SYT_MASK
;
// Build packet descriptors for packets to be transmitted (IT context) from the
// domain's precomputed sequence descriptors.
static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
			       const __be32 *ctx_header, unsigned int packets,
			       const struct seq_desc *seq_descs,
			       unsigned int seq_size)
{
	unsigned int dbc = s->data_block_counter;
	unsigned int seq_index = s->ctx_data.rx.seq_index;
	int i;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_index;
		unsigned int syt;

		desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);

		syt = seq->syt_offset;
		if (syt != CIP_SYT_NO_INFO) {
			syt = compute_syt(syt, desc->cycle,
					  s->ctx_data.rx.transfer_delay);
		}
		desc->syt = syt;
		desc->data_blocks = seq->data_blocks;

		/* DBC semantics differ: counter of last vs. first event. */
		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_index = (seq_index + 1) % seq_size;

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq_index = seq_index;
}
// Stop further queueing after an error and signal XRUN to the PCM layer.
static inline void cancel_stream(struct amdtp_stream *s)
{
	/* -1 marks the stream as stopped/errored (see amdtp_streaming_error()). */
	s->packet_index = -1;
	if (current_work() == &s->period_work)
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
// Hand the packet payloads to the protocol-specific callback and advance the
// PCM pointers by however many frames it consumed.
static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *descs,
				 unsigned int packets)
{
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;

	pcm = READ_ONCE(s->pcm);
	pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
	if (pcm)
		update_pcm_pointers(s, pcm, pcm_frames);
}
// Isochronous-context callback for transmit (IT) streams: build descriptors
// from the domain sequence, process payloads, then queue the next packets.
static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int events_per_period = s->ctx_data.rx.events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	unsigned int packets;
	int i;

	/* Stream was already cancelled. */
	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq_descs,
			   d->seq_size);

	process_ctx_payloads(s, s->pkt_descs, packets);

	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = s->pkt_descs + i;
		unsigned int syt;
		struct {
			struct fw_iso_packet params;
			__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
		} template = { {0}, {0} };
		bool sched_irq = false;

		if (s->ctx_data.rx.syt_override < 0)
			syt = desc->syt;
		else
			syt = s->ctx_data.rx.syt_override;

		build_it_pkt_header(s, desc->cycle, &template.params,
				    desc->data_blocks, desc->data_block_counter,
				    syt, i);

		/* Only the IRQ target schedules hardware IRQs per period. */
		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = true;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}

	s->ctx_data.rx.event_count = event_count;
}
// Isochronous-context callback for receive (IR) streams: parse received
// packets, process payloads, and re-queue the buffers.
static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	unsigned int packets;
	int i;
	int err;

	/* Stream was already cancelled. */
	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
	if (err < 0) {
		/* -EAGAIN means "skip this batch", anything else is fatal. */
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		process_ctx_payloads(s, s->pkt_descs, packets);
	}

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
// Extend the domain's ring of sequence descriptors (SYT offsets and data-block
// counts) so every transmit stream has at least @packets entries available.
static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
{
	struct amdtp_stream *irq_target = d->irq_target;
	unsigned int seq_tail = d->seq_tail;
	unsigned int seq_size = d->seq_size;
	unsigned int min_avail;
	struct amdtp_stream *s;

	/* Find the transmit stream with the fewest unconsumed entries. */
	min_avail = d->seq_size;
	list_for_each_entry(s, &d->streams, list) {
		unsigned int seq_index;
		unsigned int avail;

		if (s->direction == AMDTP_IN_STREAM)
			continue;

		seq_index = s->ctx_data.rx.seq_index;
		avail = seq_tail;
		if (seq_index > avail)
			avail += d->seq_size;
		avail -= seq_index;

		if (avail < min_avail)
			min_avail = avail;
	}

	/* Generate new entries until every stream has enough. */
	while (min_avail < packets) {
		struct seq_desc *desc = d->seq_descs + seq_tail;

		desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
					&d->syt_offset_state, irq_target->sfc);
		desc->data_blocks = calculate_data_blocks(&d->data_block_state,
				!!(irq_target->flags & CIP_BLOCKING),
				desc->syt_offset == CIP_SYT_NO_INFO,
				irq_target->syt_interval, irq_target->sfc);

		++seq_tail;
		seq_tail %= seq_size;

		++min_avail;
	}

	d->seq_tail = seq_tail;
}
// Callback for the single IRQ-target stream of a domain: refill the sequence
// pool, process its own packets, then flush the sibling streams' contexts.
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *irq_target = private_data;
	struct amdtp_domain *d = irq_target->domain;
	unsigned int packets = header_length / sizeof(__be32);
	struct amdtp_stream *s;

	// Record enough entries with extra 3 cycles at least.
	pool_ideal_seq_descs(d, packets + 3);

	out_stream_callback(context, tstamp, header_length, header, irq_target);
	if (amdtp_streaming_error(irq_target))
		goto error;

	list_for_each_entry(s, &d->streams, list) {
		if (s != irq_target && amdtp_stream_running(s)) {
			fw_iso_context_flush_completions(s->context);
			if (amdtp_streaming_error(s))
				goto error;
		}
	}

	return;
error:
	/* Any error tears down the whole domain. */
	if (amdtp_stream_running(irq_target))
		cancel_stream(irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}
// this is executed one time.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	u32 cycle;

	/*
	 * For in-stream, first packet has come.
	 * For out-stream, prepared to transmit first packet
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	/* Record the start cycle and install the steady-state callback. */
	if (s->direction == AMDTP_IN_STREAM) {
		cycle = compute_cycle_count(ctx_header[1]);

		context->callback.sc = in_stream_callback;
	} else {
		cycle = compute_it_cycle(*ctx_header, s->queue_size);

		if (s == s->domain->irq_target)
			context->callback.sc = irq_target_callback;
		else
			context->callback.sc = out_stream_callback;
	}

	s->start_cycle = cycle;

	context->callback.sc(context, tstamp, header_length, header, s);
}
1025 * amdtp_stream_start - start transferring packets
1026 * @s: the AMDTP stream to start
1027 * @channel: the isochronous channel on the bus
1028 * @speed: firewire speed code
1029 * @start_cycle: the isochronous cycle to start the context. Start immediately
1030 * if negative value is given.
1031 * @queue_size: The number of packets in the queue.
1032 * @idle_irq_interval: the interval to queue packet during initial state.
1034 * The stream cannot be started until it has been configured with
1035 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
1036 * device can be started.
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      int start_cycle, unsigned int queue_size,
			      unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		/* UINT_MAX: accept whatever DBC the first packet carries. */
		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	/* initialize packet buffer */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;

		max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
				       ctx_header_size;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.

		max_ctx_payload_size = amdtp_stream_get_max_payload(s);
		if (!(s->flags & CIP_NO_HEADER))
			max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
	}

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size,
				      max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
			       GFP_KERNEL);
	if (!s->pkt_descs) {
		err = -ENOMEM;
		goto err_context;
	}

	/* Pre-fill the whole queue before starting the context. */
	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects in stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, start_cycle, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;

err_pkt_descs:
	kfree(s->pkt_descs);
err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
1175 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
1176 * @d: the AMDTP domain.
1177 * @s: the AMDTP stream that transports the PCM data
1179 * Returns the current buffer position, in frames.
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// This function is called in software IRQ context of
		// period_work or process context.
		//
		// When the software IRQ context was scheduled by software IRQ
		// context of IT contexts, queued packets were already handled.
		// Therefore, no need to flush the queue in buffer furthermore.
		//
		// When the process context reach here, some packets will be
		// already queued in the buffer. These packets should be handled
		// immediately to keep better granularity of PCM pointer.
		//
		// Later, the process context will sometimes schedules software
		// IRQ context of the period_work. Then, no need to flush the
		// queue by the same reason as described in the above.
		if (current_work() != &s->period_work) {
			// Queued packet should be processed without any kernel
			// preemption to keep latency against bus cycle.
			preempt_disable();
			fw_iso_context_flush_completions(irq_target->context);
			preempt_enable();
		}
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
1215 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
1216 * @d: the AMDTP domain.
1217 * @s: the AMDTP stream that transfers the PCM frames
1219 * Returns zero always.
1221 int amdtp_domain_stream_pcm_ack(struct amdtp_domain
*d
, struct amdtp_stream
*s
)
1223 struct amdtp_stream
*irq_target
= d
->irq_target
;
1225 // Process isochronous packets for recent isochronous cycle to handle
1226 // queued PCM frames.
1227 if (irq_target
&& amdtp_stream_running(irq_target
)) {
1228 // Queued packet should be processed without any kernel
1229 // preemption to keep latency against bus cycle.
1231 fw_iso_context_flush_completions(irq_target
->context
);
1237 EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack
);
1240 * amdtp_stream_update - update the stream after a bus reset
1241 * @s: the AMDTP stream
1243 void amdtp_stream_update(struct amdtp_stream
*s
)
1246 WRITE_ONCE(s
->source_node_id_field
,
1247 (fw_parent_device(s
->unit
)->card
->node_id
<< CIP_SID_SHIFT
) & CIP_SID_MASK
);
1249 EXPORT_SYMBOL(amdtp_stream_update
);
1252 * amdtp_stream_stop - stop sending packets
1253 * @s: the AMDTP stream to stop
1255 * All PCM and MIDI devices of the stream must be stopped before the stream
1256 * itself can be stopped.
1258 static void amdtp_stream_stop(struct amdtp_stream
*s
)
1260 mutex_lock(&s
->mutex
);
1262 if (!amdtp_stream_running(s
)) {
1263 mutex_unlock(&s
->mutex
);
1267 cancel_work_sync(&s
->period_work
);
1268 fw_iso_context_stop(s
->context
);
1269 fw_iso_context_destroy(s
->context
);
1270 s
->context
= ERR_PTR(-1);
1271 iso_packets_buffer_destroy(&s
->buffer
, s
->unit
);
1272 kfree(s
->pkt_descs
);
1274 s
->callbacked
= false;
1276 mutex_unlock(&s
->mutex
);
1280 * amdtp_stream_pcm_abort - abort the running PCM device
1281 * @s: the AMDTP stream about to be stopped
1283 * If the isochronous stream needs to be stopped asynchronously, call this
1284 * function first to stop the PCM device.
1286 void amdtp_stream_pcm_abort(struct amdtp_stream
*s
)
1288 struct snd_pcm_substream
*pcm
;
1290 pcm
= READ_ONCE(s
->pcm
);
1292 snd_pcm_stop_xrun(pcm
);
1294 EXPORT_SYMBOL(amdtp_stream_pcm_abort
);
1297 * amdtp_domain_init - initialize an AMDTP domain structure
1298 * @d: the AMDTP domain to initialize.
1300 int amdtp_domain_init(struct amdtp_domain
*d
)
1302 INIT_LIST_HEAD(&d
->streams
);
1304 d
->events_per_period
= 0;
1306 d
->seq_descs
= NULL
;
1310 EXPORT_SYMBOL_GPL(amdtp_domain_init
);
/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 *
 * Counterpart of amdtp_domain_init(); kept as a hook for future resource
 * release even though it currently releases nothing.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present nothing to do.
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
1324 * amdtp_domain_add_stream - register isoc context into the domain.
1325 * @d: the AMDTP domain.
1326 * @s: the AMDTP stream.
1327 * @channel: the isochronous channel on the bus.
1328 * @speed: firewire speed code.
1330 int amdtp_domain_add_stream(struct amdtp_domain
*d
, struct amdtp_stream
*s
,
1331 int channel
, int speed
)
1333 struct amdtp_stream
*tmp
;
1335 list_for_each_entry(tmp
, &d
->streams
, list
) {
1340 list_add(&s
->list
, &d
->streams
);
1342 s
->channel
= channel
;
1348 EXPORT_SYMBOL_GPL(amdtp_domain_add_stream
);
1350 static int get_current_cycle_time(struct fw_card
*fw_card
, int *cur_cycle
)
1357 // This is a request to local 1394 OHCI controller and expected to
1358 // complete without any event waiting.
1359 generation
= fw_card
->generation
;
1360 smp_rmb(); // node_id vs. generation.
1361 rcode
= fw_run_transaction(fw_card
, TCODE_READ_QUADLET_REQUEST
,
1362 fw_card
->node_id
, generation
, SCODE_100
,
1363 CSR_REGISTER_BASE
+ CSR_CYCLE_TIME
,
1365 if (rcode
!= RCODE_COMPLETE
)
1368 data
= be32_to_cpu(reg
);
1369 *cur_cycle
= data
>> 12;
1375 * amdtp_domain_start - start sending packets for isoc context in the domain.
1376 * @d: the AMDTP domain.
1377 * @ir_delay_cycle: the cycle delay to start all IR contexts.
1379 int amdtp_domain_start(struct amdtp_domain
*d
, unsigned int ir_delay_cycle
)
1381 static const struct {
1382 unsigned int data_block
;
1383 unsigned int syt_offset
;
1384 } *entry
, initial_state
[] = {
1385 [CIP_SFC_32000
] = { 4, 3072 },
1386 [CIP_SFC_48000
] = { 6, 1024 },
1387 [CIP_SFC_96000
] = { 12, 1024 },
1388 [CIP_SFC_192000
] = { 24, 1024 },
1389 [CIP_SFC_44100
] = { 0, 67 },
1390 [CIP_SFC_88200
] = { 0, 67 },
1391 [CIP_SFC_176400
] = { 0, 67 },
1393 unsigned int events_per_buffer
= d
->events_per_buffer
;
1394 unsigned int events_per_period
= d
->events_per_period
;
1395 unsigned int idle_irq_interval
;
1396 unsigned int queue_size
;
1397 struct amdtp_stream
*s
;
1401 // Select an IT context as IRQ target.
1402 list_for_each_entry(s
, &d
->streams
, list
) {
1403 if (s
->direction
== AMDTP_OUT_STREAM
)
1410 // This is a case that AMDTP streams in domain run just for MIDI
1411 // substream. Use the number of events equivalent to 10 msec as
1412 // interval of hardware IRQ.
1413 if (events_per_period
== 0)
1414 events_per_period
= amdtp_rate_table
[d
->irq_target
->sfc
] / 100;
1415 if (events_per_buffer
== 0)
1416 events_per_buffer
= events_per_period
* 3;
1418 queue_size
= DIV_ROUND_UP(CYCLES_PER_SECOND
* events_per_buffer
,
1419 amdtp_rate_table
[d
->irq_target
->sfc
]);
1421 d
->seq_descs
= kcalloc(queue_size
, sizeof(*d
->seq_descs
), GFP_KERNEL
);
1424 d
->seq_size
= queue_size
;
1427 entry
= &initial_state
[s
->sfc
];
1428 d
->data_block_state
= entry
->data_block
;
1429 d
->syt_offset_state
= entry
->syt_offset
;
1430 d
->last_syt_offset
= TICKS_PER_CYCLE
;
1432 if (ir_delay_cycle
> 0) {
1433 struct fw_card
*fw_card
= fw_parent_device(s
->unit
)->card
;
1435 err
= get_current_cycle_time(fw_card
, &cycle
);
1439 // No need to care overflow in cycle field because of enough
1441 cycle
+= ir_delay_cycle
;
1443 // Round up to sec field.
1444 if ((cycle
& 0x00001fff) >= CYCLES_PER_SECOND
) {
1447 // The sec field can overflow.
1448 sec
= (cycle
& 0xffffe000) >> 13;
1449 cycle
= (++sec
<< 13) |
1450 ((cycle
& 0x00001fff) / CYCLES_PER_SECOND
);
1453 // In OHCI 1394 specification, lower 2 bits are available for
1455 cycle
&= 0x00007fff;
1460 list_for_each_entry(s
, &d
->streams
, list
) {
1463 if (s
->direction
== AMDTP_IN_STREAM
) {
1464 cycle_match
= cycle
;
1466 // IT context starts immediately.
1468 s
->ctx_data
.rx
.seq_index
= 0;
1471 if (s
!= d
->irq_target
) {
1472 err
= amdtp_stream_start(s
, s
->channel
, s
->speed
,
1473 cycle_match
, queue_size
, 0);
1480 s
->ctx_data
.rx
.events_per_period
= events_per_period
;
1481 s
->ctx_data
.rx
.event_count
= 0;
1482 s
->ctx_data
.rx
.seq_index
= 0;
1484 idle_irq_interval
= DIV_ROUND_UP(CYCLES_PER_SECOND
* events_per_period
,
1485 amdtp_rate_table
[d
->irq_target
->sfc
]);
1486 err
= amdtp_stream_start(s
, s
->channel
, s
->speed
, -1, queue_size
,
1493 list_for_each_entry(s
, &d
->streams
, list
)
1494 amdtp_stream_stop(s
);
1495 kfree(d
->seq_descs
);
1496 d
->seq_descs
= NULL
;
1499 EXPORT_SYMBOL_GPL(amdtp_domain_start
);
1502 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
1503 * @d: the AMDTP domain to which the isoc contexts belong.
1505 void amdtp_domain_stop(struct amdtp_domain
*d
)
1507 struct amdtp_stream
*s
, *next
;
1510 amdtp_stream_stop(d
->irq_target
);
1512 list_for_each_entry_safe(s
, next
, &d
->streams
, list
) {
1515 if (s
!= d
->irq_target
)
1516 amdtp_stream_stop(s
);
1519 d
->events_per_period
= 0;
1520 d
->irq_target
= NULL
;
1522 kfree(d
->seq_descs
);
1523 d
->seq_descs
= NULL
;
1525 EXPORT_SYMBOL_GPL(amdtp_domain_stop
);