/* -*- c-basic-offset: 8 -*-
 *
 * amdtp.c - Audio and Music Data Transmission Protocol Driver
 * Copyright (C) 2001 Kristian Høgsberg
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *
 * The AMDTP driver is designed to expose the IEEE1394 bus as a
 * regular OSS soundcard, i.e. you can link /dev/dsp to /dev/amdtp and
 * then your favourite MP3 player, game or whatever sound program will
 * output to an IEEE1394 isochronous channel.  The signal destination
 * could be a set of IEEE1394 loudspeakers (if and when such things
 * become available) or an amplifier with IEEE1394 input (like the
 * Sony STR-LSA1).  The driver only handles the actual streaming;
 * some connection management is also required for this to actually
 * work, but that is outside the scope of this driver, and furthermore
 * it is not really standardized yet.
 *
 * The Audio and Music Data Transmission Protocol is available at
 *
 *     http://www.1394ta.org/Download/Technology/Specifications/2001/AM20Final-jf2.pdf
 *
 * TODO:
 *
 * - We should be able to change input sample format between LE/BE, as
 *   we already shift the bytes around when we construct the iso
 *   packets.
 *
 * - Fix DMA stop after bus reset!
 *
 * - Clean up iso context handling in ohci1394.
 *
 *
 * MAYBE TODO:
 *
 * - Receive data for local playback or recording.  Playback requires
 *   soft syncing with the sound card.
 *
 * - Signal processing, i.e. receive packets, do some processing, and
 *   transmit them again using the same packet structure and timestamps
 *   offset by processing time.
 *
 * - Maybe make an ALSA interface, that is, create a file_ops
 *   implementation that recognizes ALSA ioctls and uses defaults for
 *   things that can't be controlled through ALSA (iso channel).
 *
 * Changes:
 *
 * - Audit copy_from_user in amdtp_write.
 *   Daniele Bellucci <bellucda@tiscali.it>
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/ioctl32.h>
#include <linux/compat.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "hosts.h"
#include "highlevel.h"
#include "ieee1394.h"
#include "ieee1394_core.h"
#include "ohci1394.h"

#include "amdtp.h"
#include "cmp.h"

#define FMT_AMDTP 0x10
#define FDF_AM824 0x00
#define FDF_SFC_32KHZ   0x00
#define FDF_SFC_44K1HZ  0x01
#define FDF_SFC_48KHZ   0x02
#define FDF_SFC_88K2HZ  0x03
#define FDF_SFC_96KHZ   0x04
#define FDF_SFC_176K4HZ 0x05
#define FDF_SFC_192KHZ  0x06
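
/* A note on these constants (interpretation, not from the original
 * source): fmt = FMT_AMDTP marks an audio/music stream in the CIP
 * header, and the FDF_SFC_* values are the sampling frequency codes
 * carried in the format dependent field (fdf). */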

struct descriptor_block {
        struct output_more_immediate {
                u32 control;
                u32 pad0;
                u32 skip;
                u32 pad1;
                u32 header[4];
        } header_desc;

        struct output_last {
                u32 control;
                u32 data_address;
                u32 branch;
                u32 status;
        } payload_desc;
};

struct packet {
        struct descriptor_block *db;
        dma_addr_t db_bus;
        struct iso_packet *payload;
        dma_addr_t payload_bus;
};

#include <asm/byteorder.h>

#if defined __BIG_ENDIAN_BITFIELD

struct iso_packet {
        /* First quadlet */
        unsigned int dbs      : 8;
        unsigned int eoh0     : 2;
        unsigned int sid      : 6;

        unsigned int dbc      : 8;
        unsigned int fn       : 2;
        unsigned int qpc      : 3;
        unsigned int sph      : 1;
        unsigned int reserved : 2;

        /* Second quadlet */
        unsigned int fdf      : 8;
        unsigned int eoh1     : 2;
        unsigned int fmt      : 6;

        unsigned int syt      : 16;

        quadlet_t data[0];
};

#elif defined __LITTLE_ENDIAN_BITFIELD

struct iso_packet {
        /* First quadlet */
        unsigned int sid      : 6;
        unsigned int eoh0     : 2;
        unsigned int dbs      : 8;

        unsigned int reserved : 2;
        unsigned int sph      : 1;
        unsigned int qpc      : 3;
        unsigned int fn       : 2;
        unsigned int dbc      : 8;

        /* Second quadlet */
        unsigned int fmt      : 6;
        unsigned int eoh1     : 2;
        unsigned int fdf      : 8;

        unsigned int syt      : 16;

        quadlet_t data[0];
};

#else

#error Unknown bitfield type

#endif
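
/* For reference, a sketch of the CIP header layout these bitfields
 * describe, as it appears on the wire (most significant bit first):
 *
 *   quadlet 0:  eoh0(2) sid(6) dbs(8) fn(2) qpc(3) sph(1) rsv(2) dbc(8)
 *   quadlet 1:  eoh1(2) fmt(6) fdf(8) syt(16)
 */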

#define PACKET_LIST_SIZE 256
#define MAX_PACKET_LISTS 4

struct packet_list {
        struct list_head link;
        int last_cycle_count;
        struct packet packets[PACKET_LIST_SIZE];
};

#define BUFFER_SIZE 128

/* This implements a circular buffer for incoming samples. */

struct buffer {
        size_t head, tail, length, size;
        unsigned char data[0];
};
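
/* Invariants assumed by the buffer helpers further down: head is the
 * read index, tail the write index, and length the number of bytes
 * currently stored, so length == 0 means empty and length == size
 * means full.  E.g. with size = 8, putting 5 bytes and then getting
 * 3 leaves head = 3, tail = 5 and length = 2. */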

struct stream {
        int iso_channel;
        int format;
        int rate;
        int dimension;
        int fdf;
        int mode;
        int sample_format;
        struct cmp_pcr *opcr;

        /* Input samples are copied here. */
        struct buffer *input;

        /* ISO Packer state */
        unsigned char dbc;
        struct packet_list *current_packet_list;
        int current_packet;
        struct fraction ready_samples, samples_per_cycle;

        /* We use these to generate control bits when we are packing
         * iec958 data.
         */
        int iec958_frame_count;
        int iec958_rate_code;

        /* The cycle_count and cycle_offset fields are used for the
         * synchronization timestamps (syt) in the cip header.  They
         * are incremented by at least a cycle every time we put a
         * time stamp in a packet.  As we don't time stamp all
         * packets, cycle_count isn't updated in every cycle, and
         * sometimes it's incremented by 2.  Thus, we have
         * cycle_count2, which is simply incremented by one with each
         * packet, so we can compare it to the transmission time
         * written back in the dma programs.
         */
        atomic_t cycle_count, cycle_count2;
        struct fraction cycle_offset, ticks_per_syt_offset;
        int syt_interval;
        int stale_count;

        /* These fields control the sample output to the DMA engine.
         * The dma_packet_lists list holds packet lists currently
         * queued for dma; the head of the list is currently being
         * processed.  The last program in a packet list generates an
         * interrupt, which removes the head from dma_packet_lists and
         * puts it back on the free list.
         */
        struct list_head dma_packet_lists;
        struct list_head free_packet_lists;
        wait_queue_head_t packet_list_wait;
        spinlock_t packet_list_lock;
        struct ohci1394_iso_tasklet iso_tasklet;
        struct pci_pool *descriptor_pool, *packet_pool;

        /* Streams at a host controller are chained through this field. */
        struct list_head link;
        struct amdtp_host *host;
};

struct amdtp_host {
        struct hpsb_host *host;
        struct ti_ohci *ohci;
        struct list_head stream_list;
        spinlock_t stream_list_lock;
};

static struct hpsb_highlevel amdtp_highlevel;


/* FIXME: This doesn't belong here... */

#define OHCI1394_CONTEXT_CYCLE_MATCH 0x80000000
#define OHCI1394_CONTEXT_RUN         0x00008000
#define OHCI1394_CONTEXT_WAKE        0x00001000
#define OHCI1394_CONTEXT_DEAD        0x00000800
#define OHCI1394_CONTEXT_ACTIVE      0x00000400

static void ohci1394_start_it_ctx(struct ti_ohci *ohci, int ctx,
                                  dma_addr_t first_cmd, int z, int cycle_match)
{
        reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << ctx);
        reg_write(ohci, OHCI1394_IsoXmitCommandPtr + ctx * 16, first_cmd | z);
        reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16, ~0);
        wmb();
        reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
                  OHCI1394_CONTEXT_CYCLE_MATCH | (cycle_match << 16) |
                  OHCI1394_CONTEXT_RUN);
}

static void ohci1394_wake_it_ctx(struct ti_ohci *ohci, int ctx)
{
        reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
                  OHCI1394_CONTEXT_WAKE);
}

static void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx, int synchronous)
{
        u32 control;
        int wait;

        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << ctx);
        reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16,
                  OHCI1394_CONTEXT_RUN);
        wmb();

        if (synchronous) {
                for (wait = 0; wait < 5; wait++) {
                        control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
                        if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
                                break;

                        schedule_timeout_interruptible(1);
                }
        }
}

/* Note: we can test if free_packet_lists is empty without acquiring
 * the packet_list_lock.  The interrupt handler only adds to the free
 * list, so there is no race condition between testing the list
 * non-empty and acquiring the lock.
 */

static struct packet_list *stream_get_free_packet_list(struct stream *s)
{
        struct packet_list *pl;
        unsigned long flags;

        if (list_empty(&s->free_packet_lists))
                return NULL;

        spin_lock_irqsave(&s->packet_list_lock, flags);
        pl = list_entry(s->free_packet_lists.next, struct packet_list, link);
        list_del(&pl->link);
        spin_unlock_irqrestore(&s->packet_list_lock, flags);

        return pl;
}

static void stream_start_dma(struct stream *s, struct packet_list *pl)
{
        u32 syt_cycle, cycle_count, start_cycle;

        cycle_count = reg_read(s->host->ohci,
                               OHCI1394_IsochronousCycleTimer) >> 12;
        syt_cycle = (pl->last_cycle_count - PACKET_LIST_SIZE + 1) & 0x0f;

        /* We program the DMA controller to start transmission at
         * least 17 cycles from now - this happens when the lower four
         * bits of cycle_count are 0x0f and syt_cycle is 0, in which
         * case the start cycle is cycle_count - 15 + 32. */
        start_cycle = (cycle_count & ~0x0f) + 32 + syt_cycle;
        if ((start_cycle & 0x1fff) >= 8000)
                start_cycle = start_cycle - 8000 + 0x2000;

        ohci1394_start_it_ctx(s->host->ohci, s->iso_tasklet.context,
                              pl->packets[0].db_bus, 3,
                              start_cycle & 0x7fff);
}

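/* A worked example of the scheduling arithmetic above (illustrative
 * numbers only, not from the original source): with cycle_count =
 * 0x123f and syt_cycle = 0, start_cycle = 0x1230 + 32 = 0x1250,
 * which is 17 cycles after 0x123f; with syt_cycle = 0xf it becomes
 * 0x125f, 32 cycles ahead.  The test against 8000 handles the cycle
 * field wrapping within the 13-bit second boundary of the cycle
 * timer. */
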
static void stream_put_dma_packet_list(struct stream *s,
                                       struct packet_list *pl)
{
        unsigned long flags;
        struct packet_list *prev;

        /* Remember the cycle_count used for timestamping the last packet. */
        pl->last_cycle_count = atomic_read(&s->cycle_count2) - 1;
        pl->packets[PACKET_LIST_SIZE - 1].db->payload_desc.branch = 0;

        spin_lock_irqsave(&s->packet_list_lock, flags);
        list_add_tail(&pl->link, &s->dma_packet_lists);
        spin_unlock_irqrestore(&s->packet_list_lock, flags);

        prev = list_entry(pl->link.prev, struct packet_list, link);
        if (pl->link.prev != &s->dma_packet_lists) {
                struct packet *last = &prev->packets[PACKET_LIST_SIZE - 1];
                last->db->payload_desc.branch = pl->packets[0].db_bus | 3;
                last->db->header_desc.skip = pl->packets[0].db_bus | 3;
                ohci1394_wake_it_ctx(s->host->ohci, s->iso_tasklet.context);
        }
        else
                stream_start_dma(s, pl);
}

static void stream_shift_packet_lists(unsigned long l)
{
        struct stream *s = (struct stream *) l;
        struct packet_list *pl;
        struct packet *last;
        int diff;

        if (list_empty(&s->dma_packet_lists)) {
                HPSB_ERR("empty dma_packet_lists in %s", __FUNCTION__);
                return;
        }

        /* Now that we know the list is non-empty, we can get the head
         * of the list without locking, because the process context
         * only adds to the tail.
         */
        pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
        last = &pl->packets[PACKET_LIST_SIZE - 1];

        /* This is weird... if we stop dma processing in the middle of
         * a packet list, the dma context immediately generates an
         * interrupt if we enable it again later.  This only happens
         * when amdtp_release is interrupted while waiting for dma to
         * complete, though.  Anyway, we detect this by seeing that
         * the status of the dma descriptor that we expected an
         * interrupt from is still 0.
         */
        if (last->db->payload_desc.status == 0) {
                HPSB_INFO("weird interrupt...");
                return;
        }

        /* If the last descriptor block does not specify a branch
         * address, we have a sample underflow.
         */
        if (last->db->payload_desc.branch == 0)
                HPSB_INFO("FIXME: sample underflow...");

        /* Here we check when (which cycle) the last packet was sent
         * and compare it to what the iso packer was using at the
         * time.  If there is a mismatch, we adjust the cycle count in
         * the iso packer.  However, there are still up to
         * MAX_PACKET_LISTS packet lists queued with bad time stamps,
         * so we disable time stamp monitoring for the next
         * MAX_PACKET_LISTS packet lists.
         */
        diff = (last->db->payload_desc.status - pl->last_cycle_count) & 0xf;
        if (diff > 0 && s->stale_count == 0) {
                atomic_add(diff, &s->cycle_count);
                atomic_add(diff, &s->cycle_count2);
                s->stale_count = MAX_PACKET_LISTS;
        }

        if (s->stale_count > 0)
                s->stale_count--;

        /* Finally, we move the packet list that was just processed
         * back to the free list, and notify any waiters.
         */
        spin_lock(&s->packet_list_lock);
        list_del(&pl->link);
        list_add_tail(&pl->link, &s->free_packet_lists);
        spin_unlock(&s->packet_list_lock);

        wake_up_interruptible(&s->packet_list_wait);
}

static struct packet *stream_current_packet(struct stream *s)
{
        if (s->current_packet_list == NULL &&
            (s->current_packet_list = stream_get_free_packet_list(s)) == NULL)
                return NULL;

        return &s->current_packet_list->packets[s->current_packet];
}

static void stream_queue_packet(struct stream *s)
{
        s->current_packet++;
        if (s->current_packet == PACKET_LIST_SIZE) {
                stream_put_dma_packet_list(s, s->current_packet_list);
                s->current_packet_list = NULL;
                s->current_packet = 0;
        }
}

/* Integer fractional math.  When we transmit a 44k1Hz signal we must
 * send 5 41/80 samples per isochronous cycle, as these occur 8000
 * times a second.  Of course, we must send an integral number of
 * samples in a packet, so we use the integer math to alternate
 * between sending 5 and 6 samples per packet.
 */

static void fraction_init(struct fraction *f, int numerator, int denominator)
{
        f->integer = numerator / denominator;
        f->numerator = numerator % denominator;
        f->denominator = denominator;
}

static __inline__ void fraction_add(struct fraction *dst,
                                    struct fraction *src1,
                                    struct fraction *src2)
{
        int sum, denom;

        /* assert: src1->denominator == src2->denominator */

        /* We use these two local variables to allow gcc to optimize
         * the division and the modulo into only one division. */

        sum = src1->numerator + src2->numerator;
        denom = src1->denominator;
        dst->integer = src1->integer + src2->integer + sum / denom;
        dst->numerator = sum % denom;
        dst->denominator = denom;
}

static __inline__ void fraction_sub_int(struct fraction *dst,
                                        struct fraction *src, int integer)
{
        dst->integer = src->integer - integer;
        dst->numerator = src->numerator;
        dst->denominator = src->denominator;
}

static __inline__ int fraction_floor(struct fraction *frac)
{
        return frac->integer;
}

static __inline__ int fraction_ceil(struct fraction *frac)
{
        return frac->integer + (frac->numerator > 0 ? 1 : 0);
}

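/* A minimal sketch of how these helpers drive the 5/6 sample
 * alternation described above; fraction_demo is illustrative only
 * and is not compiled into the driver. */
#if 0
static void fraction_demo(void)
{
        struct fraction per_cycle, ready, next;
        int cycle, nevents;

        /* 44100 samples/s over 8000 cycles/s = 5 41/80 per cycle. */
        fraction_init(&per_cycle, 44100, 8000);
        fraction_init(&ready, 0, 8000);

        for (cycle = 0; cycle < 80; cycle++) {
                fraction_add(&next, &ready, &per_cycle);
                nevents = fraction_floor(&next);  /* 5, 6, 5, 6, ... */
                fraction_sub_int(&ready, &next, nevents);
        }
}
#endif
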
static void packet_initialize(struct packet *p, struct packet *next)
{
        /* Here we initialize the dma descriptor block for
         * transferring one iso packet.  We use two descriptors per
         * packet: an OUTPUT_MORE_IMMEDIATE descriptor for the
         * IEEE1394 iso packet header and an OUTPUT_LAST descriptor
         * for the payload. */

        p->db->header_desc.control =
                DMA_CTL_OUTPUT_MORE | DMA_CTL_IMMEDIATE | 8;

        if (next) {
                p->db->payload_desc.control =
                        DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
                p->db->payload_desc.branch = next->db_bus | 3;
                p->db->header_desc.skip = next->db_bus | 3;
        }
        else {
                p->db->payload_desc.control =
                        DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH |
                        DMA_CTL_UPDATE | DMA_CTL_IRQ;
                p->db->payload_desc.branch = 0;
                p->db->header_desc.skip = 0;
        }

        p->db->payload_desc.data_address = p->payload_bus;
        p->db->payload_desc.status = 0;
}

static struct packet_list *packet_list_alloc(struct stream *s)
{
        int i;
        struct packet_list *pl;
        struct packet *next;

        pl = kmalloc(sizeof *pl, SLAB_KERNEL);
        if (pl == NULL)
                return NULL;

        for (i = 0; i < PACKET_LIST_SIZE; i++) {
                struct packet *p = &pl->packets[i];
                p->db = pci_pool_alloc(s->descriptor_pool, SLAB_KERNEL,
                                       &p->db_bus);
                p->payload = pci_pool_alloc(s->packet_pool, SLAB_KERNEL,
                                            &p->payload_bus);
        }

        for (i = 0; i < PACKET_LIST_SIZE; i++) {
                if (i < PACKET_LIST_SIZE - 1)
                        next = &pl->packets[i + 1];
                else
                        next = NULL;
                packet_initialize(&pl->packets[i], next);
        }

        return pl;
}

static void packet_list_free(struct packet_list *pl, struct stream *s)
{
        int i;

        for (i = 0; i < PACKET_LIST_SIZE; i++) {
                struct packet *p = &pl->packets[i];
                pci_pool_free(s->descriptor_pool, p->db, p->db_bus);
                pci_pool_free(s->packet_pool, p->payload, p->payload_bus);
        }
        kfree(pl);
}

static struct buffer *buffer_alloc(int size)
{
        struct buffer *b;

        b = kmalloc(sizeof *b + size, SLAB_KERNEL);
        if (b == NULL)
                return NULL;
        b->head = 0;
        b->tail = 0;
        b->length = 0;
        b->size = size;

        return b;
}

static unsigned char *buffer_get_bytes(struct buffer *buffer, int size)
{
        unsigned char *p;

        if (buffer->head + size > buffer->size)
                BUG();

        p = &buffer->data[buffer->head];
        buffer->head += size;
        if (buffer->head == buffer->size)
                buffer->head = 0;
        buffer->length -= size;

        return p;
}

static unsigned char *buffer_put_bytes(struct buffer *buffer,
                                       size_t max, size_t *actual)
{
        size_t length;
        unsigned char *p;

        p = &buffer->data[buffer->tail];
        length = min(buffer->size - buffer->length, max);
        if (buffer->tail + length < buffer->size) {
                *actual = length;
                buffer->tail += length;
        }
        else {
                *actual = buffer->size - buffer->tail;
                buffer->tail = 0;
        }

        buffer->length += *actual;
        return p;
}

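/* Note the contract assumed by callers of buffer_put_bytes: it
 * returns a pointer to a contiguous region of at most 'max' bytes
 * and reports in '*actual' how many it actually reserved; at a
 * wrap-around this can be less than both 'max' and the free space,
 * so callers such as amdtp_write simply loop until all their input
 * is consumed. */
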
static u32 get_iec958_header_bits(struct stream *s, int sub_frame, u32 sample)
{
        int csi, parity, shift;
        int block_start;
        u32 bits;

        switch (s->iec958_frame_count) {
        case 1:
                csi = s->format == AMDTP_FORMAT_IEC958_AC3;
                break;
        case 2:
        case 9:
                csi = 1;
                break;
        case 24 ... 27:
                csi = (s->iec958_rate_code >> (27 - s->iec958_frame_count)) & 0x01;
                break;
        default:
                csi = 0;
                break;
        }

        block_start = (s->iec958_frame_count == 0 && sub_frame == 0);

        /* The parity bit is the xor of the sample bits and the
         * channel status info bit. */
        for (shift = 16, parity = sample ^ csi; shift > 0; shift >>= 1)
                parity ^= (parity >> shift);

        bits =  (block_start << 5) |            /* Block start bit */
                ((sub_frame == 0) << 4) |       /* Subframe bit */
                ((parity & 1) << 3) |           /* Parity bit */
                (csi << 2);                     /* Channel status info bit */

        return bits;
}

static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
{
        switch (s->format) {
        case AMDTP_FORMAT_IEC958_PCM:
        case AMDTP_FORMAT_IEC958_AC3:
                return get_iec958_header_bits(s, sub_frame, sample);

        case AMDTP_FORMAT_RAW:
                return 0x40;

        default:
                return 0;
        }
}

static void fill_payload_le16(struct stream *s, quadlet_t *data, int nevents)
{
        quadlet_t *event, sample, bits;
        unsigned char *p;
        int i, j;

        for (i = 0, event = data; i < nevents; i++) {

                for (j = 0; j < s->dimension; j++) {
                        p = buffer_get_bytes(s->input, 2);
                        sample = (p[1] << 16) | (p[0] << 8);
                        bits = get_header_bits(s, j, sample);
                        event[j] = cpu_to_be32((bits << 24) | sample);
                }

                event += s->dimension;
                if (++s->iec958_frame_count == 192)
                        s->iec958_frame_count = 0;
        }
}

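/* On the packing above: a little-endian 16-bit sample (p[0] low
 * byte, p[1] high byte) ends up in bits 23..8 of the quadlet, the
 * label/control bits from get_header_bits() occupy the top byte,
 * and bits 7..0 stay zero - 16-bit audio carried in a 24-bit AM824
 * slot. */
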
static void fill_packet(struct stream *s, struct packet *packet, int nevents)
{
        int syt_index, syt, size;
        u32 control;

        size = (nevents * s->dimension + 2) * sizeof(quadlet_t);

        /* Update DMA descriptors */
        packet->db->payload_desc.status = 0;
        control = packet->db->payload_desc.control & 0xffff0000;
        packet->db->payload_desc.control = control | size;

        /* Fill IEEE1394 headers */
        packet->db->header_desc.header[0] =
                (IEEE1394_SPEED_100 << 16) | (0x01 << 14) |
                (s->iso_channel << 8) | (TCODE_ISO_DATA << 4);
        packet->db->header_desc.header[1] = size << 16;

        /* Calculate synchronization timestamp (syt).  First we
         * determine syt_index, that is, the index in the packet of
         * the sample for which the timestamp is valid. */
        syt_index = (s->syt_interval - s->dbc) & (s->syt_interval - 1);
        if (syt_index < nevents) {
                syt = ((atomic_read(&s->cycle_count) << 12) |
                       s->cycle_offset.integer) & 0xffff;
                fraction_add(&s->cycle_offset,
                             &s->cycle_offset, &s->ticks_per_syt_offset);

                /* This next addition should be modulo 8000 (0x1f40),
                 * but we only use the lower 4 bits of cycle_count, so
                 * we don't need the modulo. */
                atomic_add(s->cycle_offset.integer / 3072, &s->cycle_count);
                s->cycle_offset.integer %= 3072;
        }
        else
                syt = 0xffff;

        atomic_inc(&s->cycle_count2);

        /* Fill cip header */
        packet->payload->eoh0 = 0;
        packet->payload->sid = s->host->host->node_id & 0x3f;
        packet->payload->dbs = s->dimension;
        packet->payload->fn = 0;
        packet->payload->qpc = 0;
        packet->payload->sph = 0;
        packet->payload->reserved = 0;
        packet->payload->dbc = s->dbc;
        packet->payload->eoh1 = 2;
        packet->payload->fmt = FMT_AMDTP;
        packet->payload->fdf = s->fdf;
        packet->payload->syt = cpu_to_be16(syt);

        switch (s->sample_format) {
        case AMDTP_INPUT_LE16:
                fill_payload_le16(s, packet->payload->data, nevents);
                break;
        }

        s->dbc += nevents;
}

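/* To illustrate the timestamp bookkeeping above with assumed
 * numbers: with syt_interval = 8 and dbc = 5, syt_index = (8 - 5) &
 * 7 = 3, so any packet carrying 4 or more events gets a valid
 * timestamp.  The syt field then takes its upper four bits from
 * cycle_count and its lower twelve from cycle_offset.integer, which
 * stays in the range 0..3071. */
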
static void stream_flush(struct stream *s)
{
        struct packet *p;
        int nevents;
        struct fraction next;

        /* The AMDTP specifies two transmission modes: blocking and
         * non-blocking.  In blocking mode you always transfer
         * syt_interval or zero samples, whereas in non-blocking mode
         * you send as many samples as you have available at transfer
         * time.
         *
         * The fraction samples_per_cycle specifies the number of
         * samples that become available per cycle.  We add this to
         * the fraction ready_samples, which specifies the number of
         * leftover samples from the previous transmission.  The sum,
         * stored in the fraction next, specifies the number of
         * samples available for transmission, and from this we
         * determine the number of samples to actually transmit.
         */

        fraction_add(&next, &s->ready_samples, &s->samples_per_cycle);
        if (s->mode == AMDTP_MODE_BLOCKING) {
                if (fraction_floor(&next) >= s->syt_interval)
                        nevents = s->syt_interval;
                else
                        nevents = 0;
        }
        else
                nevents = fraction_floor(&next);

        p = stream_current_packet(s);
        if (s->input->length < nevents * s->dimension * 2 || p == NULL)
                return;

        fill_packet(s, p, nevents);
        stream_queue_packet(s);

        /* Now that we have successfully queued the packet for
         * transmission, we update the fraction ready_samples. */
        fraction_sub_int(&s->ready_samples, &next, nevents);
}

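/* An example of the two modes with assumed numbers: at 44.1 kHz
 * (samples_per_cycle = 5 41/80, syt_interval = 8), non-blocking mode
 * sends 5 or 6 samples per packet, whereas blocking mode sends 8
 * samples whenever at least 8 have accumulated and an empty
 * (CIP-header-only) packet otherwise. */
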
static int stream_alloc_packet_lists(struct stream *s)
{
        int max_nevents, max_packet_size, i;

        if (s->mode == AMDTP_MODE_BLOCKING)
                max_nevents = s->syt_interval;
        else
                max_nevents = fraction_ceil(&s->samples_per_cycle);

        max_packet_size = max_nevents * s->dimension * 4 + 8;
        s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
                                         max_packet_size, 0, 0);

        if (s->packet_pool == NULL)
                return -1;

        INIT_LIST_HEAD(&s->free_packet_lists);
        INIT_LIST_HEAD(&s->dma_packet_lists);
        for (i = 0; i < MAX_PACKET_LISTS; i++) {
                struct packet_list *pl = packet_list_alloc(s);
                if (pl == NULL)
                        break;
                list_add_tail(&pl->link, &s->free_packet_lists);
        }

        return i < MAX_PACKET_LISTS ? -1 : 0;
}

static void stream_free_packet_lists(struct stream *s)
{
        struct packet_list *packet_l, *packet_l_next;

        if (s->current_packet_list != NULL)
                packet_list_free(s->current_packet_list, s);
        list_for_each_entry_safe(packet_l, packet_l_next, &s->dma_packet_lists, link)
                packet_list_free(packet_l, s);
        list_for_each_entry_safe(packet_l, packet_l_next, &s->free_packet_lists, link)
                packet_list_free(packet_l, s);
        if (s->packet_pool != NULL)
                pci_pool_destroy(s->packet_pool);

        s->current_packet_list = NULL;
        INIT_LIST_HEAD(&s->free_packet_lists);
        INIT_LIST_HEAD(&s->dma_packet_lists);
        s->packet_pool = NULL;
}

static void plug_update(struct cmp_pcr *plug, void *data)
{
        struct stream *s = data;

        HPSB_INFO("plug update: p2p_count=%d, channel=%d",
                  plug->p2p_count, plug->channel);
        s->iso_channel = plug->channel;
        if (plug->p2p_count > 0) {
                struct packet_list *pl;

                pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
                stream_start_dma(s, pl);
        }
        else
                ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 0);
}

static int stream_configure(struct stream *s, int cmd, struct amdtp_ioctl *cfg)
{
        const int transfer_delay = 9000;

        if (cfg->format <= AMDTP_FORMAT_IEC958_AC3)
                s->format = cfg->format;
        else
                return -EINVAL;

        switch (cfg->rate) {
        case 32000:
                s->syt_interval = 8;
                s->fdf = FDF_SFC_32KHZ;
                s->iec958_rate_code = 0x0c;
                break;
        case 44100:
                s->syt_interval = 8;
                s->fdf = FDF_SFC_44K1HZ;
                s->iec958_rate_code = 0x00;
                break;
        case 48000:
                s->syt_interval = 8;
                s->fdf = FDF_SFC_48KHZ;
                s->iec958_rate_code = 0x04;
                break;
        case 88200:
                s->syt_interval = 16;
                s->fdf = FDF_SFC_88K2HZ;
                s->iec958_rate_code = 0x00;
                break;
        case 96000:
                s->syt_interval = 16;
                s->fdf = FDF_SFC_96KHZ;
                s->iec958_rate_code = 0x00;
                break;
        case 176400:
                s->syt_interval = 32;
                s->fdf = FDF_SFC_176K4HZ;
                s->iec958_rate_code = 0x00;
                break;
        case 192000:
                s->syt_interval = 32;
                s->fdf = FDF_SFC_192KHZ;
                s->iec958_rate_code = 0x00;
                break;
        default:
                return -EINVAL;
        }

        s->rate = cfg->rate;
        fraction_init(&s->samples_per_cycle, s->rate, 8000);
        fraction_init(&s->ready_samples, 0, 8000);

        /* The ticks_per_syt_offset is initialized to the number of
         * ticks between syt_interval events.  The number of ticks per
         * second is 24.576e6, so the number of ticks between
         * syt_interval events is 24.576e6 * syt_interval / rate.
         */
        fraction_init(&s->ticks_per_syt_offset,
                      24576000 * s->syt_interval, s->rate);
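        /* For example (illustrative numbers, not from the original
         * source): at 44100 Hz with syt_interval = 8 this gives
         * 24576000 * 8 / 44100 = 4458 10200/44100 ticks between
         * timestamped events, i.e. just under a cycle and a half of
         * 3072 ticks. */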
        fraction_init(&s->cycle_offset, (transfer_delay % 3072) * s->rate, s->rate);
        atomic_set(&s->cycle_count, transfer_delay / 3072);
        atomic_set(&s->cycle_count2, 0);

        s->mode = cfg->mode;
        s->sample_format = AMDTP_INPUT_LE16;

        /* When using the AM824 raw subformat we can stream signals of
         * any dimension.  The IEC958 subformat, however, only
         * supports 2 channels.
         */
        if (s->format == AMDTP_FORMAT_RAW || cfg->dimension == 2)
                s->dimension = cfg->dimension;
        else
                return -EINVAL;

        if (s->opcr != NULL) {
                cmp_unregister_opcr(s->host->host, s->opcr);
                s->opcr = NULL;
        }

        switch (cmd) {
        case AMDTP_IOC_PLUG:
                s->opcr = cmp_register_opcr(s->host->host, cfg->u.plug,
                                            /*payload*/ 12, plug_update, s);
                if (s->opcr == NULL)
                        return -EINVAL;
                s->iso_channel = s->opcr->channel;
                break;

        case AMDTP_IOC_CHANNEL:
                if (cfg->u.channel >= 0 && cfg->u.channel < 64)
                        s->iso_channel = cfg->u.channel;
                else
                        return -EINVAL;
                break;
        }

        /* The ioctl settings were all valid, so we realloc the packet
         * lists to make sure the packet size is big enough.
         */
        if (s->packet_pool != NULL)
                stream_free_packet_lists(s);

        if (stream_alloc_packet_lists(s) < 0) {
                stream_free_packet_lists(s);
                return -ENOMEM;
        }

        return 0;
}

static struct stream *stream_alloc(struct amdtp_host *host)
{
        struct stream *s;
        unsigned long flags;

        s = kmalloc(sizeof(struct stream), SLAB_KERNEL);
        if (s == NULL)
                return NULL;

        memset(s, 0, sizeof(struct stream));
        s->host = host;

        s->input = buffer_alloc(BUFFER_SIZE);
        if (s->input == NULL) {
                kfree(s);
                return NULL;
        }

        s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
                                             sizeof(struct descriptor_block),
                                             16, 0);

        if (s->descriptor_pool == NULL) {
                kfree(s->input);
                kfree(s);
                return NULL;
        }

        INIT_LIST_HEAD(&s->free_packet_lists);
        INIT_LIST_HEAD(&s->dma_packet_lists);

        init_waitqueue_head(&s->packet_list_wait);
        spin_lock_init(&s->packet_list_lock);

        ohci1394_init_iso_tasklet(&s->iso_tasklet, OHCI_ISO_TRANSMIT,
                                  stream_shift_packet_lists,
                                  (unsigned long) s);

        if (ohci1394_register_iso_tasklet(host->ohci, &s->iso_tasklet) < 0) {
                pci_pool_destroy(s->descriptor_pool);
                kfree(s->input);
                kfree(s);
                return NULL;
        }

        spin_lock_irqsave(&host->stream_list_lock, flags);
        list_add_tail(&s->link, &host->stream_list);
        spin_unlock_irqrestore(&host->stream_list_lock, flags);

        return s;
}

static void stream_free(struct stream *s)
{
        unsigned long flags;

        /* Stop the DMA.  We wait for the dma packet list to become
         * empty and let the dma controller run out of programs.  This
         * seems to be more reliable than stopping it directly, since
         * that sometimes generates an it transmit interrupt if we
         * later re-enable the context.
         */
        wait_event_interruptible(s->packet_list_wait,
                                 list_empty(&s->dma_packet_lists));

        ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 1);
        ohci1394_unregister_iso_tasklet(s->host->ohci, &s->iso_tasklet);

        if (s->opcr != NULL)
                cmp_unregister_opcr(s->host->host, s->opcr);

        spin_lock_irqsave(&s->host->stream_list_lock, flags);
        list_del(&s->link);
        spin_unlock_irqrestore(&s->host->stream_list_lock, flags);

        kfree(s->input);
        stream_free_packet_lists(s);
        pci_pool_destroy(s->descriptor_pool);

        kfree(s);
}

/* File operations */

static ssize_t amdtp_write(struct file *file, const char __user *buffer, size_t count,
                           loff_t *offset_is_ignored)
{
        struct stream *s = file->private_data;
        unsigned char *p;
        int i;
        size_t length;

        if (s->packet_pool == NULL)
                return -EBADFD;

        /* Fill the circular buffer from the input buffer and call the
         * iso packer when the buffer is full.  The iso packer may
         * leave bytes in the buffer for two reasons: either the
         * remaining bytes weren't enough to build a new packet, or
         * there were no free packet lists.  In the first case we
         * re-fill the buffer and call the iso packer again or return
         * if we used all the data from userspace.  In the second
         * case, the wait_event_interruptible will block until the irq
         * handler frees a packet list.
         */

        for (i = 0; i < count; i += length) {
                p = buffer_put_bytes(s->input, count - i, &length);
                if (copy_from_user(p, buffer + i, length))
                        return -EFAULT;
                if (s->input->length < s->input->size)
                        continue;

                stream_flush(s);

                if (s->current_packet_list != NULL)
                        continue;

                if (file->f_flags & O_NONBLOCK)
                        return i + length > 0 ? i + length : -EAGAIN;

                if (wait_event_interruptible(s->packet_list_wait,
                                             !list_empty(&s->free_packet_lists)))
                        return -EINTR;
        }

        return count;
}

static long amdtp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct stream *s = file->private_data;
        struct amdtp_ioctl cfg;
        int err = 0;

        lock_kernel();
        switch (cmd) {
        case AMDTP_IOC_PLUG:
        case AMDTP_IOC_CHANNEL:
                if (copy_from_user(&cfg, (struct amdtp_ioctl __user *) arg, sizeof cfg))
                        err = -EFAULT;
                else
                        err = stream_configure(s, cmd, &cfg);
                break;

        default:
                err = -EINVAL;
                break;
        }
        unlock_kernel();

        return err;
}

static unsigned int amdtp_poll(struct file *file, poll_table *pt)
{
        struct stream *s = file->private_data;

        poll_wait(file, &s->packet_list_wait, pt);

        if (!list_empty(&s->free_packet_lists))
                return POLLOUT | POLLWRNORM;
        else
                return 0;
}

static int amdtp_open(struct inode *inode, struct file *file)
{
        struct amdtp_host *host;
        int i = ieee1394_file_to_instance(file);

        host = hpsb_get_hostinfo_bykey(&amdtp_highlevel, i);
        if (host == NULL)
                return -ENODEV;

        file->private_data = stream_alloc(host);
        if (file->private_data == NULL)
                return -ENOMEM;

        return 0;
}

static int amdtp_release(struct inode *inode, struct file *file)
{
        struct stream *s = file->private_data;

        stream_free(s);

        return 0;
}

static struct cdev amdtp_cdev;
static struct file_operations amdtp_fops =
{
        .owner =          THIS_MODULE,
        .write =          amdtp_write,
        .poll =           amdtp_poll,
        .unlocked_ioctl = amdtp_ioctl,
        .compat_ioctl =   amdtp_ioctl, /* All amdtp ioctls are compatible */
        .open =           amdtp_open,
        .release =        amdtp_release
};

/* IEEE1394 Subsystem functions */

static void amdtp_add_host(struct hpsb_host *host)
{
        struct amdtp_host *ah;
        int minor;

        if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME) != 0)
                return;

        ah = hpsb_create_hostinfo(&amdtp_highlevel, host, sizeof(*ah));
        if (!ah) {
                HPSB_ERR("amdtp: Unable to allocate hostinfo");
                return;
        }

        ah->host = host;
        ah->ohci = host->hostdata;

        hpsb_set_hostinfo_key(&amdtp_highlevel, host, ah->host->id);

        minor = IEEE1394_MINOR_BLOCK_AMDTP * 16 + ah->host->id;

        INIT_LIST_HEAD(&ah->stream_list);
        spin_lock_init(&ah->stream_list_lock);

        devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, minor),
                      S_IFCHR|S_IRUSR|S_IWUSR, "amdtp/%d", ah->host->id);
}

static void amdtp_remove_host(struct hpsb_host *host)
{
        struct amdtp_host *ah = hpsb_get_hostinfo(&amdtp_highlevel, host);

        if (ah)
                devfs_remove("amdtp/%d", ah->host->id);
}

static struct hpsb_highlevel amdtp_highlevel = {
        .name =        "amdtp",
        .add_host =    amdtp_add_host,
        .remove_host = amdtp_remove_host,
};

/* Module interface */

MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
MODULE_DESCRIPTION("Driver for Audio & Music Data Transmission Protocol "
                   "on OHCI boards.");
MODULE_SUPPORTED_DEVICE("amdtp");
MODULE_LICENSE("GPL");

static int __init amdtp_init_module (void)
{
        cdev_init(&amdtp_cdev, &amdtp_fops);
        amdtp_cdev.owner = THIS_MODULE;
        kobject_set_name(&amdtp_cdev.kobj, "amdtp");
        if (cdev_add(&amdtp_cdev, IEEE1394_AMDTP_DEV, 16)) {
                HPSB_ERR("amdtp: unable to add char device");
                return -EIO;
        }

        devfs_mk_dir("amdtp");

        hpsb_register_highlevel(&amdtp_highlevel);

        HPSB_INFO("Loaded AMDTP driver");

        return 0;
}

static void __exit amdtp_exit_module (void)
{
        hpsb_unregister_highlevel(&amdtp_highlevel);
        devfs_remove("amdtp");
        cdev_del(&amdtp_cdev);

        HPSB_INFO("Unloaded AMDTP driver");
}

module_init(amdtp_init_module);
module_exit(amdtp_exit_module);