/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include "fw-transaction.h"
#include "fw-ohci.h"
#define DESCRIPTOR_OUTPUT_MORE          0
#define DESCRIPTOR_OUTPUT_LAST          (1 << 12)
#define DESCRIPTOR_INPUT_MORE           (2 << 12)
#define DESCRIPTOR_INPUT_LAST           (3 << 12)
#define DESCRIPTOR_STATUS               (1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE        (2 << 8)
#define DESCRIPTOR_PING                 (1 << 7)
#define DESCRIPTOR_YY                   (1 << 6)
#define DESCRIPTOR_NO_IRQ               (0 << 4)
#define DESCRIPTOR_IRQ_ERROR            (1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS           (3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS        (3 << 2)
#define DESCRIPTOR_WAIT                 (3 << 0)
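/*
 * These defines populate the 16-bit control word of a DMA descriptor:
 * the command type occupies bits 15-12, the status-write enable
 * bit 11, the key field bits 10-8, and the interrupt, branch and
 * wait conditions bits 5-4, 3-2 and 1-0 respectively.
 */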
struct descriptor {
        __le16 req_count;
        __le16 control;
        __le32 data_address;
        __le32 branch_address;
        __le16 res_count;
        __le16 transfer_status;
} __attribute__((aligned(16)));
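/*
 * The hardware requires descriptor blocks to be 16-byte aligned, so
 * the low four bits of branch_address are free to carry the Z value,
 * i.e. the number of descriptors in the block branched to (note the
 * "d_bus | z" and "address & 0xf" arithmetic below).  When
 * DESCRIPTOR_STATUS is set, the controller writes res_count and
 * transfer_status back into the descriptor on completion.
 */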
struct db_descriptor {
        __le16 first_size;
        __le16 control;
        __le16 second_req_count;
        __le16 first_req_count;
        __le32 branch_address;
        __le16 second_res_count;
        __le16 first_res_count;
        __le32 reserved0;
        __le32 first_buffer;
        __le32 second_buffer;
        __le32 reserved1;
} __attribute__((aligned(16)));
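/*
 * Dual-buffer receive descriptor: the controller stores the first
 * first_size bytes of each incoming packet (the headers) through
 * first_buffer and the remaining payload through second_buffer, with
 * first_req_count and second_req_count bytes available in total; see
 * ohci_queue_iso_receive_dualbuffer() below.
 */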
#define CONTROL_SET(regs)       (regs)
#define CONTROL_CLEAR(regs)     ((regs) + 4)
#define COMMAND_PTR(regs)       ((regs) + 12)
#define CONTEXT_MATCH(regs)     ((regs) + 16)
struct ar_buffer {
        struct descriptor descriptor;
        struct ar_buffer *next;
        __le32 data[0];
};

struct ar_context {
        struct fw_ohci *ohci;
        struct ar_buffer *current_buffer;
        struct ar_buffer *last_buffer;
        void *pointer;
        u32 regs;
        struct tasklet_struct tasklet;
};
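/*
 * Asynchronous receive (AR) contexts run in buffer-fill mode:
 * received packets are stored back to back in a chain of page-sized
 * buffers, each headed by an INPUT_MORE descriptor and linked to its
 * successor through branch_address (with Z = 1, see
 * ar_context_add_page()).  ctx->pointer tracks how far the tasklet
 * has consumed the current buffer.
 */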
struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
                                     struct descriptor *d,
                                     struct descriptor *last);
struct context {
        struct fw_ohci *ohci;
        u32 regs;

        struct descriptor *buffer;
        dma_addr_t buffer_bus;
        size_t buffer_size;
        struct descriptor *head_descriptor;
        struct descriptor *tail_descriptor;
        struct descriptor *tail_descriptor_last;
        struct descriptor *prev_descriptor;

        descriptor_callback_t callback;

        struct tasklet_struct tasklet;
};
#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
struct iso_context {
        struct fw_iso_context base;
        struct context context;
        void *header;
        size_t header_length;
};

#define CONFIG_ROM_SIZE 1024
struct fw_ohci {
        struct fw_card card;

        u32 version;
        __iomem char *registers;
        dma_addr_t self_id_bus;
        __le32 *self_id_cpu;
        struct tasklet_struct bus_reset_tasklet;
        int node_id;
        int generation;
        int request_generation;
        u32 bus_seconds;

        /*
         * Spinlock for accessing fw_ohci data.  Never call out of
         * this driver with this lock held.
         */
        spinlock_t lock;
        u32 self_id_buffer[512];

        /* Config rom buffers */
        __be32 *config_rom;
        dma_addr_t config_rom_bus;
        __be32 *next_config_rom;
        dma_addr_t next_config_rom_bus;
        u32 next_header;

        struct ar_context ar_request_ctx;
        struct ar_context ar_response_ctx;
        struct context at_request_ctx;
        struct context at_response_ctx;

        u32 it_context_mask;
        struct iso_context *it_context_list;
        u32 ir_context_mask;
        struct iso_context *ir_context_list;
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
        return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE   0x80000000
#define IR_CONTEXT_BUFFER_FILL          0x80000000
#define IR_CONTEXT_ISOCH_HEADER         0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE   0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE   0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE     0x08000000

#define CONTEXT_RUN     0x8000
#define CONTEXT_WAKE    0x1000
#define CONTEXT_DEAD    0x0800
#define CONTEXT_ACTIVE  0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES     0x2
#define OHCI1394_MAX_AT_RESP_RETRIES    0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES  0x8

#define FW_OHCI_MAJOR                   240
#define OHCI1394_REGISTER_SIZE          0x800
#define OHCI_LOOP_COUNT                 500
#define OHCI1394_PCI_HCI_Control        0x40
#define SELF_ID_BUF_SIZE                0x800
#define OHCI_TCODE_PHY_PACKET           0x0e
#define OHCI_VERSION_1_1                0x010010
#define ISO_BUFFER_SIZE                 (64 * 1024)
#define AT_BUFFER_SIZE                  4096

static char ohci_driver_name[] = KBUILD_MODNAME;

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
        writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
        return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
        /* Do a dummy read to flush writes. */
        reg_read(ohci, OHCI1394_Version);
}
static int
ohci_update_phy_reg(struct fw_card *card, int addr,
                    int clear_bits, int set_bits)
{
        struct fw_ohci *ohci = fw_ohci(card);
        u32 val, old;

        reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
        msleep(2);
        val = reg_read(ohci, OHCI1394_PhyControl);
        if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
                fw_error("failed to set phy reg bits.\n");
                return -EBUSY;
        }

        old = OHCI1394_PhyControl_ReadData(val);
        old = (old & ~clear_bits) | set_bits;
        reg_write(ohci, OHCI1394_PhyControl,
                  OHCI1394_PhyControl_Write(addr, old));

        return 0;
}
static int ar_context_add_page(struct ar_context *ctx)
{
        struct device *dev = ctx->ohci->card.device;
        struct ar_buffer *ab;
        dma_addr_t ab_bus;
        size_t offset;

        ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
        if (ab == NULL)
                return -ENOMEM;

        ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(ab_bus)) {
                free_page((unsigned long) ab);
                return -ENOMEM;
        }

        memset(&ab->descriptor, 0, sizeof(ab->descriptor));
        ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
                                                    DESCRIPTOR_STATUS |
                                                    DESCRIPTOR_BRANCH_ALWAYS);
        offset = offsetof(struct ar_buffer, data);
        ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
        ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
        ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
        ab->descriptor.branch_address = 0;

        dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

        ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
        ctx->last_buffer->next = ab;
        ctx->last_buffer = ab;

        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
        flush_writes(ctx->ohci);

        return 0;
}
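/*
 * The controller appends a trailer quadlet to each received packet;
 * as parsed below, bits 20-16 hold the event/ack code, bits 23-21
 * the receive speed and bits 15-0 the cycle timestamp.
 */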
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
        struct fw_ohci *ohci = ctx->ohci;
        struct fw_packet p;
        u32 status, length, tcode;

        p.header[0] = le32_to_cpu(buffer[0]);
        p.header[1] = le32_to_cpu(buffer[1]);
        p.header[2] = le32_to_cpu(buffer[2]);

        tcode = (p.header[0] >> 4) & 0x0f;
        switch (tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_READ_QUADLET_RESPONSE:
                p.header[3] = (__force __u32) buffer[3];
                p.header_length = 16;
                p.payload_length = 0;
                break;

        case TCODE_READ_BLOCK_REQUEST:
                p.header[3] = le32_to_cpu(buffer[3]);
                p.header_length = 16;
                p.payload_length = 0;
                break;

        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_BLOCK_RESPONSE:
        case TCODE_LOCK_REQUEST:
        case TCODE_LOCK_RESPONSE:
                p.header[3] = le32_to_cpu(buffer[3]);
                p.header_length = 16;
                p.payload_length = p.header[3] >> 16;
                break;

        case TCODE_WRITE_RESPONSE:
        case TCODE_READ_QUADLET_REQUEST:
        case OHCI_TCODE_PHY_PACKET:
                p.header_length = 12;
                p.payload_length = 0;
                break;
        }

        p.payload = (void *) buffer + p.header_length;

        /* FIXME: What to do about evt_* errors? */
        length = (p.header_length + p.payload_length + 3) / 4;
        status = le32_to_cpu(buffer[length]);

        p.ack        = ((status >> 16) & 0x1f) - 16;
        p.speed      = (status >> 21) & 0x7;
        p.timestamp  = status & 0xffff;
        p.generation = ohci->request_generation;

        /*
         * The OHCI bus reset handler synthesizes a phy packet with
         * the new generation number when a bus reset happens (see
         * section 8.4.2.3).  This helps us determine when a request
         * was received and make sure we send the response in the same
         * generation.  We only need this for requests; for responses
         * we use the unique tlabel for finding the matching
         * request.
         */

        if (p.ack + 16 == 0x09)
                ohci->request_generation = (buffer[2] >> 16) & 0xff;
        else if (ctx == &ohci->ar_request_ctx)
                fw_core_handle_request(&ohci->card, &p);
        else
                fw_core_handle_response(&ohci->card, &p);

        return buffer + length + 1;
}
static void ar_context_tasklet(unsigned long data)
{
        struct ar_context *ctx = (struct ar_context *)data;
        struct fw_ohci *ohci = ctx->ohci;
        struct ar_buffer *ab;
        struct descriptor *d;
        void *buffer, *end;

        ab = ctx->current_buffer;
        d = &ab->descriptor;

        if (d->res_count == 0) {
                size_t size, rest, offset;

                /*
                 * This descriptor is finished and we may have a
                 * packet split across this and the next buffer. We
                 * reuse the page for reassembling the split packet.
                 */

                offset = offsetof(struct ar_buffer, data);
                dma_unmap_single(ohci->card.device,
                                 ab->descriptor.data_address - offset,
                                 PAGE_SIZE, DMA_BIDIRECTIONAL);

                buffer = ab;
                ab = ab->next;
                d = &ab->descriptor;
                size = buffer + PAGE_SIZE - ctx->pointer;
                rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
                memmove(buffer, ctx->pointer, size);
                memcpy(buffer + size, ab->data, rest);
                ctx->current_buffer = ab;
                ctx->pointer = (void *) ab->data + rest;
                end = buffer + size + rest;

                while (buffer < end)
                        buffer = handle_ar_packet(ctx, buffer);

                free_page((unsigned long)buffer);
                ar_context_add_page(ctx);
        } else {
                buffer = ctx->pointer;
                ctx->pointer = end =
                        (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

                while (buffer < end)
                        buffer = handle_ar_packet(ctx, buffer);
        }
}
static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
        struct ar_buffer ab;

        ctx->regs = regs;
        ctx->ohci = ohci;
        ctx->last_buffer = &ab;
        tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

        ar_context_add_page(ctx);
        ar_context_add_page(ctx);
        ctx->current_buffer = ab.next;
        ctx->pointer = ctx->current_buffer->data;

        reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address);
        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
        flush_writes(ctx->ohci);

        return 0;
}
static void context_tasklet(unsigned long data)
{
        struct context *ctx = (struct context *) data;
        struct fw_ohci *ohci = ctx->ohci;
        struct descriptor *d, *last;
        u32 address;
        int z;

        dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
                                ctx->buffer_size, DMA_TO_DEVICE);

        d = ctx->tail_descriptor;
        last = ctx->tail_descriptor_last;

        while (last->branch_address != 0) {
                address = le32_to_cpu(last->branch_address);
                z = address & 0xf;
                d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
                last = (z == 2) ? d : d + z - 1;

                if (!ctx->callback(ctx, d, last))
                        break;

                ctx->tail_descriptor = d;
                ctx->tail_descriptor_last = last;
        }
}
static int
context_init(struct context *ctx, struct fw_ohci *ohci,
             size_t buffer_size, u32 regs,
             descriptor_callback_t callback)
{
        ctx->ohci = ohci;
        ctx->regs = regs;
        ctx->buffer_size = buffer_size;
        ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
        if (ctx->buffer == NULL)
                return -ENOMEM;

        tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
        ctx->callback = callback;

        ctx->buffer_bus =
                dma_map_single(ohci->card.device, ctx->buffer,
                               buffer_size, DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->buffer_bus)) {
                kfree(ctx->buffer);
                return -ENOMEM;
        }

        ctx->head_descriptor      = ctx->buffer;
        ctx->prev_descriptor      = ctx->buffer;
        ctx->tail_descriptor      = ctx->buffer;
        ctx->tail_descriptor_last = ctx->buffer;

        /*
         * We put a dummy descriptor in the buffer that has a NULL
         * branch address and looks like it's been sent.  That way we
         * have a descriptor to append DMA programs to.  Also, the
         * ring buffer invariant is that it always has at least one
         * element so that head == tail means buffer full.
         */

        memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
        ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
        ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
        ctx->head_descriptor++;

        return 0;
}
static void
context_release(struct context *ctx)
{
        struct fw_card *card = &ctx->ohci->card;

        dma_unmap_single(card->device, ctx->buffer_bus,
                         ctx->buffer_size, DMA_TO_DEVICE);
        kfree(ctx->buffer);
}

static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
        struct descriptor *d, *tail, *end;

        d = ctx->head_descriptor;
        tail = ctx->tail_descriptor;
        end = ctx->buffer + ctx->buffer_size / sizeof(*d);

        if (d + z <= tail) {
                goto has_space;
        } else if (d > tail && d + z <= end) {
                goto has_space;
        } else if (d > tail && ctx->buffer + z <= tail) {
                d = ctx->buffer;
                goto has_space;
        }

        return NULL;

 has_space:
        memset(d, 0, z * sizeof(*d));
        *d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

        return d;
}
static void context_run(struct context *ctx, u32 extra)
{
        struct fw_ohci *ohci = ctx->ohci;

        reg_write(ohci, COMMAND_PTR(ctx->regs),
                  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
        reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
        reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
        flush_writes(ohci);
}

static void context_append(struct context *ctx,
                           struct descriptor *d, int z, int extra)
{
        dma_addr_t d_bus;

        d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

        ctx->head_descriptor = d + z + extra;
        ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
        ctx->prev_descriptor = z == 2 ? d : d + z - 1;

        dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
                                   ctx->buffer_size, DMA_TO_DEVICE);

        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
        flush_writes(ctx->ohci);
}
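/*
 * context_append() publishes a new block by storing "d_bus | z" into
 * the previous descriptor's branch_address and then setting the WAKE
 * bit, which prompts a context that may already have fetched a zero
 * branch address (end of program) to re-read it and continue.
 */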
static void context_stop(struct context *ctx)
{
        u32 reg;
        int i;

        reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
        flush_writes(ctx->ohci);

        for (i = 0; i < 10; i++) {
                reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
                if ((reg & CONTEXT_ACTIVE) == 0)
                        break;

                fw_notify("context_stop: still active (0x%08x)\n", reg);
                msleep(1);
        }
}
struct driver_data {
        struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
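/*
 * The descriptor block built below spans four slots: d[0] is the
 * KEY_IMMEDIATE descriptor, d[1] holds its immediate data (the
 * rewritten packet header), d[2] describes the payload when there is
 * one, and d[3] is never fetched by the controller, so we borrow it
 * to stash the driver_data backpointer used by handle_at_packet()
 * and ohci_cancel_packet().
 */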
static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
{
        struct fw_ohci *ohci = ctx->ohci;
        dma_addr_t d_bus, payload_bus;
        struct driver_data *driver_data;
        struct descriptor *d, *last;
        __le32 *header;
        int z, tcode;
        u32 reg;

        d = context_get_descriptors(ctx, 4, &d_bus);
        if (d == NULL) {
                packet->ack = RCODE_SEND_ERROR;
                return -1;
        }

        d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
        d[0].res_count = cpu_to_le16(packet->timestamp);

        /*
         * The DMA format for asynchronous link packets is different
         * from the IEEE1394 layout, so shift the fields around
         * accordingly.  If header_length is 8, it's a PHY packet, to
         * which we need to prepend an extra quadlet.
         */

        header = (__le32 *) &d[1];
        if (packet->header_length > 8) {
                header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
                                        (packet->speed << 16));
                header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
                                        (packet->header[0] & 0xffff0000));
                header[2] = cpu_to_le32(packet->header[2]);

                tcode = (packet->header[0] >> 4) & 0x0f;
                if (TCODE_IS_BLOCK_PACKET(tcode))
                        header[3] = cpu_to_le32(packet->header[3]);
                else
                        header[3] = (__force __le32) packet->header[3];

                d[0].req_count = cpu_to_le16(packet->header_length);
        } else {
                header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
                                        (packet->speed << 16));
                header[1] = cpu_to_le32(packet->header[0]);
                header[2] = cpu_to_le32(packet->header[1]);
                d[0].req_count = cpu_to_le16(12);
        }

        driver_data = (struct driver_data *) &d[3];
        driver_data->packet = packet;
        packet->driver_data = driver_data;

        if (packet->payload_length > 0) {
                payload_bus =
                        dma_map_single(ohci->card.device, packet->payload,
                                       packet->payload_length, DMA_TO_DEVICE);
                if (dma_mapping_error(payload_bus)) {
                        packet->ack = RCODE_SEND_ERROR;
                        return -1;
                }

                d[2].req_count    = cpu_to_le16(packet->payload_length);
                d[2].data_address = cpu_to_le32(payload_bus);
                last = &d[2];
                z = 3;
        } else {
                last = &d[0];
                z = 2;
        }

        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
                                     DESCRIPTOR_IRQ_ALWAYS |
                                     DESCRIPTOR_BRANCH_ALWAYS);

        /* FIXME: Document how the locking works. */
        if (ohci->generation != packet->generation) {
                packet->ack = RCODE_GENERATION;
                return -1;
        }

        context_append(ctx, d, z, 4 - z);

        /* If the context isn't already running, start it up. */
        reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
        if ((reg & CONTEXT_RUN) == 0)
                context_run(ctx, 0);

        return 0;
}
static int handle_at_packet(struct context *context,
                            struct descriptor *d,
                            struct descriptor *last)
{
        struct driver_data *driver_data;
        struct fw_packet *packet;
        struct fw_ohci *ohci = context->ohci;
        dma_addr_t payload_bus;
        int evt;

        if (last->transfer_status == 0)
                /* This descriptor isn't done yet, stop iteration. */
                return 0;

        driver_data = (struct driver_data *) &d[3];
        packet = driver_data->packet;
        if (packet == NULL)
                /* This packet was cancelled, just continue. */
                return 1;

        payload_bus = le32_to_cpu(last->data_address);
        if (payload_bus != 0)
                dma_unmap_single(ohci->card.device, payload_bus,
                                 packet->payload_length, DMA_TO_DEVICE);

        evt = le16_to_cpu(last->transfer_status) & 0x1f;
        packet->timestamp = le16_to_cpu(last->res_count);

        switch (evt) {
        case OHCI1394_evt_timeout:
                /* Async response transmit timed out. */
                packet->ack = RCODE_CANCELLED;
                break;

        case OHCI1394_evt_flushed:
                /*
                 * A packet that was flushed should give the same error
                 * as when we try to use a stale generation count.
                 */
                packet->ack = RCODE_GENERATION;
                break;

        case OHCI1394_evt_missing_ack:
                /*
                 * Using a valid (current) generation count, but the
                 * node is not on the bus or not sending acks.
                 */
                packet->ack = RCODE_NO_ACK;
                break;

        case ACK_COMPLETE + 0x10:
        case ACK_PENDING + 0x10:
        case ACK_BUSY_X + 0x10:
        case ACK_BUSY_A + 0x10:
        case ACK_BUSY_B + 0x10:
        case ACK_DATA_ERROR + 0x10:
        case ACK_TYPE_ERROR + 0x10:
                packet->ack = evt - 0x10;
                break;

        default:
                packet->ack = RCODE_SEND_ERROR;
                break;
        }

        packet->callback(packet, &ohci->card, packet->ack);

        return 1;
}
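/*
 * In the transfer_status event field decoded above, codes below 0x10
 * are OHCI evt_* errors, while 0x10 and up encode the IEEE 1394 ack
 * code plus 0x10; hence the "evt - 0x10" in the ACK_* cases and the
 * "ack = ((status >> 16) & 0x1f) - 16" in handle_ar_packet().
 */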
#define HEADER_GET_DESTINATION(q)       (((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)             (((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)       (((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)       (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)    (((q) >> 0) & 0xffff)
static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
        struct fw_packet response;
        int tcode, length, i;

        tcode = HEADER_GET_TCODE(packet->header[0]);
        if (TCODE_IS_BLOCK_PACKET(tcode))
                length = HEADER_GET_DATA_LENGTH(packet->header[3]);
        else
                length = 4;

        i = csr - CSR_CONFIG_ROM;
        if (i + length > CONFIG_ROM_SIZE) {
                fw_fill_response(&response, packet->header,
                                 RCODE_ADDRESS_ERROR, NULL, 0);
        } else if (!TCODE_IS_READ_REQUEST(tcode)) {
                fw_fill_response(&response, packet->header,
                                 RCODE_TYPE_ERROR, NULL, 0);
        } else {
                fw_fill_response(&response, packet->header, RCODE_COMPLETE,
                                 (void *) ohci->config_rom + i, length);
        }

        fw_core_handle_response(&ohci->card, &response);
}
static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
        struct fw_packet response;
        int tcode, length, ext_tcode, sel;
        __be32 *payload, lock_old;
        u32 lock_arg, lock_data;

        tcode = HEADER_GET_TCODE(packet->header[0]);
        length = HEADER_GET_DATA_LENGTH(packet->header[3]);
        payload = packet->payload;
        ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

        if (tcode == TCODE_LOCK_REQUEST &&
            ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
                lock_arg = be32_to_cpu(payload[0]);
                lock_data = be32_to_cpu(payload[1]);
        } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
                lock_arg = 0;
                lock_data = 0;
        } else {
                fw_fill_response(&response, packet->header,
                                 RCODE_TYPE_ERROR, NULL, 0);
                goto out;
        }

        sel = (csr - CSR_BUS_MANAGER_ID) / 4;
        reg_write(ohci, OHCI1394_CSRData, lock_data);
        reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
        reg_write(ohci, OHCI1394_CSRControl, sel);

        if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
                lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
        else
                fw_notify("swap not done yet\n");

        fw_fill_response(&response, packet->header,
                         RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
        fw_core_handle_response(&ohci->card, &response);
}
static void
handle_local_request(struct context *ctx, struct fw_packet *packet)
{
        u64 offset;
        u32 csr;

        if (ctx == &ctx->ohci->at_request_ctx) {
                packet->ack = ACK_PENDING;
                packet->callback(packet, &ctx->ohci->card, packet->ack);
        }

        offset =
                ((unsigned long long)
                 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
                packet->header[2];
        csr = offset - CSR_REGISTER_BASE;

        /* Handle config rom reads. */
        if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
                handle_local_rom(ctx->ohci, packet, csr);
        else switch (csr) {
        case CSR_BUS_MANAGER_ID:
        case CSR_BANDWIDTH_AVAILABLE:
        case CSR_CHANNELS_AVAILABLE_HI:
        case CSR_CHANNELS_AVAILABLE_LO:
                handle_local_lock(ctx->ohci, packet, csr);
                break;
        default:
                if (ctx == &ctx->ohci->at_request_ctx)
                        fw_core_handle_request(&ctx->ohci->card, packet);
                else
                        fw_core_handle_response(&ctx->ohci->card, packet);
                break;
        }

        if (ctx == &ctx->ohci->at_response_ctx) {
                packet->ack = ACK_COMPLETE;
                packet->callback(packet, &ctx->ohci->card, packet->ack);
        }
}
static void
at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&ctx->ohci->lock, flags);

        if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
            ctx->ohci->generation == packet->generation) {
                spin_unlock_irqrestore(&ctx->ohci->lock, flags);
                handle_local_request(ctx, packet);
                return;
        }

        retval = at_context_queue_packet(ctx, packet);
        spin_unlock_irqrestore(&ctx->ohci->lock, flags);

        if (retval < 0)
                packet->callback(packet, &ctx->ohci->card, packet->ack);
}
static void bus_reset_tasklet(unsigned long data)
{
        struct fw_ohci *ohci = (struct fw_ohci *)data;
        int self_id_count, i, j, reg;
        int generation, new_generation;
        unsigned long flags;

        reg = reg_read(ohci, OHCI1394_NodeID);
        if (!(reg & OHCI1394_NodeID_idValid)) {
                fw_error("node ID not valid, new bus reset in progress\n");
                return;
        }
        ohci->node_id = reg & 0xffff;

        /*
         * The count in the SelfIDCount register is the number of
         * bytes in the self ID receive buffer.  Since we also receive
         * the inverted quadlets and a header quadlet, we shift one
         * bit extra to get the actual number of self IDs.
         */

        self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
        generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

        for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
                if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
                        fw_error("inconsistent self IDs\n");
                ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
        }

        /*
         * Check the consistency of the self IDs we just read.  The
         * problem we face is that a new bus reset can start while we
         * read out the self IDs from the DMA buffer.  If this happens,
         * the DMA buffer will be overwritten with new self IDs and we
         * will read out inconsistent data.  The OHCI specification
         * (section 11.2) recommends a technique similar to
         * linux/seqlock.h, where we remember the generation of the
         * self IDs in the buffer before reading them out and compare
         * it to the current generation after reading them out.  If
         * the two generations match we know we have a consistent set
         * of self IDs.
         */

        new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
        if (new_generation != generation) {
                fw_notify("recursive bus reset detected, "
                          "discarding self IDs\n");
                return;
        }

        /* FIXME: Document how the locking works. */
        spin_lock_irqsave(&ohci->lock, flags);

        ohci->generation = generation;
        context_stop(&ohci->at_request_ctx);
        context_stop(&ohci->at_response_ctx);
        reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

        /*
         * This next bit is unrelated to the AT context stuff but we
         * have to do it under the spinlock also.  If a new config rom
         * was set up before this reset, the old one is now no longer
         * in use and we can free it.  Update the config rom pointers
         * to point to the current config rom and clear the
         * next_config_rom pointer so a new update can take place.
         */

        if (ohci->next_config_rom != NULL) {
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->config_rom, ohci->config_rom_bus);
                ohci->config_rom      = ohci->next_config_rom;
                ohci->config_rom_bus  = ohci->next_config_rom_bus;
                ohci->next_config_rom = NULL;

                /*
                 * Restore config_rom image and manually update
                 * config_rom registers.  Writing the header quadlet
                 * will indicate that the config rom is ready, so we
                 * do that last.
                 */
                reg_write(ohci, OHCI1394_BusOptions,
                          be32_to_cpu(ohci->config_rom[2]));
                ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
                reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
        }

        spin_unlock_irqrestore(&ohci->lock, flags);

        fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
                                 self_id_count, ohci->self_id_buffer);
}
static irqreturn_t irq_handler(int irq, void *data)
{
        struct fw_ohci *ohci = data;
        u32 event, iso_event, cycle_time;
        int i;

        event = reg_read(ohci, OHCI1394_IntEventClear);

        if (!event)
                return IRQ_NONE;

        reg_write(ohci, OHCI1394_IntEventClear, event);

        if (event & OHCI1394_selfIDComplete)
                tasklet_schedule(&ohci->bus_reset_tasklet);

        if (event & OHCI1394_RQPkt)
                tasklet_schedule(&ohci->ar_request_ctx.tasklet);

        if (event & OHCI1394_RSPkt)
                tasklet_schedule(&ohci->ar_response_ctx.tasklet);

        if (event & OHCI1394_reqTxComplete)
                tasklet_schedule(&ohci->at_request_ctx.tasklet);

        if (event & OHCI1394_respTxComplete)
                tasklet_schedule(&ohci->at_response_ctx.tasklet);

        iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
        reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

        while (iso_event) {
                i = ffs(iso_event) - 1;
                tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
                iso_event &= ~(1 << i);
        }

        iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
        reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

        while (iso_event) {
                i = ffs(iso_event) - 1;
                tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
                iso_event &= ~(1 << i);
        }

        if (event & OHCI1394_cycle64Seconds) {
                cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                if ((cycle_time & 0x80000000) == 0)
                        ohci->bus_seconds++;
        }

        return IRQ_HANDLED;
}
static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct pci_dev *dev = to_pci_dev(card->device);

        /*
         * When the link is not yet enabled, the atomic config rom
         * update mechanism described below in ohci_set_config_rom()
         * is not active.  We have to update ConfigRomHeader and
         * BusOptions manually, and the write to ConfigROMmap takes
         * effect immediately.  We tie this to the enabling of the
         * link, so we have a valid config rom before enabling - the
         * OHCI requires that ConfigROMhdr and BusOptions have valid
         * values before enabling.
         *
         * However, when the ConfigROMmap is written, some controllers
         * always read back quadlets 0 and 2 from the config rom to
         * the ConfigRomHeader and BusOptions registers on bus reset.
         * They shouldn't do that in this initial case where the link
         * isn't enabled.  This means we have to use the same
         * workaround here, setting the bus header to 0 and then write
         * the right values in the bus reset tasklet.
         */

        ohci->next_config_rom =
                dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                   &ohci->next_config_rom_bus, GFP_KERNEL);
        if (ohci->next_config_rom == NULL)
                return -ENOMEM;

        memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
        fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);

        ohci->next_header = config_rom[0];
        ohci->next_config_rom[0] = 0;
        reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
        reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
        reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

        reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

        if (request_irq(dev->irq, irq_handler,
                        IRQF_SHARED, ohci_driver_name, ohci)) {
                fw_error("Failed to allocate shared interrupt %d.\n",
                         dev->irq);
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->next_config_rom,
                                  ohci->next_config_rom_bus);
                ohci->next_config_rom = NULL;
                return -EIO;
        }

        reg_write(ohci, OHCI1394_HCControlSet,
                  OHCI1394_HCControl_linkEnable |
                  OHCI1394_HCControl_BIBimageValid);
        flush_writes(ohci);

        /*
         * We are ready to go, initiate bus reset to finish the
         * initialization.
         */

        fw_core_initiate_bus_reset(&ohci->card, 1);

        return 0;
}
static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
        struct fw_ohci *ohci;
        unsigned long flags;
        int retval = 0;
        __be32 *next_config_rom;
        dma_addr_t next_config_rom_bus;

        ohci = fw_ohci(card);

        /*
         * When the OHCI controller is enabled, the config rom update
         * mechanism is a bit tricky, but easy enough to use.  See
         * section 5.5.6 in the OHCI specification.
         *
         * The OHCI controller caches the new config rom address in a
         * shadow register (ConfigROMmapNext) and needs a bus reset
         * for the changes to take place.  When the bus reset is
         * detected, the controller loads the new values for the
         * ConfigRomHeader and BusOptions registers from the specified
         * config rom and loads ConfigROMmap from the ConfigROMmapNext
         * shadow register.  All automatically and atomically.
         *
         * Now, there's a twist to this story.  The automatic load of
         * ConfigRomHeader and BusOptions doesn't honor the
         * noByteSwapData bit, so with a be32 config rom, the
         * controller will load be32 values in to these registers
         * during the atomic update, even on little endian
         * architectures.  The workaround we use is to put a 0 in the
         * header quadlet; 0 is endian agnostic and means that the
         * config rom isn't ready yet.  In the bus reset tasklet we
         * then set up the real values for the two registers.
         *
         * We use ohci->lock to avoid racing with the code that sets
         * ohci->next_config_rom to NULL (see bus_reset_tasklet).
         */

        next_config_rom =
                dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                   &next_config_rom_bus, GFP_KERNEL);
        if (next_config_rom == NULL)
                return -ENOMEM;

        spin_lock_irqsave(&ohci->lock, flags);

        if (ohci->next_config_rom == NULL) {
                ohci->next_config_rom = next_config_rom;
                ohci->next_config_rom_bus = next_config_rom_bus;

                memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
                fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
                                  length * 4);

                ohci->next_header = config_rom[0];
                ohci->next_config_rom[0] = 0;

                reg_write(ohci, OHCI1394_ConfigROMmap,
                          ohci->next_config_rom_bus);
        } else {
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  next_config_rom, next_config_rom_bus);
                retval = -EBUSY;
        }

        spin_unlock_irqrestore(&ohci->lock, flags);

        /*
         * Now initiate a bus reset to have the changes take
         * effect.  We clean up the old config rom memory and DMA
         * mappings in the bus reset tasklet, since the OHCI
         * controller could need to access it before the bus reset
         * takes effect.
         */
        if (retval == 0)
                fw_core_initiate_bus_reset(&ohci->card, 1);

        return retval;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);

        at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);

        at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct context *ctx = &ohci->at_request_ctx;
        struct driver_data *driver_data = packet->driver_data;
        int retval = -ENOENT;

        tasklet_disable(&ctx->tasklet);

        if (packet->ack != 0)
                goto out;

        driver_data->packet = NULL;
        packet->ack = RCODE_CANCELLED;
        packet->callback(packet, &ohci->card, packet->ack);
        retval = 0;

 out:
        tasklet_enable(&ctx->tasklet);

        return retval;
}
static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
        struct fw_ohci *ohci = fw_ohci(card);
        unsigned long flags;
        int n, retval = 0;

        /*
         * FIXME:  Make sure this bitmask is cleared when we clear the busReset
         * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
         */

        spin_lock_irqsave(&ohci->lock, flags);

        if (ohci->generation != generation) {
                retval = -ESTALE;
                goto out;
        }

        /*
         * Note, if the node ID contains a non-local bus ID, physical DMA is
         * enabled for _all_ nodes on remote buses.
         */

        n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
        if (n < 32)
                reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
        else
                reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

        flush_writes(ohci);
 out:
        spin_unlock_irqrestore(&ohci->lock, flags);
        return retval;
}
static u64
ohci_get_bus_time(struct fw_card *card)
{
        struct fw_ohci *ohci = fw_ohci(card);
        u32 cycle_time;
        u64 bus_time;

        cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
        bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;

        return bus_time;
}
static int handle_ir_dualbuffer_packet(struct context *context,
                                       struct descriptor *d,
                                       struct descriptor *last)
{
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);
        struct db_descriptor *db = (struct db_descriptor *) d;
        __le32 *ir_header;
        size_t header_length;
        void *p, *end;
        int i;

        if (db->first_res_count > 0 && db->second_res_count > 0)
                /* This descriptor isn't done yet, stop iteration. */
                return 0;

        header_length = le16_to_cpu(db->first_req_count) -
                le16_to_cpu(db->first_res_count);

        i = ctx->header_length;
        p = db + 1;
        end = p + header_length;
        while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
                /*
                 * The iso header is byteswapped to little endian by
                 * the controller, but the remaining header quadlets
                 * are big endian.  We want to present all the headers
                 * as big endian, so we have to swap the first
                 * quadlet.
                 */
                *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
                memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
                i += ctx->base.header_size;
                p += ctx->base.header_size + 4;
        }

        ctx->header_length = i;

        if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
                ir_header = (__le32 *) (db + 1);
                ctx->base.callback(&ctx->base,
                                   le32_to_cpu(ir_header[0]) & 0xffff,
                                   ctx->header_length, ctx->header,
                                   ctx->base.callback_data);
                ctx->header_length = 0;
        }

        return 1;
}
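/*
 * Each packet contributes base.header_size + 4 bytes to the header
 * buffer; the extra quadlet is the status word the controller stores
 * alongside the headers, and the loop above re-swaps the
 * little-endian iso header so that all header quadlets are presented
 * big endian.
 */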
static int handle_it_packet(struct context *context,
                            struct descriptor *d,
                            struct descriptor *last)
{
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);

        if (last->transfer_status == 0)
                /* This descriptor isn't done yet, stop iteration. */
                return 0;

        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
                ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
                                   0, NULL, ctx->base.callback_data);

        return 1;
}
static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct iso_context *ctx, *list;
        descriptor_callback_t callback;
        u32 *mask, regs;
        unsigned long flags;
        int index, retval = -ENOMEM;

        if (type == FW_ISO_CONTEXT_TRANSMIT) {
                mask = &ohci->it_context_mask;
                list = ohci->it_context_list;
                callback = handle_it_packet;
        } else {
                mask = &ohci->ir_context_mask;
                list = ohci->ir_context_list;
                callback = handle_ir_dualbuffer_packet;
        }

        /* FIXME: We need a fallback for pre 1.1 OHCI. */
        if (callback == handle_ir_dualbuffer_packet &&
            ohci->version < OHCI_VERSION_1_1)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&ohci->lock, flags);
        index = ffs(*mask) - 1;
        if (index >= 0)
                *mask &= ~(1 << index);
        spin_unlock_irqrestore(&ohci->lock, flags);

        if (index < 0)
                return ERR_PTR(-EBUSY);

        if (type == FW_ISO_CONTEXT_TRANSMIT)
                regs = OHCI1394_IsoXmitContextBase(index);
        else
                regs = OHCI1394_IsoRcvContextBase(index);

        ctx = &list[index];
        memset(ctx, 0, sizeof(*ctx));
        ctx->header_length = 0;
        ctx->header = (void *) __get_free_page(GFP_KERNEL);
        if (ctx->header == NULL)
                goto out;

        retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
                              regs, callback);
        if (retval < 0)
                goto out_with_header;

        return &ctx->base;

 out_with_header:
        free_page((unsigned long)ctx->header);
 out:
        spin_lock_irqsave(&ohci->lock, flags);
        *mask |= 1 << index;
        spin_unlock_irqrestore(&ohci->lock, flags);

        return ERR_PTR(retval);
}
static int ohci_start_iso(struct fw_iso_context *base,
                          s32 cycle, u32 sync, u32 tags)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct fw_ohci *ohci = ctx->context.ohci;
        u32 control, match;
        int index;

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                match = 0;
                if (cycle >= 0)
                        match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
                                (cycle & 0x7fff) << 16;

                reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
                reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
                context_run(&ctx->context, match);
        } else {
                index = ctx - ohci->ir_context_list;
                control = IR_CONTEXT_DUAL_BUFFER_MODE | IR_CONTEXT_ISOCH_HEADER;
                match = (tags << 28) | (sync << 8) | ctx->base.channel;
                if (cycle >= 0) {
                        match |= (cycle & 0x07fff) << 12;
                        control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
                }

                reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
                reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
                reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
                context_run(&ctx->context, control);
        }

        return 0;
}
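/*
 * For transmit contexts the cycle match value is written to bits
 * 30-16 of the context control register, gated by the enable bit 31;
 * for receive contexts the contextMatch register packs the tag bits,
 * cycle match, sync field and channel number, mirroring the shifts
 * used above.
 */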
static int ohci_stop_iso(struct fw_iso_context *base)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        int index;

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
        } else {
                index = ctx - ohci->ir_context_list;
                reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
        }
        flush_writes(ohci);
        context_stop(&ctx->context);

        return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        unsigned long flags;
        int index;

        ohci_stop_iso(base);
        context_release(&ctx->context);
        free_page((unsigned long)ctx->header);

        spin_lock_irqsave(&ohci->lock, flags);

        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
                index = ctx - ohci->it_context_list;
                ohci->it_context_mask |= 1 << index;
        } else {
                index = ctx - ohci->ir_context_list;
                ohci->ir_context_mask |= 1 << index;
        }

        spin_unlock_irqrestore(&ohci->lock, flags);
}
static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
                        struct fw_iso_packet *packet,
                        struct fw_iso_buffer *buffer,
                        unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct descriptor *d, *last, *pd;
        struct fw_iso_packet *p;
        __le32 *header;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, payload_z, irq;
        u32 payload_index, payload_end_index, next_page_index;
        int page, end_page, i, length, offset;

        /*
         * FIXME: Cycle lost behavior should be configurable: lose
         * packet, retransmit or terminate.
         */

        p = packet;
        payload_index = payload;

        if (p->skip)
                z = 1;
        else
                z = 2;
        if (p->header_length > 0)
                z++;

        /* Determine the first page the payload isn't contained in. */
        end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
        if (p->payload_length > 0)
                payload_z = end_page - (payload_index >> PAGE_SHIFT);
        else
                payload_z = 0;

        z += payload_z;

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

        d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
        if (d == NULL)
                return -ENOMEM;

        if (!p->skip) {
                d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
                d[0].req_count = cpu_to_le16(8);

                header = (__le32 *) &d[1];
                header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
                                        IT_HEADER_TAG(p->tag) |
                                        IT_HEADER_TCODE(TCODE_STREAM_DATA) |
                                        IT_HEADER_CHANNEL(ctx->base.channel) |
                                        IT_HEADER_SPEED(ctx->base.speed));
                header[1] =
                        cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
                                                          p->payload_length));
        }

        if (p->header_length > 0) {
                d[2].req_count    = cpu_to_le16(p->header_length);
                d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
                memcpy(&d[z], p->header, p->header_length);
        }

        pd = d + z - payload_z;
        payload_end_index = payload_index + p->payload_length;
        for (i = 0; i < payload_z; i++) {
                page            = payload_index >> PAGE_SHIFT;
                offset          = payload_index & ~PAGE_MASK;
                next_page_index = (page + 1) << PAGE_SHIFT;
                length          =
                        min(next_page_index, payload_end_index) - payload_index;
                pd[i].req_count = cpu_to_le16(length);

                page_bus = page_private(buffer->pages[page]);
                pd[i].data_address = cpu_to_le32(page_bus + offset);

                payload_index += length;
        }

        if (p->interrupt)
                irq = DESCRIPTOR_IRQ_ALWAYS;
        else
                irq = DESCRIPTOR_NO_IRQ;

        last = z == 2 ? d : d + z - 1;
        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
                                     DESCRIPTOR_STATUS |
                                     DESCRIPTOR_BRANCH_ALWAYS |
                                     irq);

        context_append(&ctx->context, d, z, header_z);

        return 0;
}
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
                                  struct fw_iso_packet *packet,
                                  struct fw_iso_buffer *buffer,
                                  unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct db_descriptor *db = NULL;
        struct descriptor *d;
        struct fw_iso_packet *p;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, length, rest;
        int page, offset, packet_count, header_size;

        /*
         * FIXME: Cycle lost behavior should be configurable: lose
         * packet, retransmit or terminate.
         */

        if (packet->skip) {
                d = context_get_descriptors(&ctx->context, 2, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                db = (struct db_descriptor *) d;
                db->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                          DESCRIPTOR_BRANCH_ALWAYS |
                                          DESCRIPTOR_WAIT);
                db->first_size = cpu_to_le16(ctx->base.header_size + 4);
                context_append(&ctx->context, d, 2, 0);
        }

        p = packet;
        z = 2;

        /*
         * The OHCI controller puts the status word in the header
         * buffer too, so we need 4 extra bytes per packet.
         */
        packet_count = p->header_length / ctx->base.header_size;
        header_size = packet_count * (ctx->base.header_size + 4);

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(header_size, sizeof(*d));
        page     = payload >> PAGE_SHIFT;
        offset   = payload & ~PAGE_MASK;
        rest     = p->payload_length;

        /* FIXME: OHCI 1.0 doesn't support dual buffer receive */
        /* FIXME: make packet-per-buffer/dual-buffer a context option */
        while (rest > 0) {
                d = context_get_descriptors(&ctx->context,
                                            z + header_z, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                db = (struct db_descriptor *) d;
                db->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                          DESCRIPTOR_BRANCH_ALWAYS);
                db->first_size = cpu_to_le16(ctx->base.header_size + 4);
                db->first_req_count = cpu_to_le16(header_size);
                db->first_res_count = db->first_req_count;
                db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

                if (offset + rest < PAGE_SIZE)
                        length = rest;
                else
                        length = PAGE_SIZE - offset;

                db->second_req_count = cpu_to_le16(length);
                db->second_res_count = db->second_req_count;
                page_bus = page_private(buffer->pages[page]);
                db->second_buffer = cpu_to_le32(page_bus + offset);

                if (p->interrupt && length == rest)
                        db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

                context_append(&ctx->context, d, z, header_z);
                offset = (offset + length) & ~PAGE_MASK;
                rest -= length;
                page++;
        }

        return 0;
}
static int
ohci_queue_iso(struct fw_iso_context *base,
               struct fw_iso_packet *packet,
               struct fw_iso_buffer *buffer,
               unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);

        if (base->type == FW_ISO_CONTEXT_TRANSMIT)
                return ohci_queue_iso_transmit(base, packet, buffer, payload);
        else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
                return ohci_queue_iso_receive_dualbuffer(base, packet,
                                                         buffer, payload);
        else
                /* FIXME: Implement fallback for OHCI 1.0 controllers. */
                return -EINVAL;
}
static const struct fw_card_driver ohci_driver = {
        .name                 = ohci_driver_name,
        .enable               = ohci_enable,
        .update_phy_reg       = ohci_update_phy_reg,
        .set_config_rom       = ohci_set_config_rom,
        .send_request         = ohci_send_request,
        .send_response        = ohci_send_response,
        .cancel_packet        = ohci_cancel_packet,
        .enable_phys_dma      = ohci_enable_phys_dma,
        .get_bus_time         = ohci_get_bus_time,

        .allocate_iso_context = ohci_allocate_iso_context,
        .free_iso_context     = ohci_free_iso_context,
        .queue_iso            = ohci_queue_iso,
        .start_iso            = ohci_start_iso,
        .stop_iso             = ohci_stop_iso,
};
static int software_reset(struct fw_ohci *ohci)
{
        int i;

        reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                if ((reg_read(ohci, OHCI1394_HCControlSet) &
                     OHCI1394_HCControl_softReset) == 0)
                        return 0;
                msleep(1);
        }

        return -EBUSY;
}
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
        struct fw_ohci *ohci;
        u32 bus_options, max_receive, link_speed;
        u64 guid;
        int err;
        size_t size;

        ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
        if (ohci == NULL) {
                fw_error("Could not malloc fw_ohci data.\n");
                return -ENOMEM;
        }

        fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

        err = pci_enable_device(dev);
        if (err) {
                fw_error("Failed to enable OHCI hardware.\n");
                goto fail_put_card;
        }

        pci_set_master(dev);
        pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
        pci_set_drvdata(dev, ohci);

        spin_lock_init(&ohci->lock);

        tasklet_init(&ohci->bus_reset_tasklet,
                     bus_reset_tasklet, (unsigned long)ohci);

        err = pci_request_region(dev, 0, ohci_driver_name);
        if (err) {
                fw_error("MMIO resource unavailable\n");
                goto fail_disable;
        }

        ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
        if (ohci->registers == NULL) {
                fw_error("Failed to remap registers\n");
                err = -ENXIO;
                goto fail_iomem;
        }

        if (software_reset(ohci)) {
                fw_error("Failed to reset ohci card.\n");
                err = -EBUSY;
                goto fail_registers;
        }

        /*
         * Now enable LPS, which we need in order to start accessing
         * most of the registers.  In fact, on some cards (ALI M5251),
         * accessing registers in the SClk domain without LPS enabled
         * will lock up the machine.  Wait 50msec to make sure we have
         * full link enabled.
         */
        reg_write(ohci, OHCI1394_HCControlSet,
                  OHCI1394_HCControl_LPS |
                  OHCI1394_HCControl_postedWriteEnable);
        flush_writes(ohci);
        msleep(50);

        reg_write(ohci, OHCI1394_HCControlClear,
                  OHCI1394_HCControl_noByteSwapData);

        reg_write(ohci, OHCI1394_LinkControlSet,
                  OHCI1394_LinkControl_rcvSelfID |
                  OHCI1394_LinkControl_cycleTimerEnable |
                  OHCI1394_LinkControl_cycleMaster);

        ar_context_init(&ohci->ar_request_ctx, ohci,
                        OHCI1394_AsReqRcvContextControlSet);

        ar_context_init(&ohci->ar_response_ctx, ohci,
                        OHCI1394_AsRspRcvContextControlSet);

        context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
                     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

        context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
                     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

        reg_write(ohci, OHCI1394_ATRetries,
                  OHCI1394_MAX_AT_REQ_RETRIES |
                  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
                  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

        /*
         * Probe the number of supported contexts by setting and
         * reading back the interrupt masks: IsoXmit for transmit (IT)
         * contexts, IsoRecv for receive (IR) contexts.
         */
        reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
        ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
        size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
        ohci->it_context_list = kzalloc(size, GFP_KERNEL);

        reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
        ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
        reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
        size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
        ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

        if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
                fw_error("Out of memory for it/ir contexts.\n");
                err = -ENOMEM;
                goto fail_registers;
        }

        /* self-id dma buffer allocation */
        ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
                                               SELF_ID_BUF_SIZE,
                                               &ohci->self_id_bus,
                                               GFP_KERNEL);
        if (ohci->self_id_cpu == NULL) {
                fw_error("Out of memory for self ID buffer.\n");
                err = -ENOMEM;
                goto fail_registers;
        }

        reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
        reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
        reg_write(ohci, OHCI1394_IntEventClear, ~0);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        reg_write(ohci, OHCI1394_IntMaskSet,
                  OHCI1394_selfIDComplete |
                  OHCI1394_RQPkt | OHCI1394_RSPkt |
                  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
                  OHCI1394_isochRx | OHCI1394_isochTx |
                  OHCI1394_masterIntEnable |
                  OHCI1394_cycle64Seconds);

        bus_options = reg_read(ohci, OHCI1394_BusOptions);
        max_receive = (bus_options >> 12) & 0xf;
        link_speed = bus_options & 0x7;
        guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
                reg_read(ohci, OHCI1394_GUIDLo);

        err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
        if (err < 0)
                goto fail_self_id;

        ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
        fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
                  dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);

        return 0;

 fail_self_id:
        dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
                          ohci->self_id_cpu, ohci->self_id_bus);
 fail_registers:
        kfree(ohci->it_context_list);
        kfree(ohci->ir_context_list);
        pci_iounmap(dev, ohci->registers);
 fail_iomem:
        pci_release_region(dev, 0);
 fail_disable:
        pci_disable_device(dev);
 fail_put_card:
        fw_card_put(&ohci->card);

        return err;
}
static void pci_remove(struct pci_dev *dev)
{
        struct fw_ohci *ohci;

        ohci = pci_get_drvdata(dev);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        flush_writes(ohci);
        fw_core_remove_card(&ohci->card);

        /*
         * FIXME: Fail all pending packets here, now that the upper
         * layers can't queue any more.
         */

        software_reset(ohci);
        free_irq(dev->irq, ohci);
        dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
                          ohci->self_id_cpu, ohci->self_id_bus);
        kfree(ohci->it_context_list);
        kfree(ohci->ir_context_list);
        pci_iounmap(dev, ohci->registers);
        pci_release_region(dev, 0);
        pci_disable_device(dev);
        fw_card_put(&ohci->card);

        fw_notify("Removed fw-ohci device.\n");
}
static struct pci_device_id pci_table[] = {
        { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
        { }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
        .name     = ohci_driver_name,
        .id_table = pci_table,
        .probe    = pci_probe,
        .remove   = pci_remove,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
        return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
        pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);