/*
 * drivers/firewire/fw-ohci.c
 *
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "fw-ohci.h"
#include "fw-transaction.h"
46 #define DESCRIPTOR_OUTPUT_MORE 0
47 #define DESCRIPTOR_OUTPUT_LAST (1 << 12)
48 #define DESCRIPTOR_INPUT_MORE (2 << 12)
49 #define DESCRIPTOR_INPUT_LAST (3 << 12)
50 #define DESCRIPTOR_STATUS (1 << 11)
51 #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
52 #define DESCRIPTOR_PING (1 << 7)
53 #define DESCRIPTOR_YY (1 << 6)
54 #define DESCRIPTOR_NO_IRQ (0 << 4)
55 #define DESCRIPTOR_IRQ_ERROR (1 << 4)
56 #define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
57 #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
58 #define DESCRIPTOR_WAIT (3 << 0)
60 struct descriptor {
61 __le16 req_count;
62 __le16 control;
63 __le32 data_address;
64 __le32 branch_address;
65 __le16 res_count;
66 __le16 transfer_status;
67 } __attribute__((aligned(16)));
69 struct db_descriptor {
70 __le16 first_size;
71 __le16 control;
72 __le16 second_req_count;
73 __le16 first_req_count;
74 __le32 branch_address;
75 __le16 second_res_count;
76 __le16 first_res_count;
77 __le32 reserved0;
78 __le32 first_buffer;
79 __le32 second_buffer;
80 __le32 reserved1;
81 } __attribute__((aligned(16)));
83 #define CONTROL_SET(regs) (regs)
84 #define CONTROL_CLEAR(regs) ((regs) + 4)
85 #define COMMAND_PTR(regs) ((regs) + 12)
86 #define CONTEXT_MATCH(regs) ((regs) + 16)
88 struct ar_buffer {
89 struct descriptor descriptor;
90 struct ar_buffer *next;
91 __le32 data[0];
94 struct ar_context {
95 struct fw_ohci *ohci;
96 struct ar_buffer *current_buffer;
97 struct ar_buffer *last_buffer;
98 void *pointer;
99 u32 regs;
100 struct tasklet_struct tasklet;
103 struct context;
105 typedef int (*descriptor_callback_t)(struct context *ctx,
106 struct descriptor *d,
107 struct descriptor *last);
110 * A buffer that contains a block of DMA-able coherent memory used for
111 * storing a portion of a DMA descriptor program.
113 struct descriptor_buffer {
114 struct list_head list;
115 dma_addr_t buffer_bus;
116 size_t buffer_size;
117 size_t used;
118 struct descriptor buffer[0];
121 struct context {
122 struct fw_ohci *ohci;
123 u32 regs;
124 int total_allocation;
127 * List of page-sized buffers for storing DMA descriptors.
128 * Head of list contains buffers in use and tail of list contains
129 * free buffers.
131 struct list_head buffer_list;
134 * Pointer to a buffer inside buffer_list that contains the tail
135 * end of the current DMA program.
137 struct descriptor_buffer *buffer_tail;
140 * The descriptor containing the branch address of the first
141 * descriptor that has not yet been filled by the device.
143 struct descriptor *last;
146 * The last descriptor in the DMA program. It contains the branch
147 * address that must be updated upon appending a new descriptor.
149 struct descriptor *prev;
151 descriptor_callback_t callback;
153 struct tasklet_struct tasklet;
156 #define IT_HEADER_SY(v) ((v) << 0)
157 #define IT_HEADER_TCODE(v) ((v) << 4)
158 #define IT_HEADER_CHANNEL(v) ((v) << 8)
159 #define IT_HEADER_TAG(v) ((v) << 14)
160 #define IT_HEADER_SPEED(v) ((v) << 16)
161 #define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
163 struct iso_context {
164 struct fw_iso_context base;
165 struct context context;
166 int excess_bytes;
167 void *header;
168 size_t header_length;
171 #define CONFIG_ROM_SIZE 1024
173 struct fw_ohci {
174 struct fw_card card;
176 u32 version;
177 __iomem char *registers;
178 dma_addr_t self_id_bus;
179 __le32 *self_id_cpu;
180 struct tasklet_struct bus_reset_tasklet;
181 int node_id;
182 int generation;
183 int request_generation;
184 u32 bus_seconds;
185 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
186 =======
187 bool old_uninorth;
188 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
191 * Spinlock for accessing fw_ohci data. Never call out of
192 * this driver with this lock held.
194 spinlock_t lock;
195 u32 self_id_buffer[512];
197 /* Config rom buffers */
198 __be32 *config_rom;
199 dma_addr_t config_rom_bus;
200 __be32 *next_config_rom;
201 dma_addr_t next_config_rom_bus;
202 u32 next_header;
204 struct ar_context ar_request_ctx;
205 struct ar_context ar_response_ctx;
206 struct context at_request_ctx;
207 struct context at_response_ctx;
209 u32 it_context_mask;
210 struct iso_context *it_context_list;
211 u32 ir_context_mask;
212 struct iso_context *ir_context_list;
215 static inline struct fw_ohci *fw_ohci(struct fw_card *card)
217 return container_of(card, struct fw_ohci, card);
220 #define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
221 #define IR_CONTEXT_BUFFER_FILL 0x80000000
222 #define IR_CONTEXT_ISOCH_HEADER 0x40000000
223 #define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
224 #define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
225 #define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
227 #define CONTEXT_RUN 0x8000
228 #define CONTEXT_WAKE 0x1000
229 #define CONTEXT_DEAD 0x0800
230 #define CONTEXT_ACTIVE 0x0400
232 #define OHCI1394_MAX_AT_REQ_RETRIES 0x2
233 #define OHCI1394_MAX_AT_RESP_RETRIES 0x2
234 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
236 #define FW_OHCI_MAJOR 240
237 #define OHCI1394_REGISTER_SIZE 0x800
238 #define OHCI_LOOP_COUNT 500
239 #define OHCI1394_PCI_HCI_Control 0x40
240 #define SELF_ID_BUF_SIZE 0x800
241 #define OHCI_TCODE_PHY_PACKET 0x0e
242 #define OHCI_VERSION_1_1 0x010010
244 static char ohci_driver_name[] = KBUILD_MODNAME;
246 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
248 writel(data, ohci->registers + offset);
251 static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
253 return readl(ohci->registers + offset);
256 static inline void flush_writes(const struct fw_ohci *ohci)
258 /* Do a dummy read to flush writes. */
259 reg_read(ohci, OHCI1394_Version);
262 static int
263 ohci_update_phy_reg(struct fw_card *card, int addr,
264 int clear_bits, int set_bits)
266 struct fw_ohci *ohci = fw_ohci(card);
267 u32 val, old;
269 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
270 flush_writes(ohci);
271 msleep(2);
272 val = reg_read(ohci, OHCI1394_PhyControl);
273 if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
274 fw_error("failed to set phy reg bits.\n");
275 return -EBUSY;
278 old = OHCI1394_PhyControl_ReadData(val);
279 old = (old & ~clear_bits) | set_bits;
280 reg_write(ohci, OHCI1394_PhyControl,
281 OHCI1394_PhyControl_Write(addr, old));
283 return 0;
286 static int ar_context_add_page(struct ar_context *ctx)
288 struct device *dev = ctx->ohci->card.device;
289 struct ar_buffer *ab;
290 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
291 dma_addr_t ab_bus;
292 =======
293 dma_addr_t uninitialized_var(ab_bus);
294 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
295 size_t offset;
297 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
298 ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
299 =======
300 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
301 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
302 if (ab == NULL)
303 return -ENOMEM;
305 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
306 ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
307 if (dma_mapping_error(ab_bus)) {
308 free_page((unsigned long) ab);
309 return -ENOMEM;
312 =======
313 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
314 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
315 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
316 DESCRIPTOR_STATUS |
317 DESCRIPTOR_BRANCH_ALWAYS);
318 offset = offsetof(struct ar_buffer, data);
319 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
320 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
321 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
322 ab->descriptor.branch_address = 0;
324 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
325 dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
327 =======
328 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
329 ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
330 ctx->last_buffer->next = ab;
331 ctx->last_buffer = ab;
333 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
334 flush_writes(ctx->ohci);
336 return 0;
/*
 * Conditional byte swap for received quadlets.  On old Apple UniNorth
 * bridges (PPC32) the controller apparently delivers AR/self-ID data
 * already in host order, so skip the swap there — NOTE(review): quirk
 * inherited from upstream; verify against the UniNorth errata.
 * `ohci` must be in scope at every use site.
 */
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
349 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
351 struct fw_ohci *ohci = ctx->ohci;
352 struct fw_packet p;
353 u32 status, length, tcode;
355 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
356 p.header[0] = le32_to_cpu(buffer[0]);
357 p.header[1] = le32_to_cpu(buffer[1]);
358 p.header[2] = le32_to_cpu(buffer[2]);
359 =======
360 p.header[0] = cond_le32_to_cpu(buffer[0]);
361 p.header[1] = cond_le32_to_cpu(buffer[1]);
362 p.header[2] = cond_le32_to_cpu(buffer[2]);
363 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
365 tcode = (p.header[0] >> 4) & 0x0f;
366 switch (tcode) {
367 case TCODE_WRITE_QUADLET_REQUEST:
368 case TCODE_READ_QUADLET_RESPONSE:
369 p.header[3] = (__force __u32) buffer[3];
370 p.header_length = 16;
371 p.payload_length = 0;
372 break;
374 case TCODE_READ_BLOCK_REQUEST :
375 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
376 p.header[3] = le32_to_cpu(buffer[3]);
377 =======
378 p.header[3] = cond_le32_to_cpu(buffer[3]);
379 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
380 p.header_length = 16;
381 p.payload_length = 0;
382 break;
384 case TCODE_WRITE_BLOCK_REQUEST:
385 case TCODE_READ_BLOCK_RESPONSE:
386 case TCODE_LOCK_REQUEST:
387 case TCODE_LOCK_RESPONSE:
388 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
389 p.header[3] = le32_to_cpu(buffer[3]);
390 =======
391 p.header[3] = cond_le32_to_cpu(buffer[3]);
392 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
393 p.header_length = 16;
394 p.payload_length = p.header[3] >> 16;
395 break;
397 case TCODE_WRITE_RESPONSE:
398 case TCODE_READ_QUADLET_REQUEST:
399 case OHCI_TCODE_PHY_PACKET:
400 p.header_length = 12;
401 p.payload_length = 0;
402 break;
405 p.payload = (void *) buffer + p.header_length;
407 /* FIXME: What to do about evt_* errors? */
408 length = (p.header_length + p.payload_length + 3) / 4;
409 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
410 status = le32_to_cpu(buffer[length]);
411 =======
412 status = cond_le32_to_cpu(buffer[length]);
413 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
415 p.ack = ((status >> 16) & 0x1f) - 16;
416 p.speed = (status >> 21) & 0x7;
417 p.timestamp = status & 0xffff;
418 p.generation = ohci->request_generation;
421 * The OHCI bus reset handler synthesizes a phy packet with
422 * the new generation number when a bus reset happens (see
423 * section 8.4.2.3). This helps us determine when a request
424 * was received and make sure we send the response in the same
425 * generation. We only need this for requests; for responses
426 * we use the unique tlabel for finding the matching
427 * request.
430 if (p.ack + 16 == 0x09)
431 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
432 ohci->request_generation = (buffer[2] >> 16) & 0xff;
433 =======
434 ohci->request_generation = (p.header[2] >> 16) & 0xff;
435 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
436 else if (ctx == &ohci->ar_request_ctx)
437 fw_core_handle_request(&ohci->card, &p);
438 else
439 fw_core_handle_response(&ohci->card, &p);
441 return buffer + length + 1;
444 static void ar_context_tasklet(unsigned long data)
446 struct ar_context *ctx = (struct ar_context *)data;
447 struct fw_ohci *ohci = ctx->ohci;
448 struct ar_buffer *ab;
449 struct descriptor *d;
450 void *buffer, *end;
452 ab = ctx->current_buffer;
453 d = &ab->descriptor;
455 if (d->res_count == 0) {
456 size_t size, rest, offset;
457 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
458 =======
459 dma_addr_t buffer_bus;
460 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
463 * This descriptor is finished and we may have a
464 * packet split across this and the next buffer. We
465 * reuse the page for reassembling the split packet.
468 offset = offsetof(struct ar_buffer, data);
469 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
470 dma_unmap_single(ohci->card.device,
471 le32_to_cpu(ab->descriptor.data_address) - offset,
472 PAGE_SIZE, DMA_BIDIRECTIONAL);
473 =======
474 buffer_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
475 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
477 buffer = ab;
478 ab = ab->next;
479 d = &ab->descriptor;
480 size = buffer + PAGE_SIZE - ctx->pointer;
481 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
482 memmove(buffer, ctx->pointer, size);
483 memcpy(buffer + size, ab->data, rest);
484 ctx->current_buffer = ab;
485 ctx->pointer = (void *) ab->data + rest;
486 end = buffer + size + rest;
488 while (buffer < end)
489 buffer = handle_ar_packet(ctx, buffer);
491 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
492 free_page((unsigned long)buffer);
493 =======
494 dma_free_coherent(ohci->card.device, PAGE_SIZE,
495 buffer, buffer_bus);
496 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
497 ar_context_add_page(ctx);
498 } else {
499 buffer = ctx->pointer;
500 ctx->pointer = end =
501 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
503 while (buffer < end)
504 buffer = handle_ar_packet(ctx, buffer);
508 static int
509 ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
511 struct ar_buffer ab;
513 ctx->regs = regs;
514 ctx->ohci = ohci;
515 ctx->last_buffer = &ab;
516 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
518 ar_context_add_page(ctx);
519 ar_context_add_page(ctx);
520 ctx->current_buffer = ab.next;
521 ctx->pointer = ctx->current_buffer->data;
523 return 0;
526 static void ar_context_run(struct ar_context *ctx)
528 struct ar_buffer *ab = ctx->current_buffer;
529 dma_addr_t ab_bus;
530 size_t offset;
532 offset = offsetof(struct ar_buffer, data);
533 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
535 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
536 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
537 flush_writes(ctx->ohci);
540 static struct descriptor *
541 find_branch_descriptor(struct descriptor *d, int z)
543 int b, key;
545 b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
546 key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
548 /* figure out which descriptor the branch address goes in */
549 if (z == 2 && (b == 3 || key == 2))
550 return d;
551 else
552 return d + z - 1;
555 static void context_tasklet(unsigned long data)
557 struct context *ctx = (struct context *) data;
558 struct descriptor *d, *last;
559 u32 address;
560 int z;
561 struct descriptor_buffer *desc;
563 desc = list_entry(ctx->buffer_list.next,
564 struct descriptor_buffer, list);
565 last = ctx->last;
566 while (last->branch_address != 0) {
567 struct descriptor_buffer *old_desc = desc;
568 address = le32_to_cpu(last->branch_address);
569 z = address & 0xf;
570 address &= ~0xf;
572 /* If the branch address points to a buffer outside of the
573 * current buffer, advance to the next buffer. */
574 if (address < desc->buffer_bus ||
575 address >= desc->buffer_bus + desc->used)
576 desc = list_entry(desc->list.next,
577 struct descriptor_buffer, list);
578 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
579 last = find_branch_descriptor(d, z);
581 if (!ctx->callback(ctx, d, last))
582 break;
584 if (old_desc != desc) {
585 /* If we've advanced to the next buffer, move the
586 * previous buffer to the free list. */
587 unsigned long flags;
588 old_desc->used = 0;
589 spin_lock_irqsave(&ctx->ohci->lock, flags);
590 list_move_tail(&old_desc->list, &ctx->buffer_list);
591 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
593 ctx->last = last;
598 * Allocate a new buffer and add it to the list of free buffers for this
599 * context. Must be called with ohci->lock held.
601 static int
602 context_add_buffer(struct context *ctx)
604 struct descriptor_buffer *desc;
605 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
606 dma_addr_t bus_addr;
607 =======
608 dma_addr_t uninitialized_var(bus_addr);
609 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
610 int offset;
613 * 16MB of descriptors should be far more than enough for any DMA
614 * program. This will catch run-away userspace or DoS attacks.
616 if (ctx->total_allocation >= 16*1024*1024)
617 return -ENOMEM;
619 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
620 &bus_addr, GFP_ATOMIC);
621 if (!desc)
622 return -ENOMEM;
624 offset = (void *)&desc->buffer - (void *)desc;
625 desc->buffer_size = PAGE_SIZE - offset;
626 desc->buffer_bus = bus_addr + offset;
627 desc->used = 0;
629 list_add_tail(&desc->list, &ctx->buffer_list);
630 ctx->total_allocation += PAGE_SIZE;
632 return 0;
635 static int
636 context_init(struct context *ctx, struct fw_ohci *ohci,
637 u32 regs, descriptor_callback_t callback)
639 ctx->ohci = ohci;
640 ctx->regs = regs;
641 ctx->total_allocation = 0;
643 INIT_LIST_HEAD(&ctx->buffer_list);
644 if (context_add_buffer(ctx) < 0)
645 return -ENOMEM;
647 ctx->buffer_tail = list_entry(ctx->buffer_list.next,
648 struct descriptor_buffer, list);
650 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
651 ctx->callback = callback;
654 * We put a dummy descriptor in the buffer that has a NULL
655 * branch address and looks like it's been sent. That way we
656 * have a descriptor to append DMA programs to.
658 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
659 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
660 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
661 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
662 ctx->last = ctx->buffer_tail->buffer;
663 ctx->prev = ctx->buffer_tail->buffer;
665 return 0;
668 static void
669 context_release(struct context *ctx)
671 struct fw_card *card = &ctx->ohci->card;
672 struct descriptor_buffer *desc, *tmp;
674 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
675 dma_free_coherent(card->device, PAGE_SIZE, desc,
676 desc->buffer_bus -
677 ((void *)&desc->buffer - (void *)desc));
680 /* Must be called with ohci->lock held */
681 static struct descriptor *
682 context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
684 struct descriptor *d = NULL;
685 struct descriptor_buffer *desc = ctx->buffer_tail;
687 if (z * sizeof(*d) > desc->buffer_size)
688 return NULL;
690 if (z * sizeof(*d) > desc->buffer_size - desc->used) {
691 /* No room for the descriptor in this buffer, so advance to the
692 * next one. */
694 if (desc->list.next == &ctx->buffer_list) {
695 /* If there is no free buffer next in the list,
696 * allocate one. */
697 if (context_add_buffer(ctx) < 0)
698 return NULL;
700 desc = list_entry(desc->list.next,
701 struct descriptor_buffer, list);
702 ctx->buffer_tail = desc;
705 d = desc->buffer + desc->used / sizeof(*d);
706 memset(d, 0, z * sizeof(*d));
707 *d_bus = desc->buffer_bus + desc->used;
709 return d;
712 static void context_run(struct context *ctx, u32 extra)
714 struct fw_ohci *ohci = ctx->ohci;
716 reg_write(ohci, COMMAND_PTR(ctx->regs),
717 le32_to_cpu(ctx->last->branch_address));
718 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
719 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
720 flush_writes(ohci);
723 static void context_append(struct context *ctx,
724 struct descriptor *d, int z, int extra)
726 dma_addr_t d_bus;
727 struct descriptor_buffer *desc = ctx->buffer_tail;
729 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
731 desc->used += (z + extra) * sizeof(*d);
732 ctx->prev->branch_address = cpu_to_le32(d_bus | z);
733 ctx->prev = find_branch_descriptor(d, z);
735 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
736 flush_writes(ctx->ohci);
739 static void context_stop(struct context *ctx)
741 u32 reg;
742 int i;
744 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
745 flush_writes(ctx->ohci);
747 for (i = 0; i < 10; i++) {
748 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
749 if ((reg & CONTEXT_ACTIVE) == 0)
750 break;
752 fw_notify("context_stop: still active (0x%08x)\n", reg);
753 mdelay(1);
/* Per-packet bookkeeping stored in the fourth (reserved) descriptor. */
struct driver_data {
	struct fw_packet *packet;
};
762 * This function apppends a packet to the DMA queue for transmission.
763 * Must always be called with the ochi->lock held to ensure proper
764 * generation handling and locking around packet queue manipulation.
766 static int
767 at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
769 struct fw_ohci *ohci = ctx->ohci;
770 dma_addr_t d_bus, uninitialized_var(payload_bus);
771 struct driver_data *driver_data;
772 struct descriptor *d, *last;
773 __le32 *header;
774 int z, tcode;
775 u32 reg;
777 d = context_get_descriptors(ctx, 4, &d_bus);
778 if (d == NULL) {
779 packet->ack = RCODE_SEND_ERROR;
780 return -1;
783 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
784 d[0].res_count = cpu_to_le16(packet->timestamp);
787 * The DMA format for asyncronous link packets is different
788 * from the IEEE1394 layout, so shift the fields around
789 * accordingly. If header_length is 8, it's a PHY packet, to
790 * which we need to prepend an extra quadlet.
793 header = (__le32 *) &d[1];
794 if (packet->header_length > 8) {
795 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
796 (packet->speed << 16));
797 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
798 (packet->header[0] & 0xffff0000));
799 header[2] = cpu_to_le32(packet->header[2]);
801 tcode = (packet->header[0] >> 4) & 0x0f;
802 if (TCODE_IS_BLOCK_PACKET(tcode))
803 header[3] = cpu_to_le32(packet->header[3]);
804 else
805 header[3] = (__force __le32) packet->header[3];
807 d[0].req_count = cpu_to_le16(packet->header_length);
808 } else {
809 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
810 (packet->speed << 16));
811 header[1] = cpu_to_le32(packet->header[0]);
812 header[2] = cpu_to_le32(packet->header[1]);
813 d[0].req_count = cpu_to_le16(12);
816 driver_data = (struct driver_data *) &d[3];
817 driver_data->packet = packet;
818 packet->driver_data = driver_data;
820 if (packet->payload_length > 0) {
821 payload_bus =
822 dma_map_single(ohci->card.device, packet->payload,
823 packet->payload_length, DMA_TO_DEVICE);
824 if (dma_mapping_error(payload_bus)) {
825 packet->ack = RCODE_SEND_ERROR;
826 return -1;
829 d[2].req_count = cpu_to_le16(packet->payload_length);
830 d[2].data_address = cpu_to_le32(payload_bus);
831 last = &d[2];
832 z = 3;
833 } else {
834 last = &d[0];
835 z = 2;
838 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
839 DESCRIPTOR_IRQ_ALWAYS |
840 DESCRIPTOR_BRANCH_ALWAYS);
842 /* FIXME: Document how the locking works. */
843 if (ohci->generation != packet->generation) {
844 if (packet->payload_length > 0)
845 dma_unmap_single(ohci->card.device, payload_bus,
846 packet->payload_length, DMA_TO_DEVICE);
847 packet->ack = RCODE_GENERATION;
848 return -1;
851 context_append(ctx, d, z, 4 - z);
853 /* If the context isn't already running, start it up. */
854 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
855 if ((reg & CONTEXT_RUN) == 0)
856 context_run(ctx, 0);
858 return 0;
861 static int handle_at_packet(struct context *context,
862 struct descriptor *d,
863 struct descriptor *last)
865 struct driver_data *driver_data;
866 struct fw_packet *packet;
867 struct fw_ohci *ohci = context->ohci;
868 dma_addr_t payload_bus;
869 int evt;
871 if (last->transfer_status == 0)
872 /* This descriptor isn't done yet, stop iteration. */
873 return 0;
875 driver_data = (struct driver_data *) &d[3];
876 packet = driver_data->packet;
877 if (packet == NULL)
878 /* This packet was cancelled, just continue. */
879 return 1;
881 payload_bus = le32_to_cpu(last->data_address);
882 if (payload_bus != 0)
883 dma_unmap_single(ohci->card.device, payload_bus,
884 packet->payload_length, DMA_TO_DEVICE);
886 evt = le16_to_cpu(last->transfer_status) & 0x1f;
887 packet->timestamp = le16_to_cpu(last->res_count);
889 switch (evt) {
890 case OHCI1394_evt_timeout:
891 /* Async response transmit timed out. */
892 packet->ack = RCODE_CANCELLED;
893 break;
895 case OHCI1394_evt_flushed:
897 * The packet was flushed should give same error as
898 * when we try to use a stale generation count.
900 packet->ack = RCODE_GENERATION;
901 break;
903 case OHCI1394_evt_missing_ack:
905 * Using a valid (current) generation count, but the
906 * node is not on the bus or not sending acks.
908 packet->ack = RCODE_NO_ACK;
909 break;
911 case ACK_COMPLETE + 0x10:
912 case ACK_PENDING + 0x10:
913 case ACK_BUSY_X + 0x10:
914 case ACK_BUSY_A + 0x10:
915 case ACK_BUSY_B + 0x10:
916 case ACK_DATA_ERROR + 0x10:
917 case ACK_TYPE_ERROR + 0x10:
918 packet->ack = evt - 0x10;
919 break;
921 default:
922 packet->ack = RCODE_SEND_ERROR;
923 break;
926 packet->callback(packet, &ohci->card, packet->ack);
928 return 1;
/* Field extractors for IEEE 1394 packet header quadlets. */
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
937 static void
938 handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
940 struct fw_packet response;
941 int tcode, length, i;
943 tcode = HEADER_GET_TCODE(packet->header[0]);
944 if (TCODE_IS_BLOCK_PACKET(tcode))
945 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
946 else
947 length = 4;
949 i = csr - CSR_CONFIG_ROM;
950 if (i + length > CONFIG_ROM_SIZE) {
951 fw_fill_response(&response, packet->header,
952 RCODE_ADDRESS_ERROR, NULL, 0);
953 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
954 fw_fill_response(&response, packet->header,
955 RCODE_TYPE_ERROR, NULL, 0);
956 } else {
957 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
958 (void *) ohci->config_rom + i, length);
961 fw_core_handle_response(&ohci->card, &response);
964 static void
965 handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
967 struct fw_packet response;
968 int tcode, length, ext_tcode, sel;
969 __be32 *payload, lock_old;
970 u32 lock_arg, lock_data;
972 tcode = HEADER_GET_TCODE(packet->header[0]);
973 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
974 payload = packet->payload;
975 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
977 if (tcode == TCODE_LOCK_REQUEST &&
978 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
979 lock_arg = be32_to_cpu(payload[0]);
980 lock_data = be32_to_cpu(payload[1]);
981 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
982 lock_arg = 0;
983 lock_data = 0;
984 } else {
985 fw_fill_response(&response, packet->header,
986 RCODE_TYPE_ERROR, NULL, 0);
987 goto out;
990 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
991 reg_write(ohci, OHCI1394_CSRData, lock_data);
992 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
993 reg_write(ohci, OHCI1394_CSRControl, sel);
995 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
996 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
997 else
998 fw_notify("swap not done yet\n");
1000 fw_fill_response(&response, packet->header,
1001 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
1002 out:
1003 fw_core_handle_response(&ohci->card, &response);
1006 static void
1007 handle_local_request(struct context *ctx, struct fw_packet *packet)
1009 u64 offset;
1010 u32 csr;
1012 if (ctx == &ctx->ohci->at_request_ctx) {
1013 packet->ack = ACK_PENDING;
1014 packet->callback(packet, &ctx->ohci->card, packet->ack);
1017 offset =
1018 ((unsigned long long)
1019 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1020 packet->header[2];
1021 csr = offset - CSR_REGISTER_BASE;
1023 /* Handle config rom reads. */
1024 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1025 handle_local_rom(ctx->ohci, packet, csr);
1026 else switch (csr) {
1027 case CSR_BUS_MANAGER_ID:
1028 case CSR_BANDWIDTH_AVAILABLE:
1029 case CSR_CHANNELS_AVAILABLE_HI:
1030 case CSR_CHANNELS_AVAILABLE_LO:
1031 handle_local_lock(ctx->ohci, packet, csr);
1032 break;
1033 default:
1034 if (ctx == &ctx->ohci->at_request_ctx)
1035 fw_core_handle_request(&ctx->ohci->card, packet);
1036 else
1037 fw_core_handle_response(&ctx->ohci->card, packet);
1038 break;
1041 if (ctx == &ctx->ohci->at_response_ctx) {
1042 packet->ack = ACK_COMPLETE;
1043 packet->callback(packet, &ctx->ohci->card, packet->ack);
1047 static void
1048 at_context_transmit(struct context *ctx, struct fw_packet *packet)
1050 unsigned long flags;
1051 int retval;
1053 spin_lock_irqsave(&ctx->ohci->lock, flags);
1055 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1056 ctx->ohci->generation == packet->generation) {
1057 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1058 handle_local_request(ctx, packet);
1059 return;
1062 retval = at_context_queue_packet(ctx, packet);
1063 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1065 if (retval < 0)
1066 packet->callback(packet, &ctx->ohci->card, packet->ack);
1070 static void bus_reset_tasklet(unsigned long data)
1072 struct fw_ohci *ohci = (struct fw_ohci *)data;
1073 int self_id_count, i, j, reg;
1074 int generation, new_generation;
1075 unsigned long flags;
1076 void *free_rom = NULL;
1077 dma_addr_t free_rom_bus = 0;
1079 reg = reg_read(ohci, OHCI1394_NodeID);
1080 if (!(reg & OHCI1394_NodeID_idValid)) {
1081 fw_notify("node ID not valid, new bus reset in progress\n");
1082 return;
1084 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1085 fw_notify("malconfigured bus\n");
1086 return;
1088 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1089 OHCI1394_NodeID_nodeNumber);
1092 * The count in the SelfIDCount register is the number of
1093 * bytes in the self ID receive buffer. Since we also receive
1094 * the inverted quadlets and a header quadlet, we shift one
1095 * bit extra to get the actual number of self IDs.
1098 self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
1099 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
1100 generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1101 =======
1102 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1103 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
1104 rmb();
1106 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1107 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
1108 fw_error("inconsistent self IDs\n");
1109 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
1110 ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
1111 =======
1112 ohci->self_id_buffer[j] =
1113 cond_le32_to_cpu(ohci->self_id_cpu[i]);
1114 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
1116 rmb();
1119 * Check the consistency of the self IDs we just read. The
1120 * problem we face is that a new bus reset can start while we
1121 * read out the self IDs from the DMA buffer. If this happens,
1122 * the DMA buffer will be overwritten with new self IDs and we
1123 * will read out inconsistent data. The OHCI specification
1124 * (section 11.2) recommends a technique similar to
1125 * linux/seqlock.h, where we remember the generation of the
1126 * self IDs in the buffer before reading them out and compare
1127 * it to the current generation after reading them out. If
1128 * the two generations match we know we have a consistent set
1129 * of self IDs.
1132 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1133 if (new_generation != generation) {
1134 fw_notify("recursive bus reset detected, "
1135 "discarding self ids\n");
1136 return;
1139 /* FIXME: Document how the locking works. */
1140 spin_lock_irqsave(&ohci->lock, flags);
1142 ohci->generation = generation;
1143 context_stop(&ohci->at_request_ctx);
1144 context_stop(&ohci->at_response_ctx);
1145 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1148 * This next bit is unrelated to the AT context stuff but we
1149 * have to do it under the spinlock also. If a new config rom
1150 * was set up before this reset, the old one is now no longer
1151 * in use and we can free it. Update the config rom pointers
1152 * to point to the current config rom and clear the
1153 * next_config_rom pointer so a new udpate can take place.
1156 if (ohci->next_config_rom != NULL) {
1157 if (ohci->next_config_rom != ohci->config_rom) {
1158 free_rom = ohci->config_rom;
1159 free_rom_bus = ohci->config_rom_bus;
1161 ohci->config_rom = ohci->next_config_rom;
1162 ohci->config_rom_bus = ohci->next_config_rom_bus;
1163 ohci->next_config_rom = NULL;
1166 * Restore config_rom image and manually update
1167 * config_rom registers. Writing the header quadlet
1168 * will indicate that the config rom is ready, so we
1169 * do that last.
1171 reg_write(ohci, OHCI1394_BusOptions,
1172 be32_to_cpu(ohci->config_rom[2]));
1173 ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
1174 reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
1177 spin_unlock_irqrestore(&ohci->lock, flags);
1179 if (free_rom)
1180 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1181 free_rom, free_rom_bus);
1183 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
1184 self_id_count, ohci->self_id_buffer);
1187 static irqreturn_t irq_handler(int irq, void *data)
1189 struct fw_ohci *ohci = data;
1190 u32 event, iso_event, cycle_time;
1191 int i;
1193 event = reg_read(ohci, OHCI1394_IntEventClear);
1195 if (!event || !~event)
1196 return IRQ_NONE;
1198 reg_write(ohci, OHCI1394_IntEventClear, event);
1200 if (event & OHCI1394_selfIDComplete)
1201 tasklet_schedule(&ohci->bus_reset_tasklet);
1203 if (event & OHCI1394_RQPkt)
1204 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
1206 if (event & OHCI1394_RSPkt)
1207 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
1209 if (event & OHCI1394_reqTxComplete)
1210 tasklet_schedule(&ohci->at_request_ctx.tasklet);
1212 if (event & OHCI1394_respTxComplete)
1213 tasklet_schedule(&ohci->at_response_ctx.tasklet);
1215 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
1216 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
1218 while (iso_event) {
1219 i = ffs(iso_event) - 1;
1220 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
1221 iso_event &= ~(1 << i);
1224 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
1225 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
1227 while (iso_event) {
1228 i = ffs(iso_event) - 1;
1229 tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
1230 iso_event &= ~(1 << i);
1233 if (unlikely(event & OHCI1394_postedWriteErr))
1234 fw_error("PCI posted write error\n");
1236 if (unlikely(event & OHCI1394_cycleTooLong)) {
1237 if (printk_ratelimit())
1238 fw_notify("isochronous cycle too long\n");
1239 reg_write(ohci, OHCI1394_LinkControlSet,
1240 OHCI1394_LinkControl_cycleMaster);
1243 if (event & OHCI1394_cycle64Seconds) {
1244 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1245 if ((cycle_time & 0x80000000) == 0)
1246 ohci->bus_seconds++;
1249 return IRQ_HANDLED;
1252 static int software_reset(struct fw_ohci *ohci)
1254 int i;
1256 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1258 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1259 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1260 OHCI1394_HCControl_softReset) == 0)
1261 return 0;
1262 msleep(1);
1265 return -EBUSY;
1268 static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1270 struct fw_ohci *ohci = fw_ohci(card);
1271 struct pci_dev *dev = to_pci_dev(card->device);
1273 if (software_reset(ohci)) {
1274 fw_error("Failed to reset ohci card.\n");
1275 return -EBUSY;
1279 * Now enable LPS, which we need in order to start accessing
1280 * most of the registers. In fact, on some cards (ALI M5251),
1281 * accessing registers in the SClk domain without LPS enabled
1282 * will lock up the machine. Wait 50msec to make sure we have
1283 * full link enabled.
1285 reg_write(ohci, OHCI1394_HCControlSet,
1286 OHCI1394_HCControl_LPS |
1287 OHCI1394_HCControl_postedWriteEnable);
1288 flush_writes(ohci);
1289 msleep(50);
1291 reg_write(ohci, OHCI1394_HCControlClear,
1292 OHCI1394_HCControl_noByteSwapData);
1294 reg_write(ohci, OHCI1394_LinkControlSet,
1295 OHCI1394_LinkControl_rcvSelfID |
1296 OHCI1394_LinkControl_cycleTimerEnable |
1297 OHCI1394_LinkControl_cycleMaster);
1299 reg_write(ohci, OHCI1394_ATRetries,
1300 OHCI1394_MAX_AT_REQ_RETRIES |
1301 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
1302 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
1304 ar_context_run(&ohci->ar_request_ctx);
1305 ar_context_run(&ohci->ar_response_ctx);
1307 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
1308 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
1309 reg_write(ohci, OHCI1394_IntEventClear, ~0);
1310 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1311 reg_write(ohci, OHCI1394_IntMaskSet,
1312 OHCI1394_selfIDComplete |
1313 OHCI1394_RQPkt | OHCI1394_RSPkt |
1314 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1315 OHCI1394_isochRx | OHCI1394_isochTx |
1316 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1317 OHCI1394_cycle64Seconds | OHCI1394_masterIntEnable);
1319 /* Activate link_on bit and contender bit in our self ID packets.*/
1320 if (ohci_update_phy_reg(card, 4, 0,
1321 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
1322 return -EIO;
1325 * When the link is not yet enabled, the atomic config rom
1326 * update mechanism described below in ohci_set_config_rom()
1327 * is not active. We have to update ConfigRomHeader and
1328 * BusOptions manually, and the write to ConfigROMmap takes
1329 * effect immediately. We tie this to the enabling of the
1330 * link, so we have a valid config rom before enabling - the
1331 * OHCI requires that ConfigROMhdr and BusOptions have valid
1332 * values before enabling.
1334 * However, when the ConfigROMmap is written, some controllers
1335 * always read back quadlets 0 and 2 from the config rom to
1336 * the ConfigRomHeader and BusOptions registers on bus reset.
1337 * They shouldn't do that in this initial case where the link
1338 * isn't enabled. This means we have to use the same
1339 * workaround here, setting the bus header to 0 and then write
1340 * the right values in the bus reset tasklet.
1343 if (config_rom) {
1344 ohci->next_config_rom =
1345 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1346 &ohci->next_config_rom_bus,
1347 GFP_KERNEL);
1348 if (ohci->next_config_rom == NULL)
1349 return -ENOMEM;
1351 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1352 fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
1353 } else {
1355 * In the suspend case, config_rom is NULL, which
1356 * means that we just reuse the old config rom.
1358 ohci->next_config_rom = ohci->config_rom;
1359 ohci->next_config_rom_bus = ohci->config_rom_bus;
1362 ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
1363 ohci->next_config_rom[0] = 0;
1364 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1365 reg_write(ohci, OHCI1394_BusOptions,
1366 be32_to_cpu(ohci->next_config_rom[2]));
1367 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1369 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1371 if (request_irq(dev->irq, irq_handler,
1372 IRQF_SHARED, ohci_driver_name, ohci)) {
1373 fw_error("Failed to allocate shared interrupt %d.\n",
1374 dev->irq);
1375 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1376 ohci->config_rom, ohci->config_rom_bus);
1377 return -EIO;
1380 reg_write(ohci, OHCI1394_HCControlSet,
1381 OHCI1394_HCControl_linkEnable |
1382 OHCI1394_HCControl_BIBimageValid);
1383 flush_writes(ohci);
1386 * We are ready to go, initiate bus reset to finish the
1387 * initialization.
1390 fw_core_initiate_bus_reset(&ohci->card, 1);
1392 return 0;
1395 static int
1396 ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1398 struct fw_ohci *ohci;
1399 unsigned long flags;
1400 int retval = -EBUSY;
1401 __be32 *next_config_rom;
1402 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
1403 dma_addr_t next_config_rom_bus;
1404 =======
1405 dma_addr_t uninitialized_var(next_config_rom_bus);
1406 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
1408 ohci = fw_ohci(card);
1411 * When the OHCI controller is enabled, the config rom update
1412 * mechanism is a bit tricky, but easy enough to use. See
1413 * section 5.5.6 in the OHCI specification.
1415 * The OHCI controller caches the new config rom address in a
1416 * shadow register (ConfigROMmapNext) and needs a bus reset
1417 * for the changes to take place. When the bus reset is
1418 * detected, the controller loads the new values for the
1419 * ConfigRomHeader and BusOptions registers from the specified
1420 * config rom and loads ConfigROMmap from the ConfigROMmapNext
1421 * shadow register. All automatically and atomically.
1423 * Now, there's a twist to this story. The automatic load of
1424 * ConfigRomHeader and BusOptions doesn't honor the
1425 * noByteSwapData bit, so with a be32 config rom, the
1426 * controller will load be32 values in to these registers
1427 * during the atomic update, even on litte endian
1428 * architectures. The workaround we use is to put a 0 in the
1429 * header quadlet; 0 is endian agnostic and means that the
1430 * config rom isn't ready yet. In the bus reset tasklet we
1431 * then set up the real values for the two registers.
1433 * We use ohci->lock to avoid racing with the code that sets
1434 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
1437 next_config_rom =
1438 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1439 &next_config_rom_bus, GFP_KERNEL);
1440 if (next_config_rom == NULL)
1441 return -ENOMEM;
1443 spin_lock_irqsave(&ohci->lock, flags);
1445 if (ohci->next_config_rom == NULL) {
1446 ohci->next_config_rom = next_config_rom;
1447 ohci->next_config_rom_bus = next_config_rom_bus;
1449 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1450 fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
1451 length * 4);
1453 ohci->next_header = config_rom[0];
1454 ohci->next_config_rom[0] = 0;
1456 reg_write(ohci, OHCI1394_ConfigROMmap,
1457 ohci->next_config_rom_bus);
1458 retval = 0;
1461 spin_unlock_irqrestore(&ohci->lock, flags);
1464 * Now initiate a bus reset to have the changes take
1465 * effect. We clean up the old config rom memory and DMA
1466 * mappings in the bus reset tasklet, since the OHCI
1467 * controller could need to access it before the bus reset
1468 * takes effect.
1470 if (retval == 0)
1471 fw_core_initiate_bus_reset(&ohci->card, 1);
1472 else
1473 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1474 next_config_rom, next_config_rom_bus);
1476 return retval;
1479 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1481 struct fw_ohci *ohci = fw_ohci(card);
1483 at_context_transmit(&ohci->at_request_ctx, packet);
1486 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1488 struct fw_ohci *ohci = fw_ohci(card);
1490 at_context_transmit(&ohci->at_response_ctx, packet);
1493 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1495 struct fw_ohci *ohci = fw_ohci(card);
1496 struct context *ctx = &ohci->at_request_ctx;
1497 struct driver_data *driver_data = packet->driver_data;
1498 int retval = -ENOENT;
1500 tasklet_disable(&ctx->tasklet);
1502 if (packet->ack != 0)
1503 goto out;
1505 driver_data->packet = NULL;
1506 packet->ack = RCODE_CANCELLED;
1507 packet->callback(packet, &ohci->card, packet->ack);
1508 retval = 0;
1510 out:
1511 tasklet_enable(&ctx->tasklet);
1513 return retval;
1516 static int
1517 ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1519 struct fw_ohci *ohci = fw_ohci(card);
1520 unsigned long flags;
1521 int n, retval = 0;
1524 * FIXME: Make sure this bitmask is cleared when we clear the busReset
1525 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
1528 spin_lock_irqsave(&ohci->lock, flags);
1530 if (ohci->generation != generation) {
1531 retval = -ESTALE;
1532 goto out;
1536 * Note, if the node ID contains a non-local bus ID, physical DMA is
1537 * enabled for _all_ nodes on remote buses.
1540 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
1541 if (n < 32)
1542 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
1543 else
1544 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
1546 flush_writes(ohci);
1547 out:
1548 spin_unlock_irqrestore(&ohci->lock, flags);
1549 return retval;
1552 static u64
1553 ohci_get_bus_time(struct fw_card *card)
1555 struct fw_ohci *ohci = fw_ohci(card);
1556 u32 cycle_time;
1557 u64 bus_time;
1559 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1560 bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
1562 return bus_time;
1565 static int handle_ir_dualbuffer_packet(struct context *context,
1566 struct descriptor *d,
1567 struct descriptor *last)
1569 struct iso_context *ctx =
1570 container_of(context, struct iso_context, context);
1571 struct db_descriptor *db = (struct db_descriptor *) d;
1572 __le32 *ir_header;
1573 size_t header_length;
1574 void *p, *end;
1575 int i;
1577 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
1578 if (db->first_res_count > 0 && db->second_res_count > 0) {
1579 =======
1580 if (db->first_res_count != 0 && db->second_res_count != 0) {
1581 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
1582 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
1583 /* This descriptor isn't done yet, stop iteration. */
1584 return 0;
1586 ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
1589 header_length = le16_to_cpu(db->first_req_count) -
1590 le16_to_cpu(db->first_res_count);
1592 i = ctx->header_length;
1593 p = db + 1;
1594 end = p + header_length;
1595 while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
1597 * The iso header is byteswapped to little endian by
1598 * the controller, but the remaining header quadlets
1599 * are big endian. We want to present all the headers
1600 * as big endian, so we have to swap the first
1601 * quadlet.
1603 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1604 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1605 i += ctx->base.header_size;
1606 ctx->excess_bytes +=
1607 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
1608 (le32_to_cpu(*(u32 *)(p + 4)) >> 16) & 0xffff;
1609 =======
1610 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
1611 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
1612 p += ctx->base.header_size + 4;
1614 ctx->header_length = i;
1616 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
1617 le16_to_cpu(db->second_res_count);
1619 if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
1620 ir_header = (__le32 *) (db + 1);
1621 ctx->base.callback(&ctx->base,
1622 le32_to_cpu(ir_header[0]) & 0xffff,
1623 ctx->header_length, ctx->header,
1624 ctx->base.callback_data);
1625 ctx->header_length = 0;
1628 return 1;
1631 static int handle_ir_packet_per_buffer(struct context *context,
1632 struct descriptor *d,
1633 struct descriptor *last)
1635 struct iso_context *ctx =
1636 container_of(context, struct iso_context, context);
1637 struct descriptor *pd;
1638 __le32 *ir_header;
1639 void *p;
1640 int i;
1642 for (pd = d; pd <= last; pd++) {
1643 if (pd->transfer_status)
1644 break;
1646 if (pd > last)
1647 /* Descriptor(s) not done yet, stop iteration */
1648 return 0;
1650 i = ctx->header_length;
1651 p = last + 1;
1653 if (ctx->base.header_size > 0 &&
1654 i + ctx->base.header_size <= PAGE_SIZE) {
1656 * The iso header is byteswapped to little endian by
1657 * the controller, but the remaining header quadlets
1658 * are big endian. We want to present all the headers
1659 * as big endian, so we have to swap the first quadlet.
1661 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1662 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1663 ctx->header_length += ctx->base.header_size;
1666 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1667 ir_header = (__le32 *) p;
1668 ctx->base.callback(&ctx->base,
1669 le32_to_cpu(ir_header[0]) & 0xffff,
1670 ctx->header_length, ctx->header,
1671 ctx->base.callback_data);
1672 ctx->header_length = 0;
1675 return 1;
1678 static int handle_it_packet(struct context *context,
1679 struct descriptor *d,
1680 struct descriptor *last)
1682 struct iso_context *ctx =
1683 container_of(context, struct iso_context, context);
1685 if (last->transfer_status == 0)
1686 /* This descriptor isn't done yet, stop iteration. */
1687 return 0;
1689 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
1690 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1691 0, NULL, ctx->base.callback_data);
1693 return 1;
1696 static struct fw_iso_context *
1697 ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1699 struct fw_ohci *ohci = fw_ohci(card);
1700 struct iso_context *ctx, *list;
1701 descriptor_callback_t callback;
1702 u32 *mask, regs;
1703 unsigned long flags;
1704 int index, retval = -ENOMEM;
1706 if (type == FW_ISO_CONTEXT_TRANSMIT) {
1707 mask = &ohci->it_context_mask;
1708 list = ohci->it_context_list;
1709 callback = handle_it_packet;
1710 } else {
1711 mask = &ohci->ir_context_mask;
1712 list = ohci->ir_context_list;
1713 if (ohci->version >= OHCI_VERSION_1_1)
1714 callback = handle_ir_dualbuffer_packet;
1715 else
1716 callback = handle_ir_packet_per_buffer;
1719 spin_lock_irqsave(&ohci->lock, flags);
1720 index = ffs(*mask) - 1;
1721 if (index >= 0)
1722 *mask &= ~(1 << index);
1723 spin_unlock_irqrestore(&ohci->lock, flags);
1725 if (index < 0)
1726 return ERR_PTR(-EBUSY);
1728 if (type == FW_ISO_CONTEXT_TRANSMIT)
1729 regs = OHCI1394_IsoXmitContextBase(index);
1730 else
1731 regs = OHCI1394_IsoRcvContextBase(index);
1733 ctx = &list[index];
1734 memset(ctx, 0, sizeof(*ctx));
1735 ctx->header_length = 0;
1736 ctx->header = (void *) __get_free_page(GFP_KERNEL);
1737 if (ctx->header == NULL)
1738 goto out;
1740 retval = context_init(&ctx->context, ohci, regs, callback);
1741 if (retval < 0)
1742 goto out_with_header;
1744 return &ctx->base;
1746 out_with_header:
1747 free_page((unsigned long)ctx->header);
1748 out:
1749 spin_lock_irqsave(&ohci->lock, flags);
1750 *mask |= 1 << index;
1751 spin_unlock_irqrestore(&ohci->lock, flags);
1753 return ERR_PTR(retval);
1756 static int ohci_start_iso(struct fw_iso_context *base,
1757 s32 cycle, u32 sync, u32 tags)
1759 struct iso_context *ctx = container_of(base, struct iso_context, base);
1760 struct fw_ohci *ohci = ctx->context.ohci;
1761 u32 control, match;
1762 int index;
1764 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1765 index = ctx - ohci->it_context_list;
1766 match = 0;
1767 if (cycle >= 0)
1768 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
1769 (cycle & 0x7fff) << 16;
1771 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
1772 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
1773 context_run(&ctx->context, match);
1774 } else {
1775 index = ctx - ohci->ir_context_list;
1776 control = IR_CONTEXT_ISOCH_HEADER;
1777 if (ohci->version >= OHCI_VERSION_1_1)
1778 control |= IR_CONTEXT_DUAL_BUFFER_MODE;
1779 match = (tags << 28) | (sync << 8) | ctx->base.channel;
1780 if (cycle >= 0) {
1781 match |= (cycle & 0x07fff) << 12;
1782 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
1785 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
1786 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
1787 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
1788 context_run(&ctx->context, control);
1791 return 0;
1794 static int ohci_stop_iso(struct fw_iso_context *base)
1796 struct fw_ohci *ohci = fw_ohci(base->card);
1797 struct iso_context *ctx = container_of(base, struct iso_context, base);
1798 int index;
1800 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1801 index = ctx - ohci->it_context_list;
1802 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
1803 } else {
1804 index = ctx - ohci->ir_context_list;
1805 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
1807 flush_writes(ohci);
1808 context_stop(&ctx->context);
1810 return 0;
1813 static void ohci_free_iso_context(struct fw_iso_context *base)
1815 struct fw_ohci *ohci = fw_ohci(base->card);
1816 struct iso_context *ctx = container_of(base, struct iso_context, base);
1817 unsigned long flags;
1818 int index;
1820 ohci_stop_iso(base);
1821 context_release(&ctx->context);
1822 free_page((unsigned long)ctx->header);
1824 spin_lock_irqsave(&ohci->lock, flags);
1826 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1827 index = ctx - ohci->it_context_list;
1828 ohci->it_context_mask |= 1 << index;
1829 } else {
1830 index = ctx - ohci->ir_context_list;
1831 ohci->ir_context_mask |= 1 << index;
1834 spin_unlock_irqrestore(&ohci->lock, flags);
1837 static int
1838 ohci_queue_iso_transmit(struct fw_iso_context *base,
1839 struct fw_iso_packet *packet,
1840 struct fw_iso_buffer *buffer,
1841 unsigned long payload)
1843 struct iso_context *ctx = container_of(base, struct iso_context, base);
1844 struct descriptor *d, *last, *pd;
1845 struct fw_iso_packet *p;
1846 __le32 *header;
1847 dma_addr_t d_bus, page_bus;
1848 u32 z, header_z, payload_z, irq;
1849 u32 payload_index, payload_end_index, next_page_index;
1850 int page, end_page, i, length, offset;
1853 * FIXME: Cycle lost behavior should be configurable: lose
1854 * packet, retransmit or terminate..
1857 p = packet;
1858 payload_index = payload;
1860 if (p->skip)
1861 z = 1;
1862 else
1863 z = 2;
1864 if (p->header_length > 0)
1865 z++;
1867 /* Determine the first page the payload isn't contained in. */
1868 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
1869 if (p->payload_length > 0)
1870 payload_z = end_page - (payload_index >> PAGE_SHIFT);
1871 else
1872 payload_z = 0;
1874 z += payload_z;
1876 /* Get header size in number of descriptors. */
1877 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
1879 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
1880 if (d == NULL)
1881 return -ENOMEM;
1883 if (!p->skip) {
1884 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
1885 d[0].req_count = cpu_to_le16(8);
1887 header = (__le32 *) &d[1];
1888 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
1889 IT_HEADER_TAG(p->tag) |
1890 IT_HEADER_TCODE(TCODE_STREAM_DATA) |
1891 IT_HEADER_CHANNEL(ctx->base.channel) |
1892 IT_HEADER_SPEED(ctx->base.speed));
1893 header[1] =
1894 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
1895 p->payload_length));
1898 if (p->header_length > 0) {
1899 d[2].req_count = cpu_to_le16(p->header_length);
1900 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
1901 memcpy(&d[z], p->header, p->header_length);
1904 pd = d + z - payload_z;
1905 payload_end_index = payload_index + p->payload_length;
1906 for (i = 0; i < payload_z; i++) {
1907 page = payload_index >> PAGE_SHIFT;
1908 offset = payload_index & ~PAGE_MASK;
1909 next_page_index = (page + 1) << PAGE_SHIFT;
1910 length =
1911 min(next_page_index, payload_end_index) - payload_index;
1912 pd[i].req_count = cpu_to_le16(length);
1914 page_bus = page_private(buffer->pages[page]);
1915 pd[i].data_address = cpu_to_le32(page_bus + offset);
1917 payload_index += length;
1920 if (p->interrupt)
1921 irq = DESCRIPTOR_IRQ_ALWAYS;
1922 else
1923 irq = DESCRIPTOR_NO_IRQ;
1925 last = z == 2 ? d : d + z - 1;
1926 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1927 DESCRIPTOR_STATUS |
1928 DESCRIPTOR_BRANCH_ALWAYS |
1929 irq);
1931 context_append(&ctx->context, d, z, header_z);
1933 return 0;
1936 static int
1937 ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
1938 struct fw_iso_packet *packet,
1939 struct fw_iso_buffer *buffer,
1940 unsigned long payload)
1942 struct iso_context *ctx = container_of(base, struct iso_context, base);
1943 struct db_descriptor *db = NULL;
1944 struct descriptor *d;
1945 struct fw_iso_packet *p;
1946 dma_addr_t d_bus, page_bus;
1947 u32 z, header_z, length, rest;
1948 int page, offset, packet_count, header_size;
1951 * FIXME: Cycle lost behavior should be configurable: lose
1952 * packet, retransmit or terminate..
1955 p = packet;
1956 z = 2;
1959 * The OHCI controller puts the status word in the header
1960 * buffer too, so we need 4 extra bytes per packet.
1962 packet_count = p->header_length / ctx->base.header_size;
1963 header_size = packet_count * (ctx->base.header_size + 4);
1965 /* Get header size in number of descriptors. */
1966 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
1967 page = payload >> PAGE_SHIFT;
1968 offset = payload & ~PAGE_MASK;
1969 rest = p->payload_length;
1971 /* FIXME: make packet-per-buffer/dual-buffer a context option */
1972 while (rest > 0) {
1973 d = context_get_descriptors(&ctx->context,
1974 z + header_z, &d_bus);
1975 if (d == NULL)
1976 return -ENOMEM;
1978 db = (struct db_descriptor *) d;
1979 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
1980 DESCRIPTOR_BRANCH_ALWAYS);
1981 db->first_size = cpu_to_le16(ctx->base.header_size + 4);
1982 if (p->skip && rest == p->payload_length) {
1983 db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
1984 db->first_req_count = db->first_size;
1985 } else {
1986 db->first_req_count = cpu_to_le16(header_size);
1988 db->first_res_count = db->first_req_count;
1989 db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
1991 if (p->skip && rest == p->payload_length)
1992 length = 4;
1993 else if (offset + rest < PAGE_SIZE)
1994 length = rest;
1995 else
1996 length = PAGE_SIZE - offset;
1998 db->second_req_count = cpu_to_le16(length);
1999 db->second_res_count = db->second_req_count;
2000 page_bus = page_private(buffer->pages[page]);
2001 db->second_buffer = cpu_to_le32(page_bus + offset);
2003 if (p->interrupt && length == rest)
2004 db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
2006 context_append(&ctx->context, d, z, header_z);
2007 offset = (offset + length) & ~PAGE_MASK;
2008 rest -= length;
2009 if (offset == 0)
2010 page++;
2013 return 0;
2016 static int
2017 ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2018 struct fw_iso_packet *packet,
2019 struct fw_iso_buffer *buffer,
2020 unsigned long payload)
2022 struct iso_context *ctx = container_of(base, struct iso_context, base);
2023 struct descriptor *d = NULL, *pd = NULL;
2024 struct fw_iso_packet *p = packet;
2025 dma_addr_t d_bus, page_bus;
2026 u32 z, header_z, rest;
2027 int i, j, length;
2028 int page, offset, packet_count, header_size, payload_per_buffer;
2031 * The OHCI controller puts the status word in the
2032 * buffer too, so we need 4 extra bytes per packet.
2034 packet_count = p->header_length / ctx->base.header_size;
2035 header_size = ctx->base.header_size + 4;
2037 /* Get header size in number of descriptors. */
2038 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
2039 page = payload >> PAGE_SHIFT;
2040 offset = payload & ~PAGE_MASK;
2041 payload_per_buffer = p->payload_length / packet_count;
2043 for (i = 0; i < packet_count; i++) {
2044 /* d points to the header descriptor */
2045 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
2046 d = context_get_descriptors(&ctx->context,
2047 z + header_z, &d_bus);
2048 if (d == NULL)
2049 return -ENOMEM;
2051 d->control = cpu_to_le16(DESCRIPTOR_STATUS |
2052 DESCRIPTOR_INPUT_MORE);
2053 if (p->skip && i == 0)
2054 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
2055 d->req_count = cpu_to_le16(header_size);
2056 d->res_count = d->req_count;
2057 d->transfer_status = 0;
2058 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
2060 rest = payload_per_buffer;
2061 for (j = 1; j < z; j++) {
2062 pd = d + j;
2063 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
2064 DESCRIPTOR_INPUT_MORE);
2066 if (offset + rest < PAGE_SIZE)
2067 length = rest;
2068 else
2069 length = PAGE_SIZE - offset;
2070 pd->req_count = cpu_to_le16(length);
2071 pd->res_count = pd->req_count;
2072 pd->transfer_status = 0;
2074 page_bus = page_private(buffer->pages[page]);
2075 pd->data_address = cpu_to_le32(page_bus + offset);
2077 offset = (offset + length) & ~PAGE_MASK;
2078 rest -= length;
2079 if (offset == 0)
2080 page++;
2082 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
2083 DESCRIPTOR_INPUT_LAST |
2084 DESCRIPTOR_BRANCH_ALWAYS);
2085 if (p->interrupt && i == packet_count - 1)
2086 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
2088 context_append(&ctx->context, d, z, header_z);
2091 return 0;
2094 static int
2095 ohci_queue_iso(struct fw_iso_context *base,
2096 struct fw_iso_packet *packet,
2097 struct fw_iso_buffer *buffer,
2098 unsigned long payload)
2100 struct iso_context *ctx = container_of(base, struct iso_context, base);
2101 unsigned long flags;
2102 int retval;
2104 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
2105 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
2106 retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
2107 else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
2108 retval = ohci_queue_iso_receive_dualbuffer(base, packet,
2109 buffer, payload);
2110 else
2111 retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
2112 buffer,
2113 payload);
2114 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
2116 return retval;
2119 static const struct fw_card_driver ohci_driver = {
2120 .name = ohci_driver_name,
2121 .enable = ohci_enable,
2122 .update_phy_reg = ohci_update_phy_reg,
2123 .set_config_rom = ohci_set_config_rom,
2124 .send_request = ohci_send_request,
2125 .send_response = ohci_send_response,
2126 .cancel_packet = ohci_cancel_packet,
2127 .enable_phys_dma = ohci_enable_phys_dma,
2128 .get_bus_time = ohci_get_bus_time,
2130 .allocate_iso_context = ohci_allocate_iso_context,
2131 .free_iso_context = ohci_free_iso_context,
2132 .queue_iso = ohci_queue_iso,
2133 .start_iso = ohci_start_iso,
2134 .stop_iso = ohci_stop_iso,
2137 static int __devinit
2138 pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2140 struct fw_ohci *ohci;
2141 u32 bus_options, max_receive, link_speed;
2142 u64 guid;
2143 int err;
2144 size_t size;
2146 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
2147 =======
2148 #ifdef CONFIG_PPC_PMAC
2149 /* Necessary on some machines if fw-ohci was loaded/ unloaded before */
2150 if (machine_is(powermac)) {
2151 struct device_node *ofn = pci_device_to_OF_node(dev);
2153 if (ofn) {
2154 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
2155 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
2158 #endif /* CONFIG_PPC_PMAC */
2160 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
2161 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
2162 if (ohci == NULL) {
2163 fw_error("Could not malloc fw_ohci data.\n");
2164 return -ENOMEM;
2167 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
2169 err = pci_enable_device(dev);
2170 if (err) {
2171 fw_error("Failed to enable OHCI hardware.\n");
2172 goto fail_put_card;
2175 pci_set_master(dev);
2176 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
2177 pci_set_drvdata(dev, ohci);
2179 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
2180 =======
2181 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
2182 ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
2183 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
2184 #endif
2185 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
2186 spin_lock_init(&ohci->lock);
2188 tasklet_init(&ohci->bus_reset_tasklet,
2189 bus_reset_tasklet, (unsigned long)ohci);
2191 err = pci_request_region(dev, 0, ohci_driver_name);
2192 if (err) {
2193 fw_error("MMIO resource unavailable\n");
2194 goto fail_disable;
2197 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
2198 if (ohci->registers == NULL) {
2199 fw_error("Failed to remap registers\n");
2200 err = -ENXIO;
2201 goto fail_iomem;
2204 ar_context_init(&ohci->ar_request_ctx, ohci,
2205 OHCI1394_AsReqRcvContextControlSet);
2207 ar_context_init(&ohci->ar_response_ctx, ohci,
2208 OHCI1394_AsRspRcvContextControlSet);
2210 context_init(&ohci->at_request_ctx, ohci,
2211 OHCI1394_AsReqTrContextControlSet, handle_at_packet);
2213 context_init(&ohci->at_response_ctx, ohci,
2214 OHCI1394_AsRspTrContextControlSet, handle_at_packet);
2216 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
2217 ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
2218 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
2219 size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
2220 ohci->it_context_list = kzalloc(size, GFP_KERNEL);
2222 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
2223 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
2224 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
2225 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
2226 ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
2228 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
2229 fw_error("Out of memory for it/ir contexts.\n");
2230 err = -ENOMEM;
2231 goto fail_registers;
2234 /* self-id dma buffer allocation */
2235 ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
2236 SELF_ID_BUF_SIZE,
2237 &ohci->self_id_bus,
2238 GFP_KERNEL);
2239 if (ohci->self_id_cpu == NULL) {
2240 fw_error("Out of memory for self ID buffer.\n");
2241 err = -ENOMEM;
2242 goto fail_registers;
2245 bus_options = reg_read(ohci, OHCI1394_BusOptions);
2246 max_receive = (bus_options >> 12) & 0xf;
2247 link_speed = bus_options & 0x7;
2248 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
2249 reg_read(ohci, OHCI1394_GUIDLo);
2251 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
2252 if (err < 0)
2253 goto fail_self_id;
2255 ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2256 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
2257 dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
2258 return 0;
2260 fail_self_id:
2261 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
2262 ohci->self_id_cpu, ohci->self_id_bus);
2263 fail_registers:
2264 kfree(ohci->it_context_list);
2265 kfree(ohci->ir_context_list);
2266 pci_iounmap(dev, ohci->registers);
2267 fail_iomem:
2268 pci_release_region(dev, 0);
2269 fail_disable:
2270 pci_disable_device(dev);
2271 fail_put_card:
2272 fw_card_put(&ohci->card);
2274 return err;
2277 static void pci_remove(struct pci_dev *dev)
2279 struct fw_ohci *ohci;
2281 ohci = pci_get_drvdata(dev);
2282 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2283 flush_writes(ohci);
2284 fw_core_remove_card(&ohci->card);
2287 * FIXME: Fail all pending packets here, now that the upper
2288 * layers can't queue any more.
2291 software_reset(ohci);
2292 free_irq(dev->irq, ohci);
2293 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
2294 ohci->self_id_cpu, ohci->self_id_bus);
2295 kfree(ohci->it_context_list);
2296 kfree(ohci->ir_context_list);
2297 pci_iounmap(dev, ohci->registers);
2298 pci_release_region(dev, 0);
2299 pci_disable_device(dev);
2300 fw_card_put(&ohci->card);
2302 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
2303 =======
2304 #ifdef CONFIG_PPC_PMAC
2305 /* On UniNorth, power down the cable and turn off the chip clock
2306 * to save power on laptops */
2307 if (machine_is(powermac)) {
2308 struct device_node *ofn = pci_device_to_OF_node(dev);
2310 if (ofn) {
2311 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
2312 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
2315 #endif /* CONFIG_PPC_PMAC */
2317 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
2318 fw_notify("Removed fw-ohci device.\n");
2321 #ifdef CONFIG_PM
2322 static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
2324 struct fw_ohci *ohci = pci_get_drvdata(pdev);
2325 int err;
2327 software_reset(ohci);
2328 free_irq(pdev->irq, ohci);
2329 err = pci_save_state(pdev);
2330 if (err) {
2331 fw_error("pci_save_state failed\n");
2332 return err;
2334 err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
2335 if (err)
2336 fw_error("pci_set_power_state failed with %d\n", err);
2338 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
2339 =======
2340 /* PowerMac suspend code comes last */
2341 #ifdef CONFIG_PPC_PMAC
2342 if (machine_is(powermac)) {
2343 struct device_node *ofn = pci_device_to_OF_node(pdev);
2345 if (ofn)
2346 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
2348 #endif /* CONFIG_PPC_PMAC */
2350 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
2351 return 0;
2354 static int pci_resume(struct pci_dev *pdev)
2356 struct fw_ohci *ohci = pci_get_drvdata(pdev);
2357 int err;
2359 <<<<<<< HEAD:drivers/firewire/fw-ohci.c
2360 =======
2361 /* PowerMac resume code comes first */
2362 #ifdef CONFIG_PPC_PMAC
2363 if (machine_is(powermac)) {
2364 struct device_node *ofn = pci_device_to_OF_node(pdev);
2366 if (ofn)
2367 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
2369 #endif /* CONFIG_PPC_PMAC */
2371 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/firewire/fw-ohci.c
2372 pci_set_power_state(pdev, PCI_D0);
2373 pci_restore_state(pdev);
2374 err = pci_enable_device(pdev);
2375 if (err) {
2376 fw_error("pci_enable_device failed\n");
2377 return err;
2380 return ohci_enable(&ohci->card, NULL, 0);
2382 #endif
/* Bind to any PCI device advertising the OHCI-compliant FireWire class. */
static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
MODULE_DEVICE_TABLE(pci, pci_table);
/* PCI driver glue: probe/remove plus optional power-management hooks. */
static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
/* Module metadata. */
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
/* Skipped when the legacy ohci1394 driver is also built as a module,
 * since both cannot claim the same alias. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif
/* Module entry point: register the PCI driver with the PCI core. */
static int __init fw_ohci_init(void)
	return pci_register_driver(&fw_ohci_pci_driver);
/* Module exit point: unregister the PCI driver (triggers pci_remove). */
static void __exit fw_ohci_cleanup(void)
	pci_unregister_driver(&fw_ohci_pci_driver);
/* Wire the init/exit functions into the module load/unload machinery. */
module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);