[PATCH] w1: Make w1 connector notifications depend on connector.
[linux-2.6/verdex.git] / drivers / ieee1394 / raw1394.c
blob20ce539580f122a56fcce4764a81be01d0315c96
/*
 * IEEE 1394 for Linux
 *
 * Raw interface to the bus
 *
 * Copyright (C) 1999, 2000 Andreas E. Bombe
 *               2001, 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
 *                     2002 Christian Toegel <christian.toegel@gmx.at>
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 *
 * Contributions:
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *        configuration ROM manipulation
 *        address range mapping
 *        adaptation for new (transparent) loopback mechanism
 *        sending of arbitrary async packets
 * Christian Toegel <christian.toegel@gmx.at>
 *        address range mapping
 *        lock64 request
 *        transmit physical packet
 *        busreset notification control (switch on/off)
 *        busreset with selection of type (short/long)
 *        request_reply
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/compat.h>

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "ieee1394_core.h"
#include "nodemgr.h"
#include "hosts.h"
#include "highlevel.h"
#include "iso.h"
#include "ieee1394_transactions.h"
#include "raw1394.h"
#include "raw1394-private.h"
/* Convert between the u64 user-pointer representation used in struct
 * raw1394_request and real __user pointers. */
#define int2ptr(x) ((void __user *)(unsigned long)x)
#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define RAW1394_DEBUG
#endif

#ifdef RAW1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "raw1394:" fmt "\n" , ## args)
#else
#define DBGMSG(fmt, args...)
#endif
72 static LIST_HEAD(host_info_list);
73 static int host_count;
74 static DEFINE_SPINLOCK(host_info_lock);
75 static atomic_t internal_generation = ATOMIC_INIT(0);
77 static atomic_t iso_buffer_size;
78 static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */
80 static struct hpsb_highlevel raw1394_highlevel;
82 static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
83 u64 addr, size_t length, u16 flags);
84 static int arm_write(struct hpsb_host *host, int nodeid, int destid,
85 quadlet_t * data, u64 addr, size_t length, u16 flags);
86 static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
87 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
88 u16 flags);
89 static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
90 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
91 u16 flags);
92 static struct hpsb_address_ops arm_ops = {
93 .read = arm_read,
94 .write = arm_write,
95 .lock = arm_lock,
96 .lock64 = arm_lock64,
99 static void queue_complete_cb(struct pending_request *req);
101 static struct pending_request *__alloc_pending_request(gfp_t flags)
103 struct pending_request *req;
105 req = kzalloc(sizeof(*req), flags);
106 if (req)
107 INIT_LIST_HEAD(&req->list);
109 return req;
112 static inline struct pending_request *alloc_pending_request(void)
114 return __alloc_pending_request(SLAB_KERNEL);
117 static void free_pending_request(struct pending_request *req)
119 if (req->ibs) {
120 if (atomic_dec_and_test(&req->ibs->refcount)) {
121 atomic_sub(req->ibs->data_size, &iso_buffer_size);
122 kfree(req->ibs);
124 } else if (req->free_data) {
125 kfree(req->data);
127 hpsb_free_packet(req->packet);
128 kfree(req);
131 /* fi->reqlists_lock must be taken */
132 static void __queue_complete_req(struct pending_request *req)
134 struct file_info *fi = req->file_info;
135 list_del(&req->list);
136 list_add_tail(&req->list, &fi->req_complete);
138 up(&fi->complete_sem);
139 wake_up_interruptible(&fi->poll_wait_complete);
142 static void queue_complete_req(struct pending_request *req)
144 unsigned long flags;
145 struct file_info *fi = req->file_info;
147 spin_lock_irqsave(&fi->reqlists_lock, flags);
148 __queue_complete_req(req);
149 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
152 static void queue_complete_cb(struct pending_request *req)
154 struct hpsb_packet *packet = req->packet;
155 int rcode = (packet->header[1] >> 12) & 0xf;
157 switch (packet->ack_code) {
158 case ACKX_NONE:
159 case ACKX_SEND_ERROR:
160 req->req.error = RAW1394_ERROR_SEND_ERROR;
161 break;
162 case ACKX_ABORTED:
163 req->req.error = RAW1394_ERROR_ABORTED;
164 break;
165 case ACKX_TIMEOUT:
166 req->req.error = RAW1394_ERROR_TIMEOUT;
167 break;
168 default:
169 req->req.error = (packet->ack_code << 16) | rcode;
170 break;
173 if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
174 req->req.length = 0;
177 if ((req->req.type == RAW1394_REQ_ASYNC_READ) ||
178 (req->req.type == RAW1394_REQ_ASYNC_WRITE) ||
179 (req->req.type == RAW1394_REQ_ASYNC_STREAM) ||
180 (req->req.type == RAW1394_REQ_LOCK) ||
181 (req->req.type == RAW1394_REQ_LOCK64))
182 hpsb_free_tlabel(packet);
184 queue_complete_req(req);
187 static void add_host(struct hpsb_host *host)
189 struct host_info *hi;
190 unsigned long flags;
192 hi = kmalloc(sizeof(*hi), GFP_KERNEL);
194 if (hi) {
195 INIT_LIST_HEAD(&hi->list);
196 hi->host = host;
197 INIT_LIST_HEAD(&hi->file_info_list);
199 spin_lock_irqsave(&host_info_lock, flags);
200 list_add_tail(&hi->list, &host_info_list);
201 host_count++;
202 spin_unlock_irqrestore(&host_info_lock, flags);
205 atomic_inc(&internal_generation);
208 static struct host_info *find_host_info(struct hpsb_host *host)
210 struct host_info *hi;
212 list_for_each_entry(hi, &host_info_list, list)
213 if (hi->host == host)
214 return hi;
216 return NULL;
219 static void remove_host(struct hpsb_host *host)
221 struct host_info *hi;
222 unsigned long flags;
224 spin_lock_irqsave(&host_info_lock, flags);
225 hi = find_host_info(host);
227 if (hi != NULL) {
228 list_del(&hi->list);
229 host_count--;
231 FIXME: address ranges should be removed
232 and fileinfo states should be initialized
233 (including setting generation to
234 internal-generation ...)
237 spin_unlock_irqrestore(&host_info_lock, flags);
239 if (hi == NULL) {
240 printk(KERN_ERR "raw1394: attempt to remove unknown host "
241 "0x%p\n", host);
242 return;
245 kfree(hi);
247 atomic_inc(&internal_generation);
250 static void host_reset(struct hpsb_host *host)
252 unsigned long flags;
253 struct host_info *hi;
254 struct file_info *fi;
255 struct pending_request *req;
257 spin_lock_irqsave(&host_info_lock, flags);
258 hi = find_host_info(host);
260 if (hi != NULL) {
261 list_for_each_entry(fi, &hi->file_info_list, list) {
262 if (fi->notification == RAW1394_NOTIFY_ON) {
263 req = __alloc_pending_request(SLAB_ATOMIC);
265 if (req != NULL) {
266 req->file_info = fi;
267 req->req.type = RAW1394_REQ_BUS_RESET;
268 req->req.generation =
269 get_hpsb_generation(host);
270 req->req.misc = (host->node_id << 16)
271 | host->node_count;
272 if (fi->protocol_version > 3) {
273 req->req.misc |=
274 (NODEID_TO_NODE
275 (host->irm_id)
276 << 8);
279 queue_complete_req(req);
284 spin_unlock_irqrestore(&host_info_lock, flags);
287 static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
288 size_t length)
290 unsigned long flags;
291 struct host_info *hi;
292 struct file_info *fi;
293 struct pending_request *req, *req_next;
294 struct iso_block_store *ibs = NULL;
295 LIST_HEAD(reqs);
297 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
298 HPSB_INFO("dropped iso packet");
299 return;
302 spin_lock_irqsave(&host_info_lock, flags);
303 hi = find_host_info(host);
305 if (hi != NULL) {
306 list_for_each_entry(fi, &hi->file_info_list, list) {
307 if (!(fi->listen_channels & (1ULL << channel)))
308 continue;
310 req = __alloc_pending_request(SLAB_ATOMIC);
311 if (!req)
312 break;
314 if (!ibs) {
315 ibs = kmalloc(sizeof(*ibs) + length,
316 SLAB_ATOMIC);
317 if (!ibs) {
318 kfree(req);
319 break;
322 atomic_add(length, &iso_buffer_size);
323 atomic_set(&ibs->refcount, 0);
324 ibs->data_size = length;
325 memcpy(ibs->data, data, length);
328 atomic_inc(&ibs->refcount);
330 req->file_info = fi;
331 req->ibs = ibs;
332 req->data = ibs->data;
333 req->req.type = RAW1394_REQ_ISO_RECEIVE;
334 req->req.generation = get_hpsb_generation(host);
335 req->req.misc = 0;
336 req->req.recvb = ptr2int(fi->iso_buffer);
337 req->req.length = min(length, fi->iso_buffer_length);
339 list_add_tail(&req->list, &reqs);
342 spin_unlock_irqrestore(&host_info_lock, flags);
344 list_for_each_entry_safe(req, req_next, &reqs, list)
345 queue_complete_req(req);
348 static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
349 int cts, u8 * data, size_t length)
351 unsigned long flags;
352 struct host_info *hi;
353 struct file_info *fi;
354 struct pending_request *req, *req_next;
355 struct iso_block_store *ibs = NULL;
356 LIST_HEAD(reqs);
358 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
359 HPSB_INFO("dropped fcp request");
360 return;
363 spin_lock_irqsave(&host_info_lock, flags);
364 hi = find_host_info(host);
366 if (hi != NULL) {
367 list_for_each_entry(fi, &hi->file_info_list, list) {
368 if (!fi->fcp_buffer)
369 continue;
371 req = __alloc_pending_request(SLAB_ATOMIC);
372 if (!req)
373 break;
375 if (!ibs) {
376 ibs = kmalloc(sizeof(*ibs) + length,
377 SLAB_ATOMIC);
378 if (!ibs) {
379 kfree(req);
380 break;
383 atomic_add(length, &iso_buffer_size);
384 atomic_set(&ibs->refcount, 0);
385 ibs->data_size = length;
386 memcpy(ibs->data, data, length);
389 atomic_inc(&ibs->refcount);
391 req->file_info = fi;
392 req->ibs = ibs;
393 req->data = ibs->data;
394 req->req.type = RAW1394_REQ_FCP_REQUEST;
395 req->req.generation = get_hpsb_generation(host);
396 req->req.misc = nodeid | (direction << 16);
397 req->req.recvb = ptr2int(fi->fcp_buffer);
398 req->req.length = length;
400 list_add_tail(&req->list, &reqs);
403 spin_unlock_irqrestore(&host_info_lock, flags);
405 list_for_each_entry_safe(req, req_next, &reqs, list)
406 queue_complete_req(req);
409 #ifdef CONFIG_COMPAT
410 struct compat_raw1394_req {
411 __u32 type;
412 __s32 error;
413 __u32 misc;
415 __u32 generation;
416 __u32 length;
418 __u64 address;
420 __u64 tag;
422 __u64 sendb;
423 __u64 recvb;
424 } __attribute__((packed));
426 static const char __user *raw1394_compat_write(const char __user *buf)
428 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
429 struct raw1394_request __user *r;
430 r = compat_alloc_user_space(sizeof(struct raw1394_request));
432 #define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
434 if (copy_in_user(r, cr, sizeof(struct compat_raw1394_req)) ||
435 C(address) ||
436 C(tag) ||
437 C(sendb) ||
438 C(recvb))
439 return ERR_PTR(-EFAULT);
440 return (const char __user *)r;
442 #undef C
444 #define P(x) __put_user(r->x, &cr->x)
446 static int
447 raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
449 struct compat_raw1394_req __user *cr = (typeof(cr)) r;
450 if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
451 P(type) ||
452 P(error) ||
453 P(misc) ||
454 P(generation) ||
455 P(length) ||
456 P(address) ||
457 P(tag) ||
458 P(sendb) ||
459 P(recvb))
460 return -EFAULT;
461 return sizeof(struct compat_raw1394_req);
463 #undef P
465 #endif
468 static ssize_t raw1394_read(struct file *file, char __user * buffer,
469 size_t count, loff_t * offset_is_ignored)
471 unsigned long flags;
472 struct file_info *fi = (struct file_info *)file->private_data;
473 struct list_head *lh;
474 struct pending_request *req;
475 ssize_t ret;
477 #ifdef CONFIG_COMPAT
478 if (count == sizeof(struct compat_raw1394_req)) {
479 /* ok */
480 } else
481 #endif
482 if (count != sizeof(struct raw1394_request)) {
483 return -EINVAL;
486 if (!access_ok(VERIFY_WRITE, buffer, count)) {
487 return -EFAULT;
490 if (file->f_flags & O_NONBLOCK) {
491 if (down_trylock(&fi->complete_sem)) {
492 return -EAGAIN;
494 } else {
495 if (down_interruptible(&fi->complete_sem)) {
496 return -ERESTARTSYS;
500 spin_lock_irqsave(&fi->reqlists_lock, flags);
501 lh = fi->req_complete.next;
502 list_del(lh);
503 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
505 req = list_entry(lh, struct pending_request, list);
507 if (req->req.length) {
508 if (copy_to_user(int2ptr(req->req.recvb), req->data,
509 req->req.length)) {
510 req->req.error = RAW1394_ERROR_MEMFAULT;
514 #ifdef CONFIG_COMPAT
515 if (count == sizeof(struct compat_raw1394_req) &&
516 sizeof(struct compat_raw1394_req) !=
517 sizeof(struct raw1394_request)) {
518 ret = raw1394_compat_read(buffer, &req->req);
519 } else
520 #endif
522 if (copy_to_user(buffer, &req->req, sizeof(req->req))) {
523 ret = -EFAULT;
524 goto out;
526 ret = (ssize_t) sizeof(struct raw1394_request);
528 out:
529 free_pending_request(req);
530 return ret;
533 static int state_opened(struct file_info *fi, struct pending_request *req)
535 if (req->req.type == RAW1394_REQ_INITIALIZE) {
536 switch (req->req.misc) {
537 case RAW1394_KERNELAPI_VERSION:
538 case 3:
539 fi->state = initialized;
540 fi->protocol_version = req->req.misc;
541 req->req.error = RAW1394_ERROR_NONE;
542 req->req.generation = atomic_read(&internal_generation);
543 break;
545 default:
546 req->req.error = RAW1394_ERROR_COMPAT;
547 req->req.misc = RAW1394_KERNELAPI_VERSION;
549 } else {
550 req->req.error = RAW1394_ERROR_STATE_ORDER;
553 req->req.length = 0;
554 queue_complete_req(req);
555 return sizeof(struct raw1394_request);
558 static int state_initialized(struct file_info *fi, struct pending_request *req)
560 unsigned long flags;
561 struct host_info *hi;
562 struct raw1394_khost_list *khl;
564 if (req->req.generation != atomic_read(&internal_generation)) {
565 req->req.error = RAW1394_ERROR_GENERATION;
566 req->req.generation = atomic_read(&internal_generation);
567 req->req.length = 0;
568 queue_complete_req(req);
569 return sizeof(struct raw1394_request);
572 switch (req->req.type) {
573 case RAW1394_REQ_LIST_CARDS:
574 spin_lock_irqsave(&host_info_lock, flags);
575 khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC);
577 if (khl) {
578 req->req.misc = host_count;
579 req->data = (quadlet_t *) khl;
581 list_for_each_entry(hi, &host_info_list, list) {
582 khl->nodes = hi->host->node_count;
583 strcpy(khl->name, hi->host->driver->name);
584 khl++;
587 spin_unlock_irqrestore(&host_info_lock, flags);
589 if (khl) {
590 req->req.error = RAW1394_ERROR_NONE;
591 req->req.length = min(req->req.length,
592 (u32) (sizeof
593 (struct raw1394_khost_list)
594 * req->req.misc));
595 req->free_data = 1;
596 } else {
597 return -ENOMEM;
599 break;
601 case RAW1394_REQ_SET_CARD:
602 spin_lock_irqsave(&host_info_lock, flags);
603 if (req->req.misc < host_count) {
604 list_for_each_entry(hi, &host_info_list, list) {
605 if (!req->req.misc--)
606 break;
608 get_device(&hi->host->device); // XXX Need to handle failure case
609 list_add_tail(&fi->list, &hi->file_info_list);
610 fi->host = hi->host;
611 fi->state = connected;
613 req->req.error = RAW1394_ERROR_NONE;
614 req->req.generation = get_hpsb_generation(fi->host);
615 req->req.misc = (fi->host->node_id << 16)
616 | fi->host->node_count;
617 if (fi->protocol_version > 3) {
618 req->req.misc |=
619 NODEID_TO_NODE(fi->host->irm_id) << 8;
621 } else {
622 req->req.error = RAW1394_ERROR_INVALID_ARG;
624 spin_unlock_irqrestore(&host_info_lock, flags);
626 req->req.length = 0;
627 break;
629 default:
630 req->req.error = RAW1394_ERROR_STATE_ORDER;
631 req->req.length = 0;
632 break;
635 queue_complete_req(req);
636 return sizeof(struct raw1394_request);
639 static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
641 int channel = req->req.misc;
643 if ((channel > 63) || (channel < -64)) {
644 req->req.error = RAW1394_ERROR_INVALID_ARG;
645 } else if (channel >= 0) {
646 /* allocate channel req.misc */
647 if (fi->listen_channels & (1ULL << channel)) {
648 req->req.error = RAW1394_ERROR_ALREADY;
649 } else {
650 if (hpsb_listen_channel
651 (&raw1394_highlevel, fi->host, channel)) {
652 req->req.error = RAW1394_ERROR_ALREADY;
653 } else {
654 fi->listen_channels |= 1ULL << channel;
655 fi->iso_buffer = int2ptr(req->req.recvb);
656 fi->iso_buffer_length = req->req.length;
659 } else {
660 /* deallocate channel (one's complement neg) req.misc */
661 channel = ~channel;
663 if (fi->listen_channels & (1ULL << channel)) {
664 hpsb_unlisten_channel(&raw1394_highlevel, fi->host,
665 channel);
666 fi->listen_channels &= ~(1ULL << channel);
667 } else {
668 req->req.error = RAW1394_ERROR_INVALID_ARG;
672 req->req.length = 0;
673 queue_complete_req(req);
676 static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
678 if (req->req.misc) {
679 if (fi->fcp_buffer) {
680 req->req.error = RAW1394_ERROR_ALREADY;
681 } else {
682 fi->fcp_buffer = int2ptr(req->req.recvb);
684 } else {
685 if (!fi->fcp_buffer) {
686 req->req.error = RAW1394_ERROR_ALREADY;
687 } else {
688 fi->fcp_buffer = NULL;
692 req->req.length = 0;
693 queue_complete_req(req);
696 static int handle_async_request(struct file_info *fi,
697 struct pending_request *req, int node)
699 unsigned long flags;
700 struct hpsb_packet *packet = NULL;
701 u64 addr = req->req.address & 0xffffffffffffULL;
703 switch (req->req.type) {
704 case RAW1394_REQ_ASYNC_READ:
705 DBGMSG("read_request called");
706 packet =
707 hpsb_make_readpacket(fi->host, node, addr, req->req.length);
709 if (!packet)
710 return -ENOMEM;
712 if (req->req.length == 4)
713 req->data = &packet->header[3];
714 else
715 req->data = packet->data;
717 break;
719 case RAW1394_REQ_ASYNC_WRITE:
720 DBGMSG("write_request called");
722 packet = hpsb_make_writepacket(fi->host, node, addr, NULL,
723 req->req.length);
724 if (!packet)
725 return -ENOMEM;
727 if (req->req.length == 4) {
728 if (copy_from_user
729 (&packet->header[3], int2ptr(req->req.sendb),
730 req->req.length))
731 req->req.error = RAW1394_ERROR_MEMFAULT;
732 } else {
733 if (copy_from_user
734 (packet->data, int2ptr(req->req.sendb),
735 req->req.length))
736 req->req.error = RAW1394_ERROR_MEMFAULT;
739 req->req.length = 0;
740 break;
742 case RAW1394_REQ_ASYNC_STREAM:
743 DBGMSG("stream_request called");
745 packet =
746 hpsb_make_streampacket(fi->host, NULL, req->req.length,
747 node & 0x3f /*channel */ ,
748 (req->req.misc >> 16) & 0x3,
749 req->req.misc & 0xf);
750 if (!packet)
751 return -ENOMEM;
753 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
754 req->req.length))
755 req->req.error = RAW1394_ERROR_MEMFAULT;
757 req->req.length = 0;
758 break;
760 case RAW1394_REQ_LOCK:
761 DBGMSG("lock_request called");
762 if ((req->req.misc == EXTCODE_FETCH_ADD)
763 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
764 if (req->req.length != 4) {
765 req->req.error = RAW1394_ERROR_INVALID_ARG;
766 break;
768 } else {
769 if (req->req.length != 8) {
770 req->req.error = RAW1394_ERROR_INVALID_ARG;
771 break;
775 packet = hpsb_make_lockpacket(fi->host, node, addr,
776 req->req.misc, NULL, 0);
777 if (!packet)
778 return -ENOMEM;
780 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
781 req->req.length)) {
782 req->req.error = RAW1394_ERROR_MEMFAULT;
783 break;
786 req->data = packet->data;
787 req->req.length = 4;
788 break;
790 case RAW1394_REQ_LOCK64:
791 DBGMSG("lock64_request called");
792 if ((req->req.misc == EXTCODE_FETCH_ADD)
793 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
794 if (req->req.length != 8) {
795 req->req.error = RAW1394_ERROR_INVALID_ARG;
796 break;
798 } else {
799 if (req->req.length != 16) {
800 req->req.error = RAW1394_ERROR_INVALID_ARG;
801 break;
804 packet = hpsb_make_lock64packet(fi->host, node, addr,
805 req->req.misc, NULL, 0);
806 if (!packet)
807 return -ENOMEM;
809 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
810 req->req.length)) {
811 req->req.error = RAW1394_ERROR_MEMFAULT;
812 break;
815 req->data = packet->data;
816 req->req.length = 8;
817 break;
819 default:
820 req->req.error = RAW1394_ERROR_STATE_ORDER;
823 req->packet = packet;
825 if (req->req.error) {
826 req->req.length = 0;
827 queue_complete_req(req);
828 return sizeof(struct raw1394_request);
831 hpsb_set_packet_complete_task(packet,
832 (void (*)(void *))queue_complete_cb, req);
834 spin_lock_irqsave(&fi->reqlists_lock, flags);
835 list_add_tail(&req->list, &fi->req_pending);
836 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
838 packet->generation = req->req.generation;
840 if (hpsb_send_packet(packet) < 0) {
841 req->req.error = RAW1394_ERROR_SEND_ERROR;
842 req->req.length = 0;
843 hpsb_free_tlabel(packet);
844 queue_complete_req(req);
846 return sizeof(struct raw1394_request);
849 static int handle_iso_send(struct file_info *fi, struct pending_request *req,
850 int channel)
852 unsigned long flags;
853 struct hpsb_packet *packet;
855 packet = hpsb_make_isopacket(fi->host, req->req.length, channel & 0x3f,
856 (req->req.misc >> 16) & 0x3,
857 req->req.misc & 0xf);
858 if (!packet)
859 return -ENOMEM;
861 packet->speed_code = req->req.address & 0x3;
863 req->packet = packet;
865 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
866 req->req.length)) {
867 req->req.error = RAW1394_ERROR_MEMFAULT;
868 req->req.length = 0;
869 queue_complete_req(req);
870 return sizeof(struct raw1394_request);
873 req->req.length = 0;
874 hpsb_set_packet_complete_task(packet,
875 (void (*)(void *))queue_complete_req,
876 req);
878 spin_lock_irqsave(&fi->reqlists_lock, flags);
879 list_add_tail(&req->list, &fi->req_pending);
880 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
882 /* Update the generation of the packet just before sending. */
883 packet->generation = req->req.generation;
885 if (hpsb_send_packet(packet) < 0) {
886 req->req.error = RAW1394_ERROR_SEND_ERROR;
887 queue_complete_req(req);
890 return sizeof(struct raw1394_request);
893 static int handle_async_send(struct file_info *fi, struct pending_request *req)
895 unsigned long flags;
896 struct hpsb_packet *packet;
897 int header_length = req->req.misc & 0xffff;
898 int expect_response = req->req.misc >> 16;
900 if ((header_length > req->req.length) || (header_length < 12)) {
901 req->req.error = RAW1394_ERROR_INVALID_ARG;
902 req->req.length = 0;
903 queue_complete_req(req);
904 return sizeof(struct raw1394_request);
907 packet = hpsb_alloc_packet(req->req.length - header_length);
908 req->packet = packet;
909 if (!packet)
910 return -ENOMEM;
912 if (copy_from_user(packet->header, int2ptr(req->req.sendb),
913 header_length)) {
914 req->req.error = RAW1394_ERROR_MEMFAULT;
915 req->req.length = 0;
916 queue_complete_req(req);
917 return sizeof(struct raw1394_request);
920 if (copy_from_user
921 (packet->data, int2ptr(req->req.sendb) + header_length,
922 packet->data_size)) {
923 req->req.error = RAW1394_ERROR_MEMFAULT;
924 req->req.length = 0;
925 queue_complete_req(req);
926 return sizeof(struct raw1394_request);
929 packet->type = hpsb_async;
930 packet->node_id = packet->header[0] >> 16;
931 packet->tcode = (packet->header[0] >> 4) & 0xf;
932 packet->tlabel = (packet->header[0] >> 10) & 0x3f;
933 packet->host = fi->host;
934 packet->expect_response = expect_response;
935 packet->header_size = header_length;
936 packet->data_size = req->req.length - header_length;
938 req->req.length = 0;
939 hpsb_set_packet_complete_task(packet,
940 (void (*)(void *))queue_complete_cb, req);
942 spin_lock_irqsave(&fi->reqlists_lock, flags);
943 list_add_tail(&req->list, &fi->req_pending);
944 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
946 /* Update the generation of the packet just before sending. */
947 packet->generation = req->req.generation;
949 if (hpsb_send_packet(packet) < 0) {
950 req->req.error = RAW1394_ERROR_SEND_ERROR;
951 queue_complete_req(req);
954 return sizeof(struct raw1394_request);
957 static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
958 u64 addr, size_t length, u16 flags)
960 unsigned long irqflags;
961 struct pending_request *req;
962 struct host_info *hi;
963 struct file_info *fi = NULL;
964 struct list_head *entry;
965 struct arm_addr *arm_addr = NULL;
966 struct arm_request *arm_req = NULL;
967 struct arm_response *arm_resp = NULL;
968 int found = 0, size = 0, rcode = -1;
969 struct arm_request_response *arm_req_resp = NULL;
971 DBGMSG("arm_read called by node: %X"
972 "addr: %4.4x %8.8x length: %Zu", nodeid,
973 (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
974 length);
975 spin_lock_irqsave(&host_info_lock, irqflags);
976 hi = find_host_info(host); /* search address-entry */
977 if (hi != NULL) {
978 list_for_each_entry(fi, &hi->file_info_list, list) {
979 entry = fi->addr_list.next;
980 while (entry != &(fi->addr_list)) {
981 arm_addr =
982 list_entry(entry, struct arm_addr,
983 addr_list);
984 if (((arm_addr->start) <= (addr))
985 && ((arm_addr->end) >= (addr + length))) {
986 found = 1;
987 break;
989 entry = entry->next;
991 if (found) {
992 break;
996 rcode = -1;
997 if (!found) {
998 printk(KERN_ERR "raw1394: arm_read FAILED addr_entry not found"
999 " -> rcode_address_error\n");
1000 spin_unlock_irqrestore(&host_info_lock, irqflags);
1001 return (RCODE_ADDRESS_ERROR);
1002 } else {
1003 DBGMSG("arm_read addr_entry FOUND");
1005 if (arm_addr->rec_length < length) {
1006 DBGMSG("arm_read blocklength too big -> rcode_data_error");
1007 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
1009 if (rcode == -1) {
1010 if (arm_addr->access_rights & ARM_READ) {
1011 if (!(arm_addr->client_transactions & ARM_READ)) {
1012 memcpy(buffer,
1013 (arm_addr->addr_space_buffer) + (addr -
1014 (arm_addr->
1015 start)),
1016 length);
1017 DBGMSG("arm_read -> (rcode_complete)");
1018 rcode = RCODE_COMPLETE;
1020 } else {
1021 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1022 DBGMSG("arm_read -> rcode_type_error (access denied)");
1025 if (arm_addr->notification_options & ARM_READ) {
1026 DBGMSG("arm_read -> entering notification-section");
1027 req = __alloc_pending_request(SLAB_ATOMIC);
1028 if (!req) {
1029 DBGMSG("arm_read -> rcode_conflict_error");
1030 spin_unlock_irqrestore(&host_info_lock, irqflags);
1031 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1032 The request may be retried */
1034 if (rcode == RCODE_COMPLETE) {
1035 size =
1036 sizeof(struct arm_request) +
1037 sizeof(struct arm_response) +
1038 length * sizeof(byte_t) +
1039 sizeof(struct arm_request_response);
1040 } else {
1041 size =
1042 sizeof(struct arm_request) +
1043 sizeof(struct arm_response) +
1044 sizeof(struct arm_request_response);
1046 req->data = kmalloc(size, SLAB_ATOMIC);
1047 if (!(req->data)) {
1048 free_pending_request(req);
1049 DBGMSG("arm_read -> rcode_conflict_error");
1050 spin_unlock_irqrestore(&host_info_lock, irqflags);
1051 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1052 The request may be retried */
1054 req->free_data = 1;
1055 req->file_info = fi;
1056 req->req.type = RAW1394_REQ_ARM;
1057 req->req.generation = get_hpsb_generation(host);
1058 req->req.misc =
1059 (((length << 16) & (0xFFFF0000)) | (ARM_READ & 0xFF));
1060 req->req.tag = arm_addr->arm_tag;
1061 req->req.recvb = arm_addr->recvb;
1062 req->req.length = size;
1063 arm_req_resp = (struct arm_request_response *)(req->data);
1064 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1065 (sizeof
1066 (struct
1067 arm_request_response)));
1068 arm_resp =
1069 (struct arm_response *)((byte_t *) (arm_req) +
1070 (sizeof(struct arm_request)));
1071 arm_req->buffer = NULL;
1072 arm_resp->buffer = NULL;
1073 if (rcode == RCODE_COMPLETE) {
1074 byte_t *buf =
1075 (byte_t *) arm_resp + sizeof(struct arm_response);
1076 memcpy(buf,
1077 (arm_addr->addr_space_buffer) + (addr -
1078 (arm_addr->
1079 start)),
1080 length);
1081 arm_resp->buffer =
1082 int2ptr((arm_addr->recvb) +
1083 sizeof(struct arm_request_response) +
1084 sizeof(struct arm_request) +
1085 sizeof(struct arm_response));
1087 arm_resp->buffer_length =
1088 (rcode == RCODE_COMPLETE) ? length : 0;
1089 arm_resp->response_code = rcode;
1090 arm_req->buffer_length = 0;
1091 arm_req->generation = req->req.generation;
1092 arm_req->extended_transaction_code = 0;
1093 arm_req->destination_offset = addr;
1094 arm_req->source_nodeid = nodeid;
1095 arm_req->destination_nodeid = host->node_id;
1096 arm_req->tlabel = (flags >> 10) & 0x3f;
1097 arm_req->tcode = (flags >> 4) & 0x0f;
1098 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1099 sizeof(struct
1100 arm_request_response));
1101 arm_req_resp->response =
1102 int2ptr((arm_addr->recvb) +
1103 sizeof(struct arm_request_response) +
1104 sizeof(struct arm_request));
1105 queue_complete_req(req);
1107 spin_unlock_irqrestore(&host_info_lock, irqflags);
1108 return (rcode);
1111 static int arm_write(struct hpsb_host *host, int nodeid, int destid,
1112 quadlet_t * data, u64 addr, size_t length, u16 flags)
1114 unsigned long irqflags;
1115 struct pending_request *req;
1116 struct host_info *hi;
1117 struct file_info *fi = NULL;
1118 struct list_head *entry;
1119 struct arm_addr *arm_addr = NULL;
1120 struct arm_request *arm_req = NULL;
1121 struct arm_response *arm_resp = NULL;
1122 int found = 0, size = 0, rcode = -1, length_conflict = 0;
1123 struct arm_request_response *arm_req_resp = NULL;
1125 DBGMSG("arm_write called by node: %X"
1126 "addr: %4.4x %8.8x length: %Zu", nodeid,
1127 (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
1128 length);
1129 spin_lock_irqsave(&host_info_lock, irqflags);
1130 hi = find_host_info(host); /* search address-entry */
1131 if (hi != NULL) {
1132 list_for_each_entry(fi, &hi->file_info_list, list) {
1133 entry = fi->addr_list.next;
1134 while (entry != &(fi->addr_list)) {
1135 arm_addr =
1136 list_entry(entry, struct arm_addr,
1137 addr_list);
1138 if (((arm_addr->start) <= (addr))
1139 && ((arm_addr->end) >= (addr + length))) {
1140 found = 1;
1141 break;
1143 entry = entry->next;
1145 if (found) {
1146 break;
1150 rcode = -1;
1151 if (!found) {
1152 printk(KERN_ERR "raw1394: arm_write FAILED addr_entry not found"
1153 " -> rcode_address_error\n");
1154 spin_unlock_irqrestore(&host_info_lock, irqflags);
1155 return (RCODE_ADDRESS_ERROR);
1156 } else {
1157 DBGMSG("arm_write addr_entry FOUND");
1159 if (arm_addr->rec_length < length) {
1160 DBGMSG("arm_write blocklength too big -> rcode_data_error");
1161 length_conflict = 1;
1162 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
1164 if (rcode == -1) {
1165 if (arm_addr->access_rights & ARM_WRITE) {
1166 if (!(arm_addr->client_transactions & ARM_WRITE)) {
1167 memcpy((arm_addr->addr_space_buffer) +
1168 (addr - (arm_addr->start)), data,
1169 length);
1170 DBGMSG("arm_write -> (rcode_complete)");
1171 rcode = RCODE_COMPLETE;
1173 } else {
1174 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1175 DBGMSG("arm_write -> rcode_type_error (access denied)");
1178 if (arm_addr->notification_options & ARM_WRITE) {
1179 DBGMSG("arm_write -> entering notification-section");
1180 req = __alloc_pending_request(SLAB_ATOMIC);
1181 if (!req) {
1182 DBGMSG("arm_write -> rcode_conflict_error");
1183 spin_unlock_irqrestore(&host_info_lock, irqflags);
1184 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1185 The request my be retried */
1187 size =
1188 sizeof(struct arm_request) + sizeof(struct arm_response) +
1189 (length) * sizeof(byte_t) +
1190 sizeof(struct arm_request_response);
1191 req->data = kmalloc(size, SLAB_ATOMIC);
1192 if (!(req->data)) {
1193 free_pending_request(req);
1194 DBGMSG("arm_write -> rcode_conflict_error");
1195 spin_unlock_irqrestore(&host_info_lock, irqflags);
1196 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1197 The request may be retried */
1199 req->free_data = 1;
1200 req->file_info = fi;
1201 req->req.type = RAW1394_REQ_ARM;
1202 req->req.generation = get_hpsb_generation(host);
1203 req->req.misc =
1204 (((length << 16) & (0xFFFF0000)) | (ARM_WRITE & 0xFF));
1205 req->req.tag = arm_addr->arm_tag;
1206 req->req.recvb = arm_addr->recvb;
1207 req->req.length = size;
1208 arm_req_resp = (struct arm_request_response *)(req->data);
1209 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1210 (sizeof
1211 (struct
1212 arm_request_response)));
1213 arm_resp =
1214 (struct arm_response *)((byte_t *) (arm_req) +
1215 (sizeof(struct arm_request)));
1216 arm_resp->buffer = NULL;
1217 memcpy((byte_t *) arm_resp + sizeof(struct arm_response),
1218 data, length);
1219 arm_req->buffer = int2ptr((arm_addr->recvb) +
1220 sizeof(struct arm_request_response) +
1221 sizeof(struct arm_request) +
1222 sizeof(struct arm_response));
1223 arm_req->buffer_length = length;
1224 arm_req->generation = req->req.generation;
1225 arm_req->extended_transaction_code = 0;
1226 arm_req->destination_offset = addr;
1227 arm_req->source_nodeid = nodeid;
1228 arm_req->destination_nodeid = destid;
1229 arm_req->tlabel = (flags >> 10) & 0x3f;
1230 arm_req->tcode = (flags >> 4) & 0x0f;
1231 arm_resp->buffer_length = 0;
1232 arm_resp->response_code = rcode;
1233 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1234 sizeof(struct
1235 arm_request_response));
1236 arm_req_resp->response =
1237 int2ptr((arm_addr->recvb) +
1238 sizeof(struct arm_request_response) +
1239 sizeof(struct arm_request));
1240 queue_complete_req(req);
1242 spin_unlock_irqrestore(&host_info_lock, irqflags);
1243 return (rcode);
/*
 * Highlevel callback for 32-bit lock transactions that hit an address
 * range registered by a raw1394 client (ARM = address range mapping).
 *
 * Looks up the arm_addr entry covering [addr, addr+4), emulates the
 * requested extended tcode on the client's kernel-side buffer (unless the
 * client handles lock transactions itself via client_transactions), and,
 * if the client requested notification for locks, queues an ARM event
 * that carries the request, the response, and the operands.
 *
 * Returns an RCODE_* value for the responding hpsb layer; host_info_lock
 * is held (IRQs off) for the whole lookup/emulate/notify sequence.
 */
static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
		    u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
		    u16 flags)
{
	unsigned long irqflags;
	struct pending_request *req;
	struct host_info *hi;
	struct file_info *fi = NULL;
	struct list_head *entry;
	struct arm_addr *arm_addr = NULL;
	struct arm_request *arm_req = NULL;
	struct arm_response *arm_resp = NULL;
	int found = 0, size = 0, rcode = -1;
	quadlet_t old, new;
	struct arm_request_response *arm_req_resp = NULL;

	/* FETCH_ADD and LITTLE_ADD carry no 'arg' operand, so log data only */
	if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
	    ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
		DBGMSG("arm_lock called by node: %X "
		       "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X",
		       nodeid, (u16) ((addr >> 32) & 0xFFFF),
		       (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
		       be32_to_cpu(data));
	} else {
		DBGMSG("arm_lock called by node: %X "
		       "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X arg: %8.8X",
		       nodeid, (u16) ((addr >> 32) & 0xFFFF),
		       (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
		       be32_to_cpu(data), be32_to_cpu(arg));
	}
	spin_lock_irqsave(&host_info_lock, irqflags);
	hi = find_host_info(host);	/* search address-entry */
	if (hi != NULL) {
		/* find the registered range fully containing the quadlet */
		list_for_each_entry(fi, &hi->file_info_list, list) {
			entry = fi->addr_list.next;
			while (entry != &(fi->addr_list)) {
				arm_addr =
				    list_entry(entry, struct arm_addr,
					       addr_list);
				if (((arm_addr->start) <= (addr))
				    && ((arm_addr->end) >=
					(addr + sizeof(*store)))) {
					found = 1;
					break;
				}
				entry = entry->next;
			}
			if (found) {
				break;
			}
		}
	}
	rcode = -1;	/* -1 == not decided yet */
	if (!found) {
		printk(KERN_ERR "raw1394: arm_lock FAILED addr_entry not found"
		       " -> rcode_address_error\n");
		spin_unlock_irqrestore(&host_info_lock, irqflags);
		return (RCODE_ADDRESS_ERROR);
	} else {
		DBGMSG("arm_lock addr_entry FOUND");
	}
	if (rcode == -1) {
		if (arm_addr->access_rights & ARM_LOCK) {
			if (!(arm_addr->client_transactions & ARM_LOCK)) {
				/* emulate the lock on the client's buffer:
				 * read old value, compute new per ext_tcode */
				memcpy(&old,
				       (arm_addr->addr_space_buffer) + (addr -
									(arm_addr->
									 start)),
				       sizeof(old));
				switch (ext_tcode) {
				case (EXTCODE_MASK_SWAP):
					new = data | (old & ~arg);
					break;
				case (EXTCODE_COMPARE_SWAP):
					if (old == arg) {
						new = data;
					} else {
						new = old;
					}
					break;
				case (EXTCODE_FETCH_ADD):
					/* big-endian unconditional add */
					new =
					    cpu_to_be32(be32_to_cpu(data) +
							be32_to_cpu(old));
					break;
				case (EXTCODE_LITTLE_ADD):
					new =
					    cpu_to_le32(le32_to_cpu(data) +
							le32_to_cpu(old));
					break;
				case (EXTCODE_BOUNDED_ADD):
					if (old != arg) {
						new =
						    cpu_to_be32(be32_to_cpu
								(data) +
								be32_to_cpu
								(old));
					} else {
						new = old;
					}
					break;
				case (EXTCODE_WRAP_ADD):
					if (old != arg) {
						new =
						    cpu_to_be32(be32_to_cpu
								(data) +
								be32_to_cpu
								(old));
					} else {
						new = data;
					}
					break;
				default:
					rcode = RCODE_TYPE_ERROR;	/* function not allowed */
					printk(KERN_ERR
					       "raw1394: arm_lock FAILED "
					       "ext_tcode not allowed -> rcode_type_error\n");
					break;
				}	/*switch */
				if (rcode == -1) {
					DBGMSG("arm_lock -> (rcode_complete)");
					rcode = RCODE_COMPLETE;
					/* old value goes back to the requester,
					 * new value into the mapped buffer */
					memcpy(store, &old, sizeof(*store));
					memcpy((arm_addr->addr_space_buffer) +
					       (addr - (arm_addr->start)),
					       &new, sizeof(*store));
				}
			}
		} else {
			rcode = RCODE_TYPE_ERROR;	/* function not allowed */
			DBGMSG("arm_lock -> rcode_type_error (access denied)");
		}
	}
	if (arm_addr->notification_options & ARM_LOCK) {
		byte_t *buf1, *buf2;
		DBGMSG("arm_lock -> entering notification-section");
		req = __alloc_pending_request(SLAB_ATOMIC);
		if (!req) {
			DBGMSG("arm_lock -> rcode_conflict_error");
			spin_unlock_irqrestore(&host_info_lock, irqflags);
			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
							   The request may be retried */
		}
		/* worst case: two operands (arg+data) plus old value */
		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
		req->data = kmalloc(size, SLAB_ATOMIC);
		if (!(req->data)) {
			free_pending_request(req);
			DBGMSG("arm_lock -> rcode_conflict_error");
			spin_unlock_irqrestore(&host_info_lock, irqflags);
			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
							   The request may be retried */
		}
		req->free_data = 1;
		/* req->data layout: arm_request_response | arm_request |
		 * arm_response | buf1 (operands) | buf2 (old value) */
		arm_req_resp = (struct arm_request_response *)(req->data);
		arm_req = (struct arm_request *)((byte_t *) (req->data) +
						 (sizeof
						  (struct
						   arm_request_response)));
		arm_resp =
		    (struct arm_response *)((byte_t *) (arm_req) +
					    (sizeof(struct arm_request)));
		buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
		buf2 = buf1 + 2 * sizeof(*store);
		if ((ext_tcode == EXTCODE_FETCH_ADD) ||
		    (ext_tcode == EXTCODE_LITTLE_ADD)) {
			/* single-operand tcodes ship only 'data' */
			arm_req->buffer_length = sizeof(*store);
			memcpy(buf1, &data, sizeof(*store));
		} else {
			arm_req->buffer_length = 2 * sizeof(*store);
			memcpy(buf1, &arg, sizeof(*store));
			memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
		}
		if (rcode == RCODE_COMPLETE) {
			arm_resp->buffer_length = sizeof(*store);
			memcpy(buf2, &old, sizeof(*store));
		} else {
			arm_resp->buffer_length = 0;
		}
		req->file_info = fi;
		req->req.type = RAW1394_REQ_ARM;
		req->req.generation = get_hpsb_generation(host);
		/* misc: payload size in the high 16 bits, event type below */
		req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
				 (ARM_LOCK & 0xFF));
		req->req.tag = arm_addr->arm_tag;
		req->req.recvb = arm_addr->recvb;
		req->req.length = size;
		arm_req->generation = req->req.generation;
		arm_req->extended_transaction_code = ext_tcode;
		arm_req->destination_offset = addr;
		arm_req->source_nodeid = nodeid;
		arm_req->destination_nodeid = host->node_id;
		arm_req->tlabel = (flags >> 10) & 0x3f;
		arm_req->tcode = (flags >> 4) & 0x0f;
		arm_resp->response_code = rcode;
		/* the embedded pointers are rebased onto the client's
		 * userspace receive buffer (arm_addr->recvb) */
		arm_req_resp->request = int2ptr((arm_addr->recvb) +
						sizeof(struct
						       arm_request_response));
		arm_req_resp->response =
		    int2ptr((arm_addr->recvb) +
			    sizeof(struct arm_request_response) +
			    sizeof(struct arm_request));
		arm_req->buffer =
		    int2ptr((arm_addr->recvb) +
			    sizeof(struct arm_request_response) +
			    sizeof(struct arm_request) +
			    sizeof(struct arm_response));
		arm_resp->buffer =
		    int2ptr((arm_addr->recvb) +
			    sizeof(struct arm_request_response) +
			    sizeof(struct arm_request) +
			    sizeof(struct arm_response) + 2 * sizeof(*store));
		queue_complete_req(req);
	}
	spin_unlock_irqrestore(&host_info_lock, irqflags);
	return (rcode);
}
/*
 * Highlevel callback for 64-bit lock transactions on a client-registered
 * address range.  Identical in structure to arm_lock() but operates on
 * octlets: the range must contain [addr, addr+8), the extended tcodes are
 * emulated with 64-bit byte-order helpers, and the notification event
 * reserves room for up to three octlets (arg, data, old value).
 *
 * Returns an RCODE_* value; host_info_lock is held (IRQs off) throughout.
 */
static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
		      u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
		      u16 flags)
{
	unsigned long irqflags;
	struct pending_request *req;
	struct host_info *hi;
	struct file_info *fi = NULL;
	struct list_head *entry;
	struct arm_addr *arm_addr = NULL;
	struct arm_request *arm_req = NULL;
	struct arm_response *arm_resp = NULL;
	int found = 0, size = 0, rcode = -1;
	octlet_t old, new;
	struct arm_request_response *arm_req_resp = NULL;

	/* FETCH_ADD and LITTLE_ADD carry no 'arg' operand, so log data only */
	if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
	    ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
		DBGMSG("arm_lock64 called by node: %X "
		       "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X ",
		       nodeid, (u16) ((addr >> 32) & 0xFFFF),
		       (u32) (addr & 0xFFFFFFFF),
		       ext_tcode & 0xFF,
		       (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
		       (u32) (be64_to_cpu(data) & 0xFFFFFFFF));
	} else {
		DBGMSG("arm_lock64 called by node: %X "
		       "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X arg: "
		       "%8.8X %8.8X ",
		       nodeid, (u16) ((addr >> 32) & 0xFFFF),
		       (u32) (addr & 0xFFFFFFFF),
		       ext_tcode & 0xFF,
		       (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
		       (u32) (be64_to_cpu(data) & 0xFFFFFFFF),
		       (u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF),
		       (u32) (be64_to_cpu(arg) & 0xFFFFFFFF));
	}
	spin_lock_irqsave(&host_info_lock, irqflags);
	hi = find_host_info(host);	/* search addressentry in file_info's for host */
	if (hi != NULL) {
		/* find the registered range fully containing the octlet */
		list_for_each_entry(fi, &hi->file_info_list, list) {
			entry = fi->addr_list.next;
			while (entry != &(fi->addr_list)) {
				arm_addr =
				    list_entry(entry, struct arm_addr,
					       addr_list);
				if (((arm_addr->start) <= (addr))
				    && ((arm_addr->end) >=
					(addr + sizeof(*store)))) {
					found = 1;
					break;
				}
				entry = entry->next;
			}
			if (found) {
				break;
			}
		}
	}
	rcode = -1;	/* -1 == not decided yet */
	if (!found) {
		printk(KERN_ERR
		       "raw1394: arm_lock64 FAILED addr_entry not found"
		       " -> rcode_address_error\n");
		spin_unlock_irqrestore(&host_info_lock, irqflags);
		return (RCODE_ADDRESS_ERROR);
	} else {
		DBGMSG("arm_lock64 addr_entry FOUND");
	}
	if (rcode == -1) {
		if (arm_addr->access_rights & ARM_LOCK) {
			if (!(arm_addr->client_transactions & ARM_LOCK)) {
				/* emulate the lock on the client's buffer:
				 * read old value, compute new per ext_tcode */
				memcpy(&old,
				       (arm_addr->addr_space_buffer) + (addr -
									(arm_addr->
									 start)),
				       sizeof(old));
				switch (ext_tcode) {
				case (EXTCODE_MASK_SWAP):
					new = data | (old & ~arg);
					break;
				case (EXTCODE_COMPARE_SWAP):
					if (old == arg) {
						new = data;
					} else {
						new = old;
					}
					break;
				case (EXTCODE_FETCH_ADD):
					/* big-endian unconditional add */
					new =
					    cpu_to_be64(be64_to_cpu(data) +
							be64_to_cpu(old));
					break;
				case (EXTCODE_LITTLE_ADD):
					new =
					    cpu_to_le64(le64_to_cpu(data) +
							le64_to_cpu(old));
					break;
				case (EXTCODE_BOUNDED_ADD):
					if (old != arg) {
						new =
						    cpu_to_be64(be64_to_cpu
								(data) +
								be64_to_cpu
								(old));
					} else {
						new = old;
					}
					break;
				case (EXTCODE_WRAP_ADD):
					if (old != arg) {
						new =
						    cpu_to_be64(be64_to_cpu
								(data) +
								be64_to_cpu
								(old));
					} else {
						new = data;
					}
					break;
				default:
					printk(KERN_ERR
					       "raw1394: arm_lock64 FAILED "
					       "ext_tcode not allowed -> rcode_type_error\n");
					rcode = RCODE_TYPE_ERROR;	/* function not allowed */
					break;
				}	/*switch */
				if (rcode == -1) {
					DBGMSG
					    ("arm_lock64 -> (rcode_complete)");
					rcode = RCODE_COMPLETE;
					/* old value goes back to the requester,
					 * new value into the mapped buffer */
					memcpy(store, &old, sizeof(*store));
					memcpy((arm_addr->addr_space_buffer) +
					       (addr - (arm_addr->start)),
					       &new, sizeof(*store));
				}
			}
		} else {
			rcode = RCODE_TYPE_ERROR;	/* function not allowed */
			DBGMSG
			    ("arm_lock64 -> rcode_type_error (access denied)");
		}
	}
	if (arm_addr->notification_options & ARM_LOCK) {
		byte_t *buf1, *buf2;
		DBGMSG("arm_lock64 -> entering notification-section");
		req = __alloc_pending_request(SLAB_ATOMIC);
		if (!req) {
			spin_unlock_irqrestore(&host_info_lock, irqflags);
			DBGMSG("arm_lock64 -> rcode_conflict_error");
			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
							   The request may be retried */
		}
		/* worst case: two operands (arg+data) plus old value */
		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
		req->data = kmalloc(size, SLAB_ATOMIC);
		if (!(req->data)) {
			free_pending_request(req);
			spin_unlock_irqrestore(&host_info_lock, irqflags);
			DBGMSG("arm_lock64 -> rcode_conflict_error");
			return (RCODE_CONFLICT_ERROR);	/* A resource conflict was detected.
							   The request may be retried */
		}
		req->free_data = 1;
		/* req->data layout: arm_request_response | arm_request |
		 * arm_response | buf1 (operands) | buf2 (old value) */
		arm_req_resp = (struct arm_request_response *)(req->data);
		arm_req = (struct arm_request *)((byte_t *) (req->data) +
						 (sizeof
						  (struct
						   arm_request_response)));
		arm_resp =
		    (struct arm_response *)((byte_t *) (arm_req) +
					    (sizeof(struct arm_request)));
		buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
		buf2 = buf1 + 2 * sizeof(*store);
		if ((ext_tcode == EXTCODE_FETCH_ADD) ||
		    (ext_tcode == EXTCODE_LITTLE_ADD)) {
			/* single-operand tcodes ship only 'data' */
			arm_req->buffer_length = sizeof(*store);
			memcpy(buf1, &data, sizeof(*store));
		} else {
			arm_req->buffer_length = 2 * sizeof(*store);
			memcpy(buf1, &arg, sizeof(*store));
			memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
		}
		if (rcode == RCODE_COMPLETE) {
			arm_resp->buffer_length = sizeof(*store);
			memcpy(buf2, &old, sizeof(*store));
		} else {
			arm_resp->buffer_length = 0;
		}
		req->file_info = fi;
		req->req.type = RAW1394_REQ_ARM;
		req->req.generation = get_hpsb_generation(host);
		/* misc: payload size in the high 16 bits, event type below */
		req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
				 (ARM_LOCK & 0xFF));
		req->req.tag = arm_addr->arm_tag;
		req->req.recvb = arm_addr->recvb;
		req->req.length = size;
		arm_req->generation = req->req.generation;
		arm_req->extended_transaction_code = ext_tcode;
		arm_req->destination_offset = addr;
		arm_req->source_nodeid = nodeid;
		arm_req->destination_nodeid = host->node_id;
		arm_req->tlabel = (flags >> 10) & 0x3f;
		arm_req->tcode = (flags >> 4) & 0x0f;
		arm_resp->response_code = rcode;
		/* the embedded pointers are rebased onto the client's
		 * userspace receive buffer (arm_addr->recvb) */
		arm_req_resp->request = int2ptr((arm_addr->recvb) +
						sizeof(struct
						       arm_request_response));
		arm_req_resp->response =
		    int2ptr((arm_addr->recvb) +
			    sizeof(struct arm_request_response) +
			    sizeof(struct arm_request));
		arm_req->buffer =
		    int2ptr((arm_addr->recvb) +
			    sizeof(struct arm_request_response) +
			    sizeof(struct arm_request) +
			    sizeof(struct arm_response));
		arm_resp->buffer =
		    int2ptr((arm_addr->recvb) +
			    sizeof(struct arm_request_response) +
			    sizeof(struct arm_request) +
			    sizeof(struct arm_response) + 2 * sizeof(*store));
		queue_complete_req(req);
	}
	spin_unlock_irqrestore(&host_info_lock, irqflags);
	return (rcode);
}
/*
 * Handle RAW1394_REQ_ARM_REGISTER: register an address range mapping for
 * this client.  Allocates an arm_addr entry plus a vmalloc'ed backing
 * buffer (optionally preloaded from req->req.sendb), rejects ranges
 * already owned by the same host (-EALREADY), and registers the range
 * with the hpsb core unless another host already did so for the same
 * span — in that case only the local entry is recorded.
 *
 * Returns sizeof(struct raw1394_request) on success (and consumes req),
 * or a negative errno without consuming req.
 */
static int arm_register(struct file_info *fi, struct pending_request *req)
{
	int retval;
	struct arm_addr *addr;
	struct host_info *hi;
	struct file_info *fi_hlp = NULL;
	struct list_head *entry;
	struct arm_addr *arm_addr = NULL;
	int same_host, another_host;
	unsigned long flags;

	DBGMSG("arm_register called "
	       "addr(Offset): %8.8x %8.8x length: %u "
	       "rights: %2.2X notify: %2.2X "
	       "max_blk_len: %4.4X",
	       (u32) ((req->req.address >> 32) & 0xFFFF),
	       (u32) (req->req.address & 0xFFFFFFFF),
	       req->req.length, ((req->req.misc >> 8) & 0xFF),
	       (req->req.misc & 0xFF), ((req->req.misc >> 16) & 0xFFFF));
	/* check addressrange: both ends must fit in 48-bit 1394 space */
	if ((((req->req.address) & ~(0xFFFFFFFFFFFFULL)) != 0) ||
	    (((req->req.address + req->req.length) & ~(0xFFFFFFFFFFFFULL)) !=
	     0)) {
		req->req.length = 0;
		return (-EINVAL);
	}
	/* addr-list-entry for fileinfo */
	addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
	if (!addr) {
		req->req.length = 0;
		return (-ENOMEM);
	}
	/* allocation of addr_space_buffer */
	addr->addr_space_buffer = vmalloc(req->req.length);
	if (!(addr->addr_space_buffer)) {
		kfree(addr);
		req->req.length = 0;
		return (-ENOMEM);
	}
	/* initialization of addr_space_buffer */
	if ((req->req.sendb) == (unsigned long)NULL) {
		/* init: set 0 */
		memset(addr->addr_space_buffer, 0, req->req.length);
	} else {
		/* init: user -> kernel */
		if (copy_from_user
		    (addr->addr_space_buffer, int2ptr(req->req.sendb),
		     req->req.length)) {
			vfree(addr->addr_space_buffer);
			kfree(addr);
			return (-EFAULT);
		}
	}
	INIT_LIST_HEAD(&addr->addr_list);
	addr->arm_tag = req->req.tag;
	addr->start = req->req.address;
	addr->end = req->req.address + req->req.length;
	/* misc packs: rights [3:0], notify [7:4], client-handled [11:8],
	 * max block length [31:16] */
	addr->access_rights = (u8) (req->req.misc & 0x0F);
	addr->notification_options = (u8) ((req->req.misc >> 4) & 0x0F);
	addr->client_transactions = (u8) ((req->req.misc >> 8) & 0x0F);
	/* transactions the client handles itself imply both access and
	 * notification for those transaction types */
	addr->access_rights |= addr->client_transactions;
	addr->notification_options |= addr->client_transactions;
	addr->recvb = req->req.recvb;
	addr->rec_length = (u16) ((req->req.misc >> 16) & 0xFFFF);
	spin_lock_irqsave(&host_info_lock, flags);
	hi = find_host_info(fi->host);
	same_host = 0;
	another_host = 0;
	/* same host with address-entry containing same addressrange ? */
	list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
		entry = fi_hlp->addr_list.next;
		while (entry != &(fi_hlp->addr_list)) {
			arm_addr =
			    list_entry(entry, struct arm_addr, addr_list);
			if ((arm_addr->start == addr->start)
			    && (arm_addr->end == addr->end)) {
				DBGMSG("same host ownes same "
				       "addressrange -> EALREADY");
				same_host = 1;
				break;
			}
			entry = entry->next;
		}
		if (same_host) {
			break;
		}
	}
	if (same_host) {
		/* addressrange occupied by same host */
		vfree(addr->addr_space_buffer);
		kfree(addr);
		spin_unlock_irqrestore(&host_info_lock, flags);
		return (-EALREADY);
	}
	/* another host with valid address-entry containing same addressrange */
	list_for_each_entry(hi, &host_info_list, list) {
		if (hi->host != fi->host) {
			list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
				entry = fi_hlp->addr_list.next;
				while (entry != &(fi_hlp->addr_list)) {
					arm_addr =
					    list_entry(entry, struct arm_addr,
						       addr_list);
					if ((arm_addr->start == addr->start)
					    && (arm_addr->end == addr->end)) {
						DBGMSG
						    ("another host ownes same "
						     "addressrange");
						another_host = 1;
						break;
					}
					entry = entry->next;
				}
				if (another_host) {
					break;
				}
			}
		}
	}
	if (another_host) {
		/* the hpsb address space is already claimed by another host;
		 * just record the local entry and report the start address */
		DBGMSG("another hosts entry is valid -> SUCCESS");
		if (copy_to_user(int2ptr(req->req.recvb),
				 &addr->start, sizeof(u64))) {
			printk(KERN_ERR "raw1394: arm_register failed "
			       " address-range-entry is invalid -> EFAULT !!!\n");
			vfree(addr->addr_space_buffer);
			kfree(addr);
			spin_unlock_irqrestore(&host_info_lock, flags);
			return (-EFAULT);
		}
		free_pending_request(req);	/* immediate success or fail */
		/* INSERT ENTRY */
		list_add_tail(&addr->addr_list, &fi->addr_list);
		spin_unlock_irqrestore(&host_info_lock, flags);
		return sizeof(struct raw1394_request);
	}
	/* NOTE(review): hpsb_register_addrspace is treated as returning
	 * non-zero on success here — confirm against its definition */
	retval =
	    hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops,
				    req->req.address,
				    req->req.address + req->req.length);
	if (retval) {
		/* INSERT ENTRY */
		list_add_tail(&addr->addr_list, &fi->addr_list);
	} else {
		DBGMSG("arm_register failed errno: %d \n", retval);
		vfree(addr->addr_space_buffer);
		kfree(addr);
		spin_unlock_irqrestore(&host_info_lock, flags);
		return (-EALREADY);
	}
	spin_unlock_irqrestore(&host_info_lock, flags);
	free_pending_request(req);	/* immediate success or fail */
	return sizeof(struct raw1394_request);
}
/*
 * Handle RAW1394_REQ_ARM_UNREGISTER: remove the address range mapping
 * whose start address equals req->req.address.  If another host still
 * holds the same range, only the local entry is deleted; otherwise the
 * range is also unregistered from the hpsb core.
 *
 * Returns sizeof(struct raw1394_request) on success (and consumes req),
 * or -EINVAL without consuming req.
 *
 * NOTE(review): vfree() is called while host_info_lock is held in the
 * another_host path — verify this is safe in the target kernel version.
 */
static int arm_unregister(struct file_info *fi, struct pending_request *req)
{
	int found = 0;
	int retval = 0;
	struct list_head *entry;
	struct arm_addr *addr = NULL;
	struct host_info *hi;
	struct file_info *fi_hlp = NULL;
	struct arm_addr *arm_addr = NULL;
	int another_host;
	unsigned long flags;

	DBGMSG("arm_Unregister called addr(Offset): "
	       "%8.8x %8.8x",
	       (u32) ((req->req.address >> 32) & 0xFFFF),
	       (u32) (req->req.address & 0xFFFFFFFF));
	spin_lock_irqsave(&host_info_lock, flags);
	/* get addr: match on start address only */
	entry = fi->addr_list.next;
	while (entry != &(fi->addr_list)) {
		addr = list_entry(entry, struct arm_addr, addr_list);
		if (addr->start == req->req.address) {
			found = 1;
			break;
		}
		entry = entry->next;
	}
	if (!found) {
		DBGMSG("arm_Unregister addr not found");
		spin_unlock_irqrestore(&host_info_lock, flags);
		return (-EINVAL);
	}
	DBGMSG("arm_Unregister addr found");
	another_host = 0;
	/* another host with valid address-entry containing
	   same addressrange */
	list_for_each_entry(hi, &host_info_list, list) {
		if (hi->host != fi->host) {
			list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
				entry = fi_hlp->addr_list.next;
				while (entry != &(fi_hlp->addr_list)) {
					arm_addr = list_entry(entry,
							      struct arm_addr,
							      addr_list);
					if (arm_addr->start == addr->start) {
						DBGMSG("another host ownes "
						       "same addressrange");
						another_host = 1;
						break;
					}
					entry = entry->next;
				}
				if (another_host) {
					break;
				}
			}
		}
	}
	if (another_host) {
		/* keep the hpsb registration alive for the other host,
		 * drop only our local entry */
		DBGMSG("delete entry from list -> success");
		list_del(&addr->addr_list);
		vfree(addr->addr_space_buffer);
		kfree(addr);
		free_pending_request(req);	/* immediate success or fail */
		spin_unlock_irqrestore(&host_info_lock, flags);
		return sizeof(struct raw1394_request);
	}
	retval =
	    hpsb_unregister_addrspace(&raw1394_highlevel, fi->host,
				      addr->start);
	if (!retval) {
		printk(KERN_ERR "raw1394: arm_Unregister failed -> EINVAL\n");
		spin_unlock_irqrestore(&host_info_lock, flags);
		return (-EINVAL);
	}
	DBGMSG("delete entry from list -> success");
	list_del(&addr->addr_list);
	spin_unlock_irqrestore(&host_info_lock, flags);
	vfree(addr->addr_space_buffer);
	kfree(addr);
	free_pending_request(req);	/* immediate success or fail */
	return sizeof(struct raw1394_request);
}
1931 /* Copy data from ARM buffer(s) to user buffer. */
1932 static int arm_get_buf(struct file_info *fi, struct pending_request *req)
1934 struct arm_addr *arm_addr = NULL;
1935 unsigned long flags;
1936 unsigned long offset;
1938 struct list_head *entry;
1940 DBGMSG("arm_get_buf "
1941 "addr(Offset): %04X %08X length: %u",
1942 (u32) ((req->req.address >> 32) & 0xFFFF),
1943 (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
1945 spin_lock_irqsave(&host_info_lock, flags);
1946 entry = fi->addr_list.next;
1947 while (entry != &(fi->addr_list)) {
1948 arm_addr = list_entry(entry, struct arm_addr, addr_list);
1949 if ((arm_addr->start <= req->req.address) &&
1950 (arm_addr->end > req->req.address)) {
1951 if (req->req.address + req->req.length <= arm_addr->end) {
1952 offset = req->req.address - arm_addr->start;
1954 DBGMSG
1955 ("arm_get_buf copy_to_user( %08X, %p, %u )",
1956 (u32) req->req.recvb,
1957 arm_addr->addr_space_buffer + offset,
1958 (u32) req->req.length);
1960 if (copy_to_user
1961 (int2ptr(req->req.recvb),
1962 arm_addr->addr_space_buffer + offset,
1963 req->req.length)) {
1964 spin_unlock_irqrestore(&host_info_lock,
1965 flags);
1966 return (-EFAULT);
1969 spin_unlock_irqrestore(&host_info_lock, flags);
1970 /* We have to free the request, because we
1971 * queue no response, and therefore nobody
1972 * will free it. */
1973 free_pending_request(req);
1974 return sizeof(struct raw1394_request);
1975 } else {
1976 DBGMSG("arm_get_buf request exceeded mapping");
1977 spin_unlock_irqrestore(&host_info_lock, flags);
1978 return (-EINVAL);
1981 entry = entry->next;
1983 spin_unlock_irqrestore(&host_info_lock, flags);
1984 return (-EINVAL);
1987 /* Copy data from user buffer to ARM buffer(s). */
1988 static int arm_set_buf(struct file_info *fi, struct pending_request *req)
1990 struct arm_addr *arm_addr = NULL;
1991 unsigned long flags;
1992 unsigned long offset;
1994 struct list_head *entry;
1996 DBGMSG("arm_set_buf "
1997 "addr(Offset): %04X %08X length: %u",
1998 (u32) ((req->req.address >> 32) & 0xFFFF),
1999 (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
2001 spin_lock_irqsave(&host_info_lock, flags);
2002 entry = fi->addr_list.next;
2003 while (entry != &(fi->addr_list)) {
2004 arm_addr = list_entry(entry, struct arm_addr, addr_list);
2005 if ((arm_addr->start <= req->req.address) &&
2006 (arm_addr->end > req->req.address)) {
2007 if (req->req.address + req->req.length <= arm_addr->end) {
2008 offset = req->req.address - arm_addr->start;
2010 DBGMSG
2011 ("arm_set_buf copy_from_user( %p, %08X, %u )",
2012 arm_addr->addr_space_buffer + offset,
2013 (u32) req->req.sendb,
2014 (u32) req->req.length);
2016 if (copy_from_user
2017 (arm_addr->addr_space_buffer + offset,
2018 int2ptr(req->req.sendb),
2019 req->req.length)) {
2020 spin_unlock_irqrestore(&host_info_lock,
2021 flags);
2022 return (-EFAULT);
2025 spin_unlock_irqrestore(&host_info_lock, flags);
2026 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2027 return sizeof(struct raw1394_request);
2028 } else {
2029 DBGMSG("arm_set_buf request exceeded mapping");
2030 spin_unlock_irqrestore(&host_info_lock, flags);
2031 return (-EINVAL);
2034 entry = entry->next;
2036 spin_unlock_irqrestore(&host_info_lock, flags);
2037 return (-EINVAL);
2040 static int reset_notification(struct file_info *fi, struct pending_request *req)
2042 DBGMSG("reset_notification called - switch %s ",
2043 (req->req.misc == RAW1394_NOTIFY_OFF) ? "OFF" : "ON");
2044 if ((req->req.misc == RAW1394_NOTIFY_OFF) ||
2045 (req->req.misc == RAW1394_NOTIFY_ON)) {
2046 fi->notification = (u8) req->req.misc;
2047 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2048 return sizeof(struct raw1394_request);
2050 /* error EINVAL (22) invalid argument */
2051 return (-EINVAL);
/*
 * Handle RAW1394_REQ_PHYPACKET: transmit a raw PHY packet.  The quadlet
 * to send is passed in-band in req->req.sendb (not as a pointer).  The
 * request is parked on fi->req_pending and completed asynchronously by
 * queue_complete_cb once the packet transmission finishes; on an
 * immediate hpsb_send_packet() failure it is completed right away with
 * RAW1394_ERROR_SEND_ERROR.
 *
 * Returns sizeof(struct raw1394_request), or -ENOMEM if no packet could
 * be allocated.
 */
static int write_phypacket(struct file_info *fi, struct pending_request *req)
{
	struct hpsb_packet *packet = NULL;
	int retval = 0;
	quadlet_t data;
	unsigned long flags;

	/* sendb carries the quadlet value itself here */
	data = be32_to_cpu((u32) req->req.sendb);
	DBGMSG("write_phypacket called - quadlet 0x%8.8x ", data);
	packet = hpsb_make_phypacket(fi->host, data);
	if (!packet)
		return -ENOMEM;
	req->req.length = 0;
	req->packet = packet;
	/* completion callback will queue the request as done */
	hpsb_set_packet_complete_task(packet,
				      (void (*)(void *))queue_complete_cb, req);
	spin_lock_irqsave(&fi->reqlists_lock, flags);
	list_add_tail(&req->list, &fi->req_pending);
	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
	packet->generation = req->req.generation;
	retval = hpsb_send_packet(packet);
	DBGMSG("write_phypacket send_packet called => retval: %d ", retval);
	if (retval < 0) {
		/* send failed immediately: complete the request with an
		 * error instead of waiting for the callback */
		req->req.error = RAW1394_ERROR_SEND_ERROR;
		req->req.length = 0;
		queue_complete_req(req);
	}
	return sizeof(struct raw1394_request);
}
/*
 * Handle RAW1394_REQ_GET_ROM: read the local host's config ROM into a
 * temporary kernel buffer and copy it to user space, along with the ROM
 * cache length (to req.tag), the CSR generation (to req.address) and the
 * csr1212_read status (to req.sendb) — the u64 request fields are reused
 * as user pointers here.
 *
 * Returns sizeof(struct raw1394_request) on success (and consumes req),
 * -ENOMEM on allocation failure, -EFAULT on any failed user copy.
 */
static int get_config_rom(struct file_info *fi, struct pending_request *req)
{
	int ret = sizeof(struct raw1394_request);
	quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
	int status;

	if (!data)
		return -ENOMEM;

	status =
	    csr1212_read(fi->host->csr.rom, CSR1212_CONFIG_ROM_SPACE_OFFSET,
			 data, req->req.length);
	if (copy_to_user(int2ptr(req->req.recvb), data, req->req.length))
		ret = -EFAULT;
	if (copy_to_user
	    (int2ptr(req->req.tag), &fi->host->csr.rom->cache_head->len,
	     sizeof(fi->host->csr.rom->cache_head->len)))
		ret = -EFAULT;
	if (copy_to_user(int2ptr(req->req.address), &fi->host->csr.generation,
			 sizeof(fi->host->csr.generation)))
		ret = -EFAULT;
	if (copy_to_user(int2ptr(req->req.sendb), &status, sizeof(status)))
		ret = -EFAULT;
	kfree(data);
	if (ret >= 0) {
		free_pending_request(req);	/* we have to free the request, because we queue no response, and therefore nobody will free it */
	}
	return ret;
}
2114 static int update_config_rom(struct file_info *fi, struct pending_request *req)
2116 int ret = sizeof(struct raw1394_request);
2117 quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
2118 if (!data)
2119 return -ENOMEM;
2120 if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
2121 ret = -EFAULT;
2122 } else {
2123 int status = hpsb_update_config_rom(fi->host,
2124 data, req->req.length,
2125 (unsigned char)req->req.
2126 misc);
2127 if (copy_to_user
2128 (int2ptr(req->req.recvb), &status, sizeof(status)))
2129 ret = -ENOMEM;
2131 kfree(data);
2132 if (ret >= 0) {
2133 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2134 fi->cfgrom_upd = 1;
2136 return ret;
/*
 * Handle RAW1394_REQ_MODIFY_ROM: create or update one of this client's
 * private CSR1212 vendor directories in the host's config ROM.
 *
 * req->req.misc == ~0 allocates a new directory slot; otherwise misc is
 * the slot index to update (its old dentries are detached first, and a
 * zero-length request deletes the slot entirely).  The user-supplied
 * image (req->req.sendb) is parsed into csr1212 keyvals via a temporary
 * rom cache, the top-level items are attached to the ROM's root
 * directory, and the ROM image is regenerated.  On any failure the
 * partially attached dentries are rolled back and the slot is released.
 *
 * Returns sizeof(struct raw1394_request) on success (consumes req, and
 * reports the slot index to req->req.recvb), a negative errno otherwise.
 *
 * NOTE(review): the copy_to_user failure below yields -ENOMEM rather
 * than -EFAULT — looks like a historical inconsistency; confirm before
 * changing, code preserved as-is here.
 */
static int modify_config_rom(struct file_info *fi, struct pending_request *req)
{
	struct csr1212_keyval *kv;
	struct csr1212_csr_rom_cache *cache;
	struct csr1212_dentry *dentry;
	u32 dr;
	int ret = 0;

	if (req->req.misc == ~0) {
		if (req->req.length == 0)
			return -EINVAL;

		/* Find an unused slot */
		for (dr = 0;
		     dr < RAW1394_MAX_USER_CSR_DIRS && fi->csr1212_dirs[dr];
		     dr++) ;

		if (dr == RAW1394_MAX_USER_CSR_DIRS)
			return -ENOMEM;

		fi->csr1212_dirs[dr] =
		    csr1212_new_directory(CSR1212_KV_ID_VENDOR);
		if (!fi->csr1212_dirs[dr])
			return -ENOMEM;
	} else {
		dr = req->req.misc;
		if (!fi->csr1212_dirs[dr])
			return -EINVAL;

		/* Delete old stuff */
		for (dentry =
		     fi->csr1212_dirs[dr]->value.directory.dentries_head;
		     dentry; dentry = dentry->next) {
			csr1212_detach_keyval_from_directory(fi->host->csr.rom->
							     root_kv,
							     dentry->kv);
		}

		if (req->req.length == 0) {
			/* zero length == delete the directory slot */
			csr1212_release_keyval(fi->csr1212_dirs[dr]);
			fi->csr1212_dirs[dr] = NULL;

			hpsb_update_config_rom_image(fi->host);
			free_pending_request(req);
			return sizeof(struct raw1394_request);
		}
	}

	cache = csr1212_rom_cache_malloc(0, req->req.length);
	if (!cache) {
		csr1212_release_keyval(fi->csr1212_dirs[dr]);
		fi->csr1212_dirs[dr] = NULL;
		return -ENOMEM;
	}

	cache->filled_head = kmalloc(sizeof(*cache->filled_head), GFP_KERNEL);
	if (!cache->filled_head) {
		csr1212_release_keyval(fi->csr1212_dirs[dr]);
		fi->csr1212_dirs[dr] = NULL;
		CSR1212_FREE(cache);
		return -ENOMEM;
	}
	cache->filled_tail = cache->filled_head;

	if (copy_from_user(cache->data, int2ptr(req->req.sendb),
			   req->req.length)) {
		csr1212_release_keyval(fi->csr1212_dirs[dr]);
		fi->csr1212_dirs[dr] = NULL;
		ret = -EFAULT;
	} else {
		cache->len = req->req.length;
		cache->filled_head->offset_start = 0;
		cache->filled_head->offset_end = cache->size - 1;

		cache->layout_head = cache->layout_tail = fi->csr1212_dirs[dr];

		ret = CSR1212_SUCCESS;
		/* parse all the items */
		for (kv = cache->layout_head; ret == CSR1212_SUCCESS && kv;
		     kv = kv->next) {
			ret = csr1212_parse_keyval(kv, cache);
		}

		/* attach top level items to the root directory */
		for (dentry =
		     fi->csr1212_dirs[dr]->value.directory.dentries_head;
		     ret == CSR1212_SUCCESS && dentry; dentry = dentry->next) {
			ret =
			    csr1212_attach_keyval_to_directory(fi->host->csr.
							       rom->root_kv,
							       dentry->kv);
		}

		if (ret == CSR1212_SUCCESS) {
			ret = hpsb_update_config_rom_image(fi->host);

			if (ret >= 0 && copy_to_user(int2ptr(req->req.recvb),
						     &dr, sizeof(dr))) {
				ret = -ENOMEM;
			}
		}
	}
	kfree(cache->filled_head);
	CSR1212_FREE(cache);

	if (ret >= 0) {
		/* we have to free the request, because we queue no response,
		 * and therefore nobody will free it */
		free_pending_request(req);
		return sizeof(struct raw1394_request);
	} else {
		/* roll back: detach whatever got attached, drop the slot */
		for (dentry =
		     fi->csr1212_dirs[dr]->value.directory.dentries_head;
		     dentry; dentry = dentry->next) {
			csr1212_detach_keyval_from_directory(fi->host->csr.rom->
							     root_kv,
							     dentry->kv);
		}
		csr1212_release_keyval(fi->csr1212_dirs[dr]);
		fi->csr1212_dirs[dr] = NULL;
		return ret;
	}
}
2263 static int state_connected(struct file_info *fi, struct pending_request *req)
2265 int node = req->req.address >> 48;
2267 req->req.error = RAW1394_ERROR_NONE;
2269 switch (req->req.type) {
2271 case RAW1394_REQ_ECHO:
2272 queue_complete_req(req);
2273 return sizeof(struct raw1394_request);
2275 case RAW1394_REQ_ISO_SEND:
2276 return handle_iso_send(fi, req, node);
2278 case RAW1394_REQ_ARM_REGISTER:
2279 return arm_register(fi, req);
2281 case RAW1394_REQ_ARM_UNREGISTER:
2282 return arm_unregister(fi, req);
2284 case RAW1394_REQ_ARM_SET_BUF:
2285 return arm_set_buf(fi, req);
2287 case RAW1394_REQ_ARM_GET_BUF:
2288 return arm_get_buf(fi, req);
2290 case RAW1394_REQ_RESET_NOTIFY:
2291 return reset_notification(fi, req);
2293 case RAW1394_REQ_ISO_LISTEN:
2294 handle_iso_listen(fi, req);
2295 return sizeof(struct raw1394_request);
2297 case RAW1394_REQ_FCP_LISTEN:
2298 handle_fcp_listen(fi, req);
2299 return sizeof(struct raw1394_request);
2301 case RAW1394_REQ_RESET_BUS:
2302 if (req->req.misc == RAW1394_LONG_RESET) {
2303 DBGMSG("busreset called (type: LONG)");
2304 hpsb_reset_bus(fi->host, LONG_RESET);
2305 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2306 return sizeof(struct raw1394_request);
2308 if (req->req.misc == RAW1394_SHORT_RESET) {
2309 DBGMSG("busreset called (type: SHORT)");
2310 hpsb_reset_bus(fi->host, SHORT_RESET);
2311 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2312 return sizeof(struct raw1394_request);
2314 /* error EINVAL (22) invalid argument */
2315 return (-EINVAL);
2316 case RAW1394_REQ_GET_ROM:
2317 return get_config_rom(fi, req);
2319 case RAW1394_REQ_UPDATE_ROM:
2320 return update_config_rom(fi, req);
2322 case RAW1394_REQ_MODIFY_ROM:
2323 return modify_config_rom(fi, req);
2326 if (req->req.generation != get_hpsb_generation(fi->host)) {
2327 req->req.error = RAW1394_ERROR_GENERATION;
2328 req->req.generation = get_hpsb_generation(fi->host);
2329 req->req.length = 0;
2330 queue_complete_req(req);
2331 return sizeof(struct raw1394_request);
2334 switch (req->req.type) {
2335 case RAW1394_REQ_PHYPACKET:
2336 return write_phypacket(fi, req);
2337 case RAW1394_REQ_ASYNC_SEND:
2338 return handle_async_send(fi, req);
2341 if (req->req.length == 0) {
2342 req->req.error = RAW1394_ERROR_INVALID_ARG;
2343 queue_complete_req(req);
2344 return sizeof(struct raw1394_request);
2347 return handle_async_request(fi, req, node);
2350 static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2351 size_t count, loff_t * offset_is_ignored)
2353 struct file_info *fi = (struct file_info *)file->private_data;
2354 struct pending_request *req;
2355 ssize_t retval = 0;
2357 #ifdef CONFIG_COMPAT
2358 if (count == sizeof(struct compat_raw1394_req) &&
2359 sizeof(struct compat_raw1394_req) !=
2360 sizeof(struct raw1394_request)) {
2361 buffer = raw1394_compat_write(buffer);
2362 if (IS_ERR(buffer))
2363 return PTR_ERR(buffer);
2364 } else
2365 #endif
2366 if (count != sizeof(struct raw1394_request)) {
2367 return -EINVAL;
2370 req = alloc_pending_request();
2371 if (req == NULL) {
2372 return -ENOMEM;
2374 req->file_info = fi;
2376 if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
2377 free_pending_request(req);
2378 return -EFAULT;
2381 switch (fi->state) {
2382 case opened:
2383 retval = state_opened(fi, req);
2384 break;
2386 case initialized:
2387 retval = state_initialized(fi, req);
2388 break;
2390 case connected:
2391 retval = state_connected(fi, req);
2392 break;
2395 if (retval < 0) {
2396 free_pending_request(req);
2399 return retval;
2402 /* rawiso operations */
2404 /* check if any RAW1394_REQ_RAWISO_ACTIVITY event is already in the
2405 * completion queue (reqlists_lock must be taken) */
2406 static inline int __rawiso_event_in_queue(struct file_info *fi)
2408 struct pending_request *req;
2410 list_for_each_entry(req, &fi->req_complete, list)
2411 if (req->req.type == RAW1394_REQ_RAWISO_ACTIVITY)
2412 return 1;
2414 return 0;
2417 /* put a RAWISO_ACTIVITY event in the queue, if one isn't there already */
2418 static void queue_rawiso_event(struct file_info *fi)
2420 unsigned long flags;
2422 spin_lock_irqsave(&fi->reqlists_lock, flags);
2424 /* only one ISO activity event may be in the queue */
2425 if (!__rawiso_event_in_queue(fi)) {
2426 struct pending_request *req =
2427 __alloc_pending_request(SLAB_ATOMIC);
2429 if (req) {
2430 req->file_info = fi;
2431 req->req.type = RAW1394_REQ_RAWISO_ACTIVITY;
2432 req->req.generation = get_hpsb_generation(fi->host);
2433 __queue_complete_req(req);
2434 } else {
2435 /* on allocation failure, signal an overflow */
2436 if (fi->iso_handle) {
2437 atomic_inc(&fi->iso_handle->overflows);
2441 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2444 static void rawiso_activity_cb(struct hpsb_iso *iso)
2446 unsigned long flags;
2447 struct host_info *hi;
2448 struct file_info *fi;
2450 spin_lock_irqsave(&host_info_lock, flags);
2451 hi = find_host_info(iso->host);
2453 if (hi != NULL) {
2454 list_for_each_entry(fi, &hi->file_info_list, list) {
2455 if (fi->iso_handle == iso)
2456 queue_rawiso_event(fi);
2460 spin_unlock_irqrestore(&host_info_lock, flags);
2463 /* helper function - gather all the kernel iso status bits for returning to user-space */
2464 static void raw1394_iso_fill_status(struct hpsb_iso *iso,
2465 struct raw1394_iso_status *stat)
2467 stat->config.data_buf_size = iso->buf_size;
2468 stat->config.buf_packets = iso->buf_packets;
2469 stat->config.channel = iso->channel;
2470 stat->config.speed = iso->speed;
2471 stat->config.irq_interval = iso->irq_interval;
2472 stat->n_packets = hpsb_iso_n_ready(iso);
2473 stat->overflows = atomic_read(&iso->overflows);
2474 stat->xmit_cycle = iso->xmit_cycle;
2477 static int raw1394_iso_xmit_init(struct file_info *fi, void __user * uaddr)
2479 struct raw1394_iso_status stat;
2481 if (!fi->host)
2482 return -EINVAL;
2484 if (copy_from_user(&stat, uaddr, sizeof(stat)))
2485 return -EFAULT;
2487 fi->iso_handle = hpsb_iso_xmit_init(fi->host,
2488 stat.config.data_buf_size,
2489 stat.config.buf_packets,
2490 stat.config.channel,
2491 stat.config.speed,
2492 stat.config.irq_interval,
2493 rawiso_activity_cb);
2494 if (!fi->iso_handle)
2495 return -ENOMEM;
2497 fi->iso_state = RAW1394_ISO_XMIT;
2499 raw1394_iso_fill_status(fi->iso_handle, &stat);
2500 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2501 return -EFAULT;
2503 /* queue an event to get things started */
2504 rawiso_activity_cb(fi->iso_handle);
2506 return 0;
2509 static int raw1394_iso_recv_init(struct file_info *fi, void __user * uaddr)
2511 struct raw1394_iso_status stat;
2513 if (!fi->host)
2514 return -EINVAL;
2516 if (copy_from_user(&stat, uaddr, sizeof(stat)))
2517 return -EFAULT;
2519 fi->iso_handle = hpsb_iso_recv_init(fi->host,
2520 stat.config.data_buf_size,
2521 stat.config.buf_packets,
2522 stat.config.channel,
2523 stat.config.dma_mode,
2524 stat.config.irq_interval,
2525 rawiso_activity_cb);
2526 if (!fi->iso_handle)
2527 return -ENOMEM;
2529 fi->iso_state = RAW1394_ISO_RECV;
2531 raw1394_iso_fill_status(fi->iso_handle, &stat);
2532 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2533 return -EFAULT;
2534 return 0;
2537 static int raw1394_iso_get_status(struct file_info *fi, void __user * uaddr)
2539 struct raw1394_iso_status stat;
2540 struct hpsb_iso *iso = fi->iso_handle;
2542 raw1394_iso_fill_status(fi->iso_handle, &stat);
2543 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2544 return -EFAULT;
2546 /* reset overflow counter */
2547 atomic_set(&iso->overflows, 0);
2549 return 0;
2552 /* copy N packet_infos out of the ringbuffer into user-supplied array */
2553 static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr)
2555 struct raw1394_iso_packets upackets;
2556 unsigned int packet = fi->iso_handle->first_packet;
2557 int i;
2559 if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
2560 return -EFAULT;
2562 if (upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
2563 return -EINVAL;
2565 /* ensure user-supplied buffer is accessible and big enough */
2566 if (!access_ok(VERIFY_WRITE, upackets.infos,
2567 upackets.n_packets *
2568 sizeof(struct raw1394_iso_packet_info)))
2569 return -EFAULT;
2571 /* copy the packet_infos out */
2572 for (i = 0; i < upackets.n_packets; i++) {
2573 if (__copy_to_user(&upackets.infos[i],
2574 &fi->iso_handle->infos[packet],
2575 sizeof(struct raw1394_iso_packet_info)))
2576 return -EFAULT;
2578 packet = (packet + 1) % fi->iso_handle->buf_packets;
2581 return 0;
2584 /* copy N packet_infos from user to ringbuffer, and queue them for transmission */
2585 static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr)
2587 struct raw1394_iso_packets upackets;
2588 int i, rv;
2590 if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
2591 return -EFAULT;
2593 if (upackets.n_packets >= fi->iso_handle->buf_packets)
2594 return -EINVAL;
2596 if (upackets.n_packets >= hpsb_iso_n_ready(fi->iso_handle))
2597 return -EAGAIN;
2599 /* ensure user-supplied buffer is accessible and big enough */
2600 if (!access_ok(VERIFY_READ, upackets.infos,
2601 upackets.n_packets *
2602 sizeof(struct raw1394_iso_packet_info)))
2603 return -EFAULT;
2605 /* copy the infos structs in and queue the packets */
2606 for (i = 0; i < upackets.n_packets; i++) {
2607 struct raw1394_iso_packet_info info;
2609 if (__copy_from_user(&info, &upackets.infos[i],
2610 sizeof(struct raw1394_iso_packet_info)))
2611 return -EFAULT;
2613 rv = hpsb_iso_xmit_queue_packet(fi->iso_handle, info.offset,
2614 info.len, info.tag, info.sy);
2615 if (rv)
2616 return rv;
2619 return 0;
2622 static void raw1394_iso_shutdown(struct file_info *fi)
2624 if (fi->iso_handle)
2625 hpsb_iso_shutdown(fi->iso_handle);
2627 fi->iso_handle = NULL;
2628 fi->iso_state = RAW1394_ISO_INACTIVE;
2631 /* mmap the rawiso xmit/recv buffer */
2632 static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
2634 struct file_info *fi = file->private_data;
2636 if (fi->iso_state == RAW1394_ISO_INACTIVE)
2637 return -EINVAL;
2639 return dma_region_mmap(&fi->iso_handle->data_buf, file, vma);
2642 /* ioctl is only used for rawiso operations */
2643 static int raw1394_ioctl(struct inode *inode, struct file *file,
2644 unsigned int cmd, unsigned long arg)
2646 struct file_info *fi = file->private_data;
2647 void __user *argp = (void __user *)arg;
2649 switch (fi->iso_state) {
2650 case RAW1394_ISO_INACTIVE:
2651 switch (cmd) {
2652 case RAW1394_IOC_ISO_XMIT_INIT:
2653 return raw1394_iso_xmit_init(fi, argp);
2654 case RAW1394_IOC_ISO_RECV_INIT:
2655 return raw1394_iso_recv_init(fi, argp);
2656 default:
2657 break;
2659 break;
2660 case RAW1394_ISO_RECV:
2661 switch (cmd) {
2662 case RAW1394_IOC_ISO_RECV_START:{
2663 /* copy args from user-space */
2664 int args[3];
2665 if (copy_from_user
2666 (&args[0], argp, sizeof(args)))
2667 return -EFAULT;
2668 return hpsb_iso_recv_start(fi->iso_handle,
2669 args[0], args[1],
2670 args[2]);
2672 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2673 hpsb_iso_stop(fi->iso_handle);
2674 return 0;
2675 case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
2676 return hpsb_iso_recv_listen_channel(fi->iso_handle,
2677 arg);
2678 case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
2679 return hpsb_iso_recv_unlisten_channel(fi->iso_handle,
2680 arg);
2681 case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:{
2682 /* copy the u64 from user-space */
2683 u64 mask;
2684 if (copy_from_user(&mask, argp, sizeof(mask)))
2685 return -EFAULT;
2686 return hpsb_iso_recv_set_channel_mask(fi->
2687 iso_handle,
2688 mask);
2690 case RAW1394_IOC_ISO_GET_STATUS:
2691 return raw1394_iso_get_status(fi, argp);
2692 case RAW1394_IOC_ISO_RECV_PACKETS:
2693 return raw1394_iso_recv_packets(fi, argp);
2694 case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
2695 return hpsb_iso_recv_release_packets(fi->iso_handle,
2696 arg);
2697 case RAW1394_IOC_ISO_RECV_FLUSH:
2698 return hpsb_iso_recv_flush(fi->iso_handle);
2699 case RAW1394_IOC_ISO_SHUTDOWN:
2700 raw1394_iso_shutdown(fi);
2701 return 0;
2702 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2703 queue_rawiso_event(fi);
2704 return 0;
2706 break;
2707 case RAW1394_ISO_XMIT:
2708 switch (cmd) {
2709 case RAW1394_IOC_ISO_XMIT_START:{
2710 /* copy two ints from user-space */
2711 int args[2];
2712 if (copy_from_user
2713 (&args[0], argp, sizeof(args)))
2714 return -EFAULT;
2715 return hpsb_iso_xmit_start(fi->iso_handle,
2716 args[0], args[1]);
2718 case RAW1394_IOC_ISO_XMIT_SYNC:
2719 return hpsb_iso_xmit_sync(fi->iso_handle);
2720 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2721 hpsb_iso_stop(fi->iso_handle);
2722 return 0;
2723 case RAW1394_IOC_ISO_GET_STATUS:
2724 return raw1394_iso_get_status(fi, argp);
2725 case RAW1394_IOC_ISO_XMIT_PACKETS:
2726 return raw1394_iso_send_packets(fi, argp);
2727 case RAW1394_IOC_ISO_SHUTDOWN:
2728 raw1394_iso_shutdown(fi);
2729 return 0;
2730 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2731 queue_rawiso_event(fi);
2732 return 0;
2734 break;
2735 default:
2736 break;
2739 return -EINVAL;
2742 static unsigned int raw1394_poll(struct file *file, poll_table * pt)
2744 struct file_info *fi = file->private_data;
2745 unsigned int mask = POLLOUT | POLLWRNORM;
2746 unsigned long flags;
2748 poll_wait(file, &fi->poll_wait_complete, pt);
2750 spin_lock_irqsave(&fi->reqlists_lock, flags);
2751 if (!list_empty(&fi->req_complete)) {
2752 mask |= POLLIN | POLLRDNORM;
2754 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2756 return mask;
2759 static int raw1394_open(struct inode *inode, struct file *file)
2761 struct file_info *fi;
2763 fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
2764 if (!fi)
2765 return -ENOMEM;
2767 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
2769 INIT_LIST_HEAD(&fi->list);
2770 fi->state = opened;
2771 INIT_LIST_HEAD(&fi->req_pending);
2772 INIT_LIST_HEAD(&fi->req_complete);
2773 sema_init(&fi->complete_sem, 0);
2774 spin_lock_init(&fi->reqlists_lock);
2775 init_waitqueue_head(&fi->poll_wait_complete);
2776 INIT_LIST_HEAD(&fi->addr_list);
2778 file->private_data = fi;
2780 return 0;
2783 static int raw1394_release(struct inode *inode, struct file *file)
2785 struct file_info *fi = file->private_data;
2786 struct list_head *lh;
2787 struct pending_request *req;
2788 int done = 0, i, fail = 0;
2789 int retval = 0;
2790 struct list_head *entry;
2791 struct arm_addr *addr = NULL;
2792 struct host_info *hi;
2793 struct file_info *fi_hlp = NULL;
2794 struct arm_addr *arm_addr = NULL;
2795 int another_host;
2796 int csr_mod = 0;
2797 unsigned long flags;
2799 if (fi->iso_state != RAW1394_ISO_INACTIVE)
2800 raw1394_iso_shutdown(fi);
2802 for (i = 0; i < 64; i++) {
2803 if (fi->listen_channels & (1ULL << i)) {
2804 hpsb_unlisten_channel(&raw1394_highlevel, fi->host, i);
2808 spin_lock_irqsave(&host_info_lock, flags);
2809 fi->listen_channels = 0;
2811 fail = 0;
2812 /* set address-entries invalid */
2814 while (!list_empty(&fi->addr_list)) {
2815 another_host = 0;
2816 lh = fi->addr_list.next;
2817 addr = list_entry(lh, struct arm_addr, addr_list);
2818 /* another host with valid address-entry containing
2819 same addressrange? */
2820 list_for_each_entry(hi, &host_info_list, list) {
2821 if (hi->host != fi->host) {
2822 list_for_each_entry(fi_hlp, &hi->file_info_list,
2823 list) {
2824 entry = fi_hlp->addr_list.next;
2825 while (entry != &(fi_hlp->addr_list)) {
2826 arm_addr = list_entry(entry, struct
2827 arm_addr,
2828 addr_list);
2829 if (arm_addr->start ==
2830 addr->start) {
2831 DBGMSG
2832 ("raw1394_release: "
2833 "another host ownes "
2834 "same addressrange");
2835 another_host = 1;
2836 break;
2838 entry = entry->next;
2840 if (another_host) {
2841 break;
2846 if (!another_host) {
2847 DBGMSG("raw1394_release: call hpsb_arm_unregister");
2848 retval =
2849 hpsb_unregister_addrspace(&raw1394_highlevel,
2850 fi->host, addr->start);
2851 if (!retval) {
2852 ++fail;
2853 printk(KERN_ERR
2854 "raw1394_release arm_Unregister failed\n");
2857 DBGMSG("raw1394_release: delete addr_entry from list");
2858 list_del(&addr->addr_list);
2859 vfree(addr->addr_space_buffer);
2860 kfree(addr);
2861 } /* while */
2862 spin_unlock_irqrestore(&host_info_lock, flags);
2863 if (fail > 0) {
2864 printk(KERN_ERR "raw1394: during addr_list-release "
2865 "error(s) occurred \n");
2868 while (!done) {
2869 spin_lock_irqsave(&fi->reqlists_lock, flags);
2871 while (!list_empty(&fi->req_complete)) {
2872 lh = fi->req_complete.next;
2873 list_del(lh);
2875 req = list_entry(lh, struct pending_request, list);
2877 free_pending_request(req);
2880 if (list_empty(&fi->req_pending))
2881 done = 1;
2883 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2885 if (!done)
2886 down_interruptible(&fi->complete_sem);
2889 /* Remove any sub-trees left by user space programs */
2890 for (i = 0; i < RAW1394_MAX_USER_CSR_DIRS; i++) {
2891 struct csr1212_dentry *dentry;
2892 if (!fi->csr1212_dirs[i])
2893 continue;
2894 for (dentry =
2895 fi->csr1212_dirs[i]->value.directory.dentries_head; dentry;
2896 dentry = dentry->next) {
2897 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2898 root_kv,
2899 dentry->kv);
2901 csr1212_release_keyval(fi->csr1212_dirs[i]);
2902 fi->csr1212_dirs[i] = NULL;
2903 csr_mod = 1;
2906 if ((csr_mod || fi->cfgrom_upd)
2907 && hpsb_update_config_rom_image(fi->host) < 0)
2908 HPSB_ERR
2909 ("Failed to generate Configuration ROM image for host %d",
2910 fi->host->id);
2912 if (fi->state == connected) {
2913 spin_lock_irqsave(&host_info_lock, flags);
2914 list_del(&fi->list);
2915 spin_unlock_irqrestore(&host_info_lock, flags);
2917 put_device(&fi->host->device);
2920 kfree(fi);
2922 return 0;
2925 /*** HOTPLUG STUFF **********************************************************/
2927 * Export information about protocols/devices supported by this driver.
2929 static struct ieee1394_device_id raw1394_id_table[] = {
2931 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2932 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
2933 .version = AVC_SW_VERSION_ENTRY & 0xffffff},
2935 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2936 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2937 .version = CAMERA_SW_VERSION_ENTRY & 0xffffff},
2939 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2940 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2941 .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff},
2943 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2944 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2945 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
2949 MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
2951 static struct hpsb_protocol_driver raw1394_driver = {
2952 .name = "raw1394 Driver",
2953 .id_table = raw1394_id_table,
2954 .driver = {
2955 .name = "raw1394",
2956 .bus = &ieee1394_bus_type,
2960 /******************************************************************************/
2962 static struct hpsb_highlevel raw1394_highlevel = {
2963 .name = RAW1394_DEVICE_NAME,
2964 .add_host = add_host,
2965 .remove_host = remove_host,
2966 .host_reset = host_reset,
2967 .iso_receive = iso_receive,
2968 .fcp_request = fcp_request,
2971 static struct cdev raw1394_cdev;
2972 static struct file_operations raw1394_fops = {
2973 .owner = THIS_MODULE,
2974 .read = raw1394_read,
2975 .write = raw1394_write,
2976 .mmap = raw1394_mmap,
2977 .ioctl = raw1394_ioctl,
2978 // .compat_ioctl = ... someone needs to do this
2979 .poll = raw1394_poll,
2980 .open = raw1394_open,
2981 .release = raw1394_release,
2984 static int __init init_raw1394(void)
2986 int ret = 0;
2988 hpsb_register_highlevel(&raw1394_highlevel);
2990 if (IS_ERR
2991 (class_device_create
2992 (hpsb_protocol_class, NULL,
2993 MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), NULL,
2994 RAW1394_DEVICE_NAME))) {
2995 ret = -EFAULT;
2996 goto out_unreg;
2999 cdev_init(&raw1394_cdev, &raw1394_fops);
3000 raw1394_cdev.owner = THIS_MODULE;
3001 kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
3002 ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
3003 if (ret) {
3004 HPSB_ERR("raw1394 failed to register minor device block");
3005 goto out_dev;
3008 HPSB_INFO("raw1394: /dev/%s device initialized", RAW1394_DEVICE_NAME);
3010 ret = hpsb_register_protocol(&raw1394_driver);
3011 if (ret) {
3012 HPSB_ERR("raw1394: failed to register protocol");
3013 cdev_del(&raw1394_cdev);
3014 goto out_dev;
3017 goto out;
3019 out_dev:
3020 class_device_destroy(hpsb_protocol_class,
3021 MKDEV(IEEE1394_MAJOR,
3022 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
3023 out_unreg:
3024 hpsb_unregister_highlevel(&raw1394_highlevel);
3025 out:
3026 return ret;
3029 static void __exit cleanup_raw1394(void)
3031 class_device_destroy(hpsb_protocol_class,
3032 MKDEV(IEEE1394_MAJOR,
3033 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
3034 cdev_del(&raw1394_cdev);
3035 hpsb_unregister_highlevel(&raw1394_highlevel);
3036 hpsb_unregister_protocol(&raw1394_driver);
3039 module_init(init_raw1394);
3040 module_exit(cleanup_raw1394);
3041 MODULE_LICENSE("GPL");