2 * I2O Configuration Interface Driver
4 * (C) Copyright 1999-2002 Red Hat
6 * Written by Alan Cox, Building Number Three Ltd
9 * Deepak Saxena (04/20/1999):
10 * Added basic ioctl() support
11 * Deepak Saxena (06/07/1999):
12 * Added software download ioctl (still testing)
13 * Auvo Häkkinen (09/10/1999):
14 * Changes to i2o_cfg_reply(), ioctl_parms()
15 * Added ioct_validate()
16 * Taneli Vähäkangas (09/30/1999):
18 * Taneli Vähäkangas (10/04/1999):
19 * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
20 * Deepak Saxena (11/18/1999):
 21 * Added event management support
22 * Alan Cox <alan@lxorguk.ukuu.org.uk>:
23 * 2.4 rewrite ported to 2.5
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Added pass-thru support for Adaptec's raidutils
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
33 #include <linux/miscdevice.h>
34 #include <linux/mutex.h>
35 #include <linux/compat.h>
36 #include <linux/slab.h>
38 #include <asm/uaccess.h>
42 #define SG_TABLESIZE 30
44 static DEFINE_MUTEX(i2o_cfg_mutex
);
45 static long i2o_cfg_ioctl(struct file
*, unsigned int, unsigned long);
47 static spinlock_t i2o_config_lock
;
49 #define MODINC(x,y) ((x) = ((x) + 1) % (y))
51 struct sg_simple_element
{
58 struct fasync_struct
*fasync
;
59 struct i2o_evt_info event_q
[I2O_EVT_Q_LEN
];
60 u16 q_in
; // Queue head index
61 u16 q_out
; // Queue tail index
62 u16 q_len
; // Queue length
63 u16 q_lost
; // Number of lost events
64 ulong q_id
; // Event queue ID...used as tx_context
65 struct i2o_cfg_info
*next
;
67 static struct i2o_cfg_info
*open_files
= NULL
;
68 static ulong i2o_cfg_info_id
= 0;
70 static int i2o_cfg_getiops(unsigned long arg
)
72 struct i2o_controller
*c
;
73 u8 __user
*user_iop_table
= (void __user
*)arg
;
74 u8 tmp
[MAX_I2O_CONTROLLERS
];
77 memset(tmp
, 0, MAX_I2O_CONTROLLERS
);
79 list_for_each_entry(c
, &i2o_controllers
, list
)
82 if (copy_to_user(user_iop_table
, tmp
, MAX_I2O_CONTROLLERS
))
88 static int i2o_cfg_gethrt(unsigned long arg
)
90 struct i2o_controller
*c
;
91 struct i2o_cmd_hrtlct __user
*cmd
= (struct i2o_cmd_hrtlct __user
*)arg
;
92 struct i2o_cmd_hrtlct kcmd
;
98 if (copy_from_user(&kcmd
, cmd
, sizeof(struct i2o_cmd_hrtlct
)))
101 if (get_user(reslen
, kcmd
.reslen
) < 0)
104 if (kcmd
.resbuf
== NULL
)
107 c
= i2o_find_iop(kcmd
.iop
);
111 hrt
= (i2o_hrt
*) c
->hrt
.virt
;
113 len
= 8 + ((hrt
->entry_len
* hrt
->num_entries
) << 2);
115 if (put_user(len
, kcmd
.reslen
))
117 else if (len
> reslen
)
119 else if (copy_to_user(kcmd
.resbuf
, (void *)hrt
, len
))
125 static int i2o_cfg_getlct(unsigned long arg
)
127 struct i2o_controller
*c
;
128 struct i2o_cmd_hrtlct __user
*cmd
= (struct i2o_cmd_hrtlct __user
*)arg
;
129 struct i2o_cmd_hrtlct kcmd
;
135 if (copy_from_user(&kcmd
, cmd
, sizeof(struct i2o_cmd_hrtlct
)))
138 if (get_user(reslen
, kcmd
.reslen
) < 0)
141 if (kcmd
.resbuf
== NULL
)
144 c
= i2o_find_iop(kcmd
.iop
);
148 lct
= (i2o_lct
*) c
->lct
;
150 len
= (unsigned int)lct
->table_size
<< 2;
151 if (put_user(len
, kcmd
.reslen
))
153 else if (len
> reslen
)
155 else if (copy_to_user(kcmd
.resbuf
, lct
, len
))
161 static int i2o_cfg_parms(unsigned long arg
, unsigned int type
)
164 struct i2o_controller
*c
;
165 struct i2o_device
*dev
;
166 struct i2o_cmd_psetget __user
*cmd
=
167 (struct i2o_cmd_psetget __user
*)arg
;
168 struct i2o_cmd_psetget kcmd
;
174 u32 i2o_cmd
= (type
== I2OPARMGET
?
175 I2O_CMD_UTIL_PARAMS_GET
: I2O_CMD_UTIL_PARAMS_SET
);
177 if (copy_from_user(&kcmd
, cmd
, sizeof(struct i2o_cmd_psetget
)))
180 if (get_user(reslen
, kcmd
.reslen
))
183 c
= i2o_find_iop(kcmd
.iop
);
187 dev
= i2o_iop_find_device(c
, kcmd
.tid
);
191 ops
= memdup_user(kcmd
.opbuf
, kcmd
.oplen
);
196 * It's possible to have a _very_ large table
197 * and that the user asks for all of it at once...
199 res
= kmalloc(65536, GFP_KERNEL
);
205 len
= i2o_parm_issue(dev
, i2o_cmd
, ops
, kcmd
.oplen
, res
, 65536);
213 if (put_user(len
, kcmd
.reslen
))
215 else if (len
> reslen
)
217 else if (copy_to_user(kcmd
.resbuf
, res
, len
))
225 static int i2o_cfg_swdl(unsigned long arg
)
227 struct i2o_sw_xfer kxfer
;
228 struct i2o_sw_xfer __user
*pxfer
= (struct i2o_sw_xfer __user
*)arg
;
229 unsigned char maxfrag
= 0, curfrag
= 1;
230 struct i2o_dma buffer
;
231 struct i2o_message
*msg
;
232 unsigned int status
= 0, swlen
= 0, fragsize
= 8192;
233 struct i2o_controller
*c
;
235 if (copy_from_user(&kxfer
, pxfer
, sizeof(struct i2o_sw_xfer
)))
238 if (get_user(swlen
, kxfer
.swlen
) < 0)
241 if (get_user(maxfrag
, kxfer
.maxfrag
) < 0)
244 if (get_user(curfrag
, kxfer
.curfrag
) < 0)
247 if (curfrag
== maxfrag
)
248 fragsize
= swlen
- (maxfrag
- 1) * 8192;
250 if (!kxfer
.buf
|| !access_ok(VERIFY_READ
, kxfer
.buf
, fragsize
))
253 c
= i2o_find_iop(kxfer
.iop
);
257 msg
= i2o_msg_get_wait(c
, I2O_TIMEOUT_MESSAGE_GET
);
261 if (i2o_dma_alloc(&c
->pdev
->dev
, &buffer
, fragsize
)) {
266 if (__copy_from_user(buffer
.virt
, kxfer
.buf
, fragsize
)) {
268 i2o_dma_free(&c
->pdev
->dev
, &buffer
);
272 msg
->u
.head
[0] = cpu_to_le32(NINE_WORD_MSG_SIZE
| SGL_OFFSET_7
);
274 cpu_to_le32(I2O_CMD_SW_DOWNLOAD
<< 24 | HOST_TID
<< 12 |
276 msg
->u
.head
[2] = cpu_to_le32(i2o_config_driver
.context
);
277 msg
->u
.head
[3] = cpu_to_le32(0);
279 cpu_to_le32((((u32
) kxfer
.flags
) << 24) | (((u32
) kxfer
.
281 (((u32
) maxfrag
) << 8) | (((u32
) curfrag
)));
282 msg
->body
[1] = cpu_to_le32(swlen
);
283 msg
->body
[2] = cpu_to_le32(kxfer
.sw_id
);
284 msg
->body
[3] = cpu_to_le32(0xD0000000 | fragsize
);
285 msg
->body
[4] = cpu_to_le32(buffer
.phys
);
287 osm_debug("swdl frag %d/%d (size %d)\n", curfrag
, maxfrag
, fragsize
);
288 status
= i2o_msg_post_wait_mem(c
, msg
, 60, &buffer
);
290 if (status
!= -ETIMEDOUT
)
291 i2o_dma_free(&c
->pdev
->dev
, &buffer
);
293 if (status
!= I2O_POST_WAIT_OK
) {
294 // it fails if you try and send frags out of order
295 // and for some yet unknown reasons too
296 osm_info("swdl failed, DetailedStatus = %d\n", status
);
303 static int i2o_cfg_swul(unsigned long arg
)
305 struct i2o_sw_xfer kxfer
;
306 struct i2o_sw_xfer __user
*pxfer
= (struct i2o_sw_xfer __user
*)arg
;
307 unsigned char maxfrag
= 0, curfrag
= 1;
308 struct i2o_dma buffer
;
309 struct i2o_message
*msg
;
310 unsigned int status
= 0, swlen
= 0, fragsize
= 8192;
311 struct i2o_controller
*c
;
314 if (copy_from_user(&kxfer
, pxfer
, sizeof(struct i2o_sw_xfer
)))
317 if (get_user(swlen
, kxfer
.swlen
) < 0)
320 if (get_user(maxfrag
, kxfer
.maxfrag
) < 0)
323 if (get_user(curfrag
, kxfer
.curfrag
) < 0)
326 if (curfrag
== maxfrag
)
327 fragsize
= swlen
- (maxfrag
- 1) * 8192;
332 c
= i2o_find_iop(kxfer
.iop
);
336 msg
= i2o_msg_get_wait(c
, I2O_TIMEOUT_MESSAGE_GET
);
340 if (i2o_dma_alloc(&c
->pdev
->dev
, &buffer
, fragsize
)) {
345 msg
->u
.head
[0] = cpu_to_le32(NINE_WORD_MSG_SIZE
| SGL_OFFSET_7
);
347 cpu_to_le32(I2O_CMD_SW_UPLOAD
<< 24 | HOST_TID
<< 12 | ADAPTER_TID
);
348 msg
->u
.head
[2] = cpu_to_le32(i2o_config_driver
.context
);
349 msg
->u
.head
[3] = cpu_to_le32(0);
351 cpu_to_le32((u32
) kxfer
.flags
<< 24 | (u32
) kxfer
.
352 sw_type
<< 16 | (u32
) maxfrag
<< 8 | (u32
) curfrag
);
353 msg
->body
[1] = cpu_to_le32(swlen
);
354 msg
->body
[2] = cpu_to_le32(kxfer
.sw_id
);
355 msg
->body
[3] = cpu_to_le32(0xD0000000 | fragsize
);
356 msg
->body
[4] = cpu_to_le32(buffer
.phys
);
358 osm_debug("swul frag %d/%d (size %d)\n", curfrag
, maxfrag
, fragsize
);
359 status
= i2o_msg_post_wait_mem(c
, msg
, 60, &buffer
);
361 if (status
!= I2O_POST_WAIT_OK
) {
362 if (status
!= -ETIMEDOUT
)
363 i2o_dma_free(&c
->pdev
->dev
, &buffer
);
365 osm_info("swul failed, DetailedStatus = %d\n", status
);
369 if (copy_to_user(kxfer
.buf
, buffer
.virt
, fragsize
))
372 i2o_dma_free(&c
->pdev
->dev
, &buffer
);
377 static int i2o_cfg_swdel(unsigned long arg
)
379 struct i2o_controller
*c
;
380 struct i2o_sw_xfer kxfer
;
381 struct i2o_sw_xfer __user
*pxfer
= (struct i2o_sw_xfer __user
*)arg
;
382 struct i2o_message
*msg
;
386 if (copy_from_user(&kxfer
, pxfer
, sizeof(struct i2o_sw_xfer
)))
389 if (get_user(swlen
, kxfer
.swlen
) < 0)
392 c
= i2o_find_iop(kxfer
.iop
);
396 msg
= i2o_msg_get_wait(c
, I2O_TIMEOUT_MESSAGE_GET
);
400 msg
->u
.head
[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE
| SGL_OFFSET_0
);
402 cpu_to_le32(I2O_CMD_SW_REMOVE
<< 24 | HOST_TID
<< 12 | ADAPTER_TID
);
403 msg
->u
.head
[2] = cpu_to_le32(i2o_config_driver
.context
);
404 msg
->u
.head
[3] = cpu_to_le32(0);
406 cpu_to_le32((u32
) kxfer
.flags
<< 24 | (u32
) kxfer
.sw_type
<< 16);
407 msg
->body
[1] = cpu_to_le32(swlen
);
408 msg
->body
[2] = cpu_to_le32(kxfer
.sw_id
);
410 token
= i2o_msg_post_wait(c
, msg
, 10);
412 if (token
!= I2O_POST_WAIT_OK
) {
413 osm_info("swdel failed, DetailedStatus = %d\n", token
);
420 static int i2o_cfg_validate(unsigned long arg
)
424 struct i2o_message
*msg
;
425 struct i2o_controller
*c
;
427 c
= i2o_find_iop(iop
);
431 msg
= i2o_msg_get_wait(c
, I2O_TIMEOUT_MESSAGE_GET
);
435 msg
->u
.head
[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE
| SGL_OFFSET_0
);
437 cpu_to_le32(I2O_CMD_CONFIG_VALIDATE
<< 24 | HOST_TID
<< 12 | iop
);
438 msg
->u
.head
[2] = cpu_to_le32(i2o_config_driver
.context
);
439 msg
->u
.head
[3] = cpu_to_le32(0);
441 token
= i2o_msg_post_wait(c
, msg
, 10);
443 if (token
!= I2O_POST_WAIT_OK
) {
444 osm_info("Can't validate configuration, ErrorStatus = %d\n",
452 static int i2o_cfg_evt_reg(unsigned long arg
, struct file
*fp
)
454 struct i2o_message
*msg
;
455 struct i2o_evt_id __user
*pdesc
= (struct i2o_evt_id __user
*)arg
;
456 struct i2o_evt_id kdesc
;
457 struct i2o_controller
*c
;
458 struct i2o_device
*d
;
460 if (copy_from_user(&kdesc
, pdesc
, sizeof(struct i2o_evt_id
)))
464 c
= i2o_find_iop(kdesc
.iop
);
469 d
= i2o_iop_find_device(c
, kdesc
.tid
);
473 msg
= i2o_msg_get_wait(c
, I2O_TIMEOUT_MESSAGE_GET
);
477 msg
->u
.head
[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE
| SGL_OFFSET_0
);
479 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER
<< 24 | HOST_TID
<< 12 |
481 msg
->u
.head
[2] = cpu_to_le32(i2o_config_driver
.context
);
482 msg
->u
.head
[3] = cpu_to_le32(i2o_cntxt_list_add(c
, fp
->private_data
));
483 msg
->body
[0] = cpu_to_le32(kdesc
.evt_mask
);
485 i2o_msg_post(c
, msg
);
490 static int i2o_cfg_evt_get(unsigned long arg
, struct file
*fp
)
492 struct i2o_cfg_info
*p
= NULL
;
493 struct i2o_evt_get __user
*uget
= (struct i2o_evt_get __user
*)arg
;
494 struct i2o_evt_get kget
;
497 for (p
= open_files
; p
; p
= p
->next
)
498 if (p
->q_id
== (ulong
) fp
->private_data
)
504 memcpy(&kget
.info
, &p
->event_q
[p
->q_out
], sizeof(struct i2o_evt_info
));
505 MODINC(p
->q_out
, I2O_EVT_Q_LEN
);
506 spin_lock_irqsave(&i2o_config_lock
, flags
);
508 kget
.pending
= p
->q_len
;
509 kget
.lost
= p
->q_lost
;
510 spin_unlock_irqrestore(&i2o_config_lock
, flags
);
512 if (copy_to_user(uget
, &kget
, sizeof(struct i2o_evt_get
)))
518 static int i2o_cfg_passthru32(struct file
*file
, unsigned cmnd
,
521 struct i2o_cmd_passthru32 __user
*cmd
;
522 struct i2o_controller
*c
;
523 u32 __user
*user_msg
;
525 u32 __user
*user_reply
= NULL
;
529 struct i2o_dma sg_list
[SG_TABLESIZE
];
534 i2o_status_block
*sb
;
535 struct i2o_message
*msg
;
538 cmd
= (struct i2o_cmd_passthru32 __user
*)arg
;
540 if (get_user(iop
, &cmd
->iop
) || get_user(i
, &cmd
->msg
))
543 user_msg
= compat_ptr(i
);
545 c
= i2o_find_iop(iop
);
547 osm_debug("controller %d not found\n", iop
);
551 sb
= c
->status_block
.virt
;
553 if (get_user(size
, &user_msg
[0])) {
554 osm_warn("unable to get size!\n");
559 if (size
> sb
->inbound_frame_size
) {
560 osm_warn("size of message > inbound_frame_size");
564 user_reply
= &user_msg
[size
];
566 size
<<= 2; // Convert to bytes
568 msg
= i2o_msg_get_wait(c
, I2O_TIMEOUT_MESSAGE_GET
);
573 /* Copy in the user's I2O command */
574 if (copy_from_user(msg
, user_msg
, size
)) {
575 osm_warn("unable to copy user message\n");
578 i2o_dump_message(msg
);
580 if (get_user(reply_size
, &user_reply
[0]) < 0)
587 reply
= kzalloc(reply_size
, GFP_KERNEL
);
589 printk(KERN_WARNING
"%s: Could not allocate reply buffer\n",
594 sg_offset
= (msg
->u
.head
[0] >> 4) & 0x0f;
596 memset(sg_list
, 0, sizeof(sg_list
[0]) * SG_TABLESIZE
);
598 struct sg_simple_element
*sg
;
600 if (sg_offset
* 4 >= size
) {
605 sg
= (struct sg_simple_element
*)((&msg
->u
.head
[0]) +
608 (size
- sg_offset
* 4) / sizeof(struct sg_simple_element
);
609 if (sg_count
> SG_TABLESIZE
) {
610 printk(KERN_DEBUG
"%s:IOCTL SG List too large (%u)\n",
616 for (i
= 0; i
< sg_count
; i
++) {
620 if (!(sg
[i
].flag_count
& 0x10000000
621 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
623 "%s:Bad SG element %d - not simple (%x)\n",
624 c
->name
, i
, sg
[i
].flag_count
);
628 sg_size
= sg
[i
].flag_count
& 0xffffff;
629 p
= &(sg_list
[sg_index
]);
630 /* Allocate memory for the transfer */
631 if (i2o_dma_alloc(&c
->pdev
->dev
, p
, sg_size
)) {
633 "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
634 c
->name
, sg_size
, i
, sg_count
);
636 goto sg_list_cleanup
;
639 /* Copy in the user's SG buffer if necessary */
641 flag_count
& 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
645 (void __user
*)(unsigned long)sg
[i
].
646 addr_bus
, sg_size
)) {
648 "%s: Could not copy SG buf %d FROM user\n",
651 goto sg_list_cleanup
;
655 sg
[i
].addr_bus
= (u32
) p
->phys
;
659 rcode
= i2o_msg_post_wait(c
, msg
, 60);
662 reply
[4] = ((u32
) rcode
) << 24;
663 goto sg_list_cleanup
;
667 u32 rmsg
[I2O_OUTBOUND_MSG_FRAME_SIZE
];
668 /* Copy back the Scatter Gather buffers back to user space */
671 struct sg_simple_element
*sg
;
674 // re-acquire the original message to handle correctly the sg copy operation
675 memset(&rmsg
, 0, I2O_OUTBOUND_MSG_FRAME_SIZE
* 4);
676 // get user msg size in u32s
677 if (get_user(size
, &user_msg
[0])) {
679 goto sg_list_cleanup
;
683 /* Copy in the user's I2O command */
684 if (copy_from_user(rmsg
, user_msg
, size
)) {
686 goto sg_list_cleanup
;
689 (size
- sg_offset
* 4) / sizeof(struct sg_simple_element
);
692 sg
= (struct sg_simple_element
*)(rmsg
+ sg_offset
);
693 for (j
= 0; j
< sg_count
; j
++) {
694 /* Copy out the SG list to user's buffer if necessary */
697 flag_count
& 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
698 sg_size
= sg
[j
].flag_count
& 0xffffff;
701 ((void __user
*)(u64
) sg
[j
].addr_bus
,
702 sg_list
[j
].virt
, sg_size
)) {
704 "%s: Could not copy %p TO user %x\n",
705 c
->name
, sg_list
[j
].virt
,
708 goto sg_list_cleanup
;
715 /* Copy back the reply to user space */
717 // we wrote our own values for context - now restore the user supplied ones
718 if (copy_from_user(reply
+ 2, user_msg
+ 2, sizeof(u32
) * 2)) {
720 "%s: Could not copy message context FROM user\n",
724 if (copy_to_user(user_reply
, reply
, reply_size
)) {
726 "%s: Could not copy reply TO user\n", c
->name
);
730 for (i
= 0; i
< sg_index
; i
++)
731 i2o_dma_free(&c
->pdev
->dev
, &sg_list
[i
]);
741 static long i2o_cfg_compat_ioctl(struct file
*file
, unsigned cmd
,
745 mutex_lock(&i2o_cfg_mutex
);
748 ret
= i2o_cfg_ioctl(file
, cmd
, arg
);
751 ret
= i2o_cfg_passthru32(file
, cmd
, arg
);
757 mutex_unlock(&i2o_cfg_mutex
);
763 #ifdef CONFIG_I2O_EXT_ADAPTEC
764 static int i2o_cfg_passthru(unsigned long arg
)
766 struct i2o_cmd_passthru __user
*cmd
=
767 (struct i2o_cmd_passthru __user
*)arg
;
768 struct i2o_controller
*c
;
769 u32 __user
*user_msg
;
771 u32 __user
*user_reply
= NULL
;
775 struct i2o_dma sg_list
[SG_TABLESIZE
];
780 i2o_status_block
*sb
;
781 struct i2o_message
*msg
;
784 if (get_user(iop
, &cmd
->iop
) || get_user(user_msg
, &cmd
->msg
))
787 c
= i2o_find_iop(iop
);
789 osm_warn("controller %d not found\n", iop
);
793 sb
= c
->status_block
.virt
;
795 if (get_user(size
, &user_msg
[0]))
799 if (size
> sb
->inbound_frame_size
) {
800 osm_warn("size of message > inbound_frame_size");
804 user_reply
= &user_msg
[size
];
806 size
<<= 2; // Convert to bytes
808 msg
= i2o_msg_get_wait(c
, I2O_TIMEOUT_MESSAGE_GET
);
813 /* Copy in the user's I2O command */
814 if (copy_from_user(msg
, user_msg
, size
))
817 if (get_user(reply_size
, &user_reply
[0]) < 0)
823 reply
= kzalloc(reply_size
, GFP_KERNEL
);
825 printk(KERN_WARNING
"%s: Could not allocate reply buffer\n",
831 sg_offset
= (msg
->u
.head
[0] >> 4) & 0x0f;
833 memset(sg_list
, 0, sizeof(sg_list
[0]) * SG_TABLESIZE
);
835 struct sg_simple_element
*sg
;
838 if (sg_offset
* 4 >= size
) {
843 sg
= (struct sg_simple_element
*)((&msg
->u
.head
[0]) +
846 (size
- sg_offset
* 4) / sizeof(struct sg_simple_element
);
847 if (sg_count
> SG_TABLESIZE
) {
848 printk(KERN_DEBUG
"%s:IOCTL SG List too large (%u)\n",
854 for (i
= 0; i
< sg_count
; i
++) {
857 if (!(sg
[i
].flag_count
& 0x10000000
858 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
860 "%s:Bad SG element %d - not simple (%x)\n",
861 c
->name
, i
, sg
[i
].flag_count
);
863 goto sg_list_cleanup
;
865 sg_size
= sg
[i
].flag_count
& 0xffffff;
866 p
= &(sg_list
[sg_index
]);
867 if (i2o_dma_alloc(&c
->pdev
->dev
, p
, sg_size
)) {
868 /* Allocate memory for the transfer */
870 "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
871 c
->name
, sg_size
, i
, sg_count
);
873 goto sg_list_cleanup
;
876 /* Copy in the user's SG buffer if necessary */
878 flag_count
& 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
881 (p
->virt
, (void __user
*)sg
[i
].addr_bus
,
884 "%s: Could not copy SG buf %d FROM user\n",
887 goto sg_list_cleanup
;
890 sg
[i
].addr_bus
= p
->phys
;
894 rcode
= i2o_msg_post_wait(c
, msg
, 60);
897 reply
[4] = ((u32
) rcode
) << 24;
898 goto sg_list_cleanup
;
902 u32 rmsg
[I2O_OUTBOUND_MSG_FRAME_SIZE
];
903 /* Copy back the Scatter Gather buffers back to user space */
906 struct sg_simple_element
*sg
;
909 // re-acquire the original message to handle correctly the sg copy operation
910 memset(&rmsg
, 0, I2O_OUTBOUND_MSG_FRAME_SIZE
* 4);
911 // get user msg size in u32s
912 if (get_user(size
, &user_msg
[0])) {
914 goto sg_list_cleanup
;
918 /* Copy in the user's I2O command */
919 if (copy_from_user(rmsg
, user_msg
, size
)) {
921 goto sg_list_cleanup
;
924 (size
- sg_offset
* 4) / sizeof(struct sg_simple_element
);
927 sg
= (struct sg_simple_element
*)(rmsg
+ sg_offset
);
928 for (j
= 0; j
< sg_count
; j
++) {
929 /* Copy out the SG list to user's buffer if necessary */
932 flag_count
& 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
933 sg_size
= sg
[j
].flag_count
& 0xffffff;
936 ((void __user
*)sg
[j
].addr_bus
, sg_list
[j
].virt
,
939 "%s: Could not copy %p TO user %x\n",
940 c
->name
, sg_list
[j
].virt
,
943 goto sg_list_cleanup
;
950 /* Copy back the reply to user space */
952 // we wrote our own values for context - now restore the user supplied ones
953 if (copy_from_user(reply
+ 2, user_msg
+ 2, sizeof(u32
) * 2)) {
955 "%s: Could not copy message context FROM user\n",
959 if (copy_to_user(user_reply
, reply
, reply_size
)) {
961 "%s: Could not copy reply TO user\n", c
->name
);
966 for (i
= 0; i
< sg_index
; i
++)
967 i2o_dma_free(&c
->pdev
->dev
, &sg_list
[i
]);
981 static long i2o_cfg_ioctl(struct file
*fp
, unsigned int cmd
, unsigned long arg
)
985 mutex_lock(&i2o_cfg_mutex
);
988 ret
= i2o_cfg_getiops(arg
);
992 ret
= i2o_cfg_gethrt(arg
);
996 ret
= i2o_cfg_getlct(arg
);
1000 ret
= i2o_cfg_parms(arg
, I2OPARMSET
);
1004 ret
= i2o_cfg_parms(arg
, I2OPARMGET
);
1008 ret
= i2o_cfg_swdl(arg
);
1012 ret
= i2o_cfg_swul(arg
);
1016 ret
= i2o_cfg_swdel(arg
);
1020 ret
= i2o_cfg_validate(arg
);
1024 ret
= i2o_cfg_evt_reg(arg
, fp
);
1028 ret
= i2o_cfg_evt_get(arg
, fp
);
1031 #ifdef CONFIG_I2O_EXT_ADAPTEC
1033 ret
= i2o_cfg_passthru(arg
);
1038 osm_debug("unknown ioctl called!\n");
1041 mutex_unlock(&i2o_cfg_mutex
);
1045 static int cfg_open(struct inode
*inode
, struct file
*file
)
1047 struct i2o_cfg_info
*tmp
= kmalloc(sizeof(struct i2o_cfg_info
),
1049 unsigned long flags
;
1054 mutex_lock(&i2o_cfg_mutex
);
1055 file
->private_data
= (void *)(i2o_cfg_info_id
++);
1058 tmp
->q_id
= (ulong
) file
->private_data
;
1063 tmp
->next
= open_files
;
1065 spin_lock_irqsave(&i2o_config_lock
, flags
);
1067 spin_unlock_irqrestore(&i2o_config_lock
, flags
);
1068 mutex_unlock(&i2o_cfg_mutex
);
1073 static int cfg_fasync(int fd
, struct file
*fp
, int on
)
1075 ulong id
= (ulong
) fp
->private_data
;
1076 struct i2o_cfg_info
*p
;
1079 mutex_lock(&i2o_cfg_mutex
);
1080 for (p
= open_files
; p
; p
= p
->next
)
1085 ret
= fasync_helper(fd
, fp
, on
, &p
->fasync
);
1086 mutex_unlock(&i2o_cfg_mutex
);
1090 static int cfg_release(struct inode
*inode
, struct file
*file
)
1092 ulong id
= (ulong
) file
->private_data
;
1093 struct i2o_cfg_info
*p
, **q
;
1094 unsigned long flags
;
1096 mutex_lock(&i2o_cfg_mutex
);
1097 spin_lock_irqsave(&i2o_config_lock
, flags
);
1098 for (q
= &open_files
; (p
= *q
) != NULL
; q
= &p
->next
) {
1099 if (p
->q_id
== id
) {
1105 spin_unlock_irqrestore(&i2o_config_lock
, flags
);
1106 mutex_unlock(&i2o_cfg_mutex
);
1111 static const struct file_operations config_fops
= {
1112 .owner
= THIS_MODULE
,
1113 .llseek
= no_llseek
,
1114 .unlocked_ioctl
= i2o_cfg_ioctl
,
1115 #ifdef CONFIG_COMPAT
1116 .compat_ioctl
= i2o_cfg_compat_ioctl
,
1119 .release
= cfg_release
,
1120 .fasync
= cfg_fasync
,
1123 static struct miscdevice i2o_miscdev
= {
1129 static int __init
i2o_config_old_init(void)
1131 spin_lock_init(&i2o_config_lock
);
1133 if (misc_register(&i2o_miscdev
) < 0) {
1134 osm_err("can't register device.\n");
1141 static void i2o_config_old_exit(void)
1143 misc_deregister(&i2o_miscdev
);
1146 MODULE_AUTHOR("Red Hat Software");