/*
 * (C) Copyright 2007-2010 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 *
 * This file is released under the GPLv2. See the COPYING file for more
 * details.
 */
#include <interrupt.h>
/*
 * Helper function to make sure the io_op has everything set right
 */
static int __verify_io_op(struct io_op *ioop)
{
	FIXME("check everything that makes sense to check");
	return 0;
}
static void __reset_reserved_fields(struct io_op *ioop)
{
	ioop->orb.__zero1 = 0;

	ioop->orb.__reserved1 = 0;
	ioop->orb.__reserved2 = 0;
	ioop->orb.__reserved3 = 0;
	ioop->orb.__reserved4 = 0;
	ioop->orb.__reserved5 = 0;
	ioop->orb.__reserved6 = 0;
}
/* NOTE: assumes dev->q_lock is held */
static void __submit_io(struct device *dev)
{
	struct io_op *ioop;
	int err;

	if (list_empty(&dev->q_out))
		return;

	ioop = list_entry(dev->q_out.next, struct io_op, list);

	err = start_sch(dev->sch, &ioop->orb);

	list_del(&ioop->list);
/*
 * Submit an I/O request to a subchannel, and set up everything needed to
 * handle the operation
 */
int submit_io(struct device *dev, struct io_op *ioop, int flags)
{
	static atomic_t op_id_counter;
	unsigned long intmask;
	int err;

	err = __verify_io_op(ioop);

	/* make sure all reserved fields have the right values */
	__reset_reserved_fields(ioop);

	ioop->orb.param = atomic_inc_return(&op_id_counter);
	atomic_set(&ioop->done, 0);

	/* add it to the list of ops */
	spin_lock_intsave(&dev->q_lock, &intmask);
	list_add_tail(&ioop->list, &dev->q_out);

	__submit_io(dev); /* try to submit an IO right now */
	spin_unlock_intrestore(&dev->q_lock, intmask);

	if (flags & CAN_LOOP) {
		while(!atomic_read(&ioop->done))
			;
	} else if (flags & CAN_SLEEP) {
		while(!atomic_read(&ioop->done))
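
/*
 * Illustrative sketch only (not part of this driver): roughly how a caller
 * might queue an operation with submit_io().  The io_op members used here
 * (orb, handler, dtor) and the CAN_LOOP flag come from this file; the ccw
 * argument, the ADDR31() helper, and the ORB channel-program-address field
 * name are assumptions and would need to match the real headers.
 */
#if 0
static int example_io(struct device *dev, struct ccw *ccw)
{
	struct io_op ioop;

	memset(&ioop, 0, sizeof(ioop));
	ioop.orb.addr = ADDR31(ccw);	/* assumed ORB field name */
	ioop.handler = NULL;		/* fall back to default_io_handler */
	ioop.dtor = NULL;		/* stack-allocated, nothing to free */

	/* busy-wait until the interrupt handler flags ioop.done */
	return submit_io(dev, &ioop, CAN_LOOP);
}
#endif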
/*
 * Initialize the channel I/O subsystem
 */

	/* enable all I/O interrupt classes */
static int default_io_handler(struct device *dev, struct io_op *ioop, struct irb *irb)
{
	if (irb->scsw.dev_status & 0x02) {
		FIXME("we should bail");
		ioop->err = -EUCHECK;
	}

	/* Device End is set, we're done */
	if (irb->scsw.dev_status & 0x04)
		return 0;
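
/*
 * Readability note: the 0x02 and 0x04 tests above check the unit-check and
 * device-end bits of the SCSW device-status byte (as the comments and the
 * -EUCHECK error imply).  If named constants are preferred, something like
 * the following could be used; the macro names are made up here.
 */
#if 0
#define DS_UNIT_CHECK	0x02	/* unit check */
#define DS_DEVICE_END	0x04	/* device end */
#endif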
static void __cpu_initiated_io(struct device *dev, struct io_op *ioop, struct irb *irb)
{
	unsigned long intmask;

	ioop->err = test_sch(dev->sch, irb);

	if (!ioop->err && ioop->handler)
		ioop->handler(dev, ioop, irb);
	else
		default_io_handler(dev, ioop, irb);

	/*
	 * We can do this, because the test_sch function sets ->err, and
	 * therefore regardless of ->handler being defined, ->err will have
	 * a meaningful value.
	 */
	if (ioop->err == -EAGAIN)
		return; /* leave handler registered */

	/* ...and remove it from the list */
	spin_lock_intsave(&dev->q_lock, &intmask);

	__submit_io(dev); /* try to submit another IO */
	spin_unlock_intrestore(&dev->q_lock, intmask);

	/* flag io_op as done... */
	atomic_set(&ioop->done, 1);

	/* call the destructor if there is one */
	if (ioop->dtor)
		ioop->dtor(dev, ioop);
}
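
/*
 * Illustrative sketch only: what a per-op completion handler and destructor
 * might look like.  The signatures mirror the indirect calls above
 * (handler takes dev/ioop/irb and returns int like default_io_handler;
 * dtor takes dev/ioop).  The bodies are assumptions; free() stands in for
 * whatever allocator the rest of the tree uses.
 */
#if 0
static int example_handler(struct device *dev, struct io_op *ioop,
			   struct irb *irb)
{
	/* record a unit check, mirroring default_io_handler */
	if (irb->scsw.dev_status & 0x02)
		ioop->err = -EUCHECK;

	return 0;
}

static void example_dtor(struct device *dev, struct io_op *ioop)
{
	/* the op was heap-allocated by its submitter, so release it here */
	free(ioop);
}
#endif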
static void __dev_initiated_io(struct device *dev, struct irb *irb)
/*
 * I/O Interrupt handler (C portion)
 */
void __io_int_handler(void)
{
	unsigned long intmask;

	dev = find_device_by_sch(IO_INT_CODE->ssid);

	spin_lock_intsave(&dev->q_lock, &intmask);

	spin_unlock_intrestore(&dev->q_lock, intmask);

	if (ioop && ioop->orb.param == IO_INT_CODE->param &&
	    dev->sch == IO_INT_CODE->ssid) {
		/*
		 * CPU-initiated operation
		 */
		__cpu_initiated_io(dev, ioop, &irb);
	} else {
		/*
		 * device-initiated operation
		 */
		BUG_ON(test_sch(dev->sch, &irb));

		atomic_inc(&dev->attention);

		if (dev->dev->interrupt)
			dev->dev->interrupt(dev, &irb);
		else
			__dev_initiated_io(dev, &irb);
	}
}
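
/*
 * Illustrative sketch only: a device-type ->interrupt callback for
 * unsolicited interrupts, matching the dev->dev->interrupt(dev, &irb) call
 * above.  Only the (struct device *, struct irb *) argument pattern comes
 * from this file; the void return type and the body are assumptions.
 */
#if 0
static void example_dev_interrupt(struct device *dev, struct irb *irb)
{
	/* ->attention was already bumped by __io_int_handler; a driver
	   would typically wake whatever task services this device */
	FIXME("wake the device's service task");
}
#endif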