/*
 * (C) Copyright 2007-2019 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 *
 * This file is released under the GPLv2. See the COPYING file for more
 * details.
 */
10 #include <interrupt.h>
/*
 * Helper function to make sure the io_op has everything set right.
 *
 * Returns 0 when the op is acceptable; once real checks are implemented
 * a negative value will indicate a bad request.  submit_io() consumes
 * this return value, so the function must return something — the
 * extracted fragment was missing the return statement (UB in a non-void
 * function whose result is used).
 */
static int __verify_io_op(struct io_op *ioop)
{
	/* NOTE(review): no validation exists yet; keep returning success
	 * until the checks below are written. */
	FIXME("check everything that makes sense to check");

	return 0;
}
27 static void __reset_reserved_fields(struct io_op
*ioop
)
29 ioop
->orb
.__zero1
= 0;
31 ioop
->orb
.__reserved1
= 0;
32 ioop
->orb
.__reserved2
= 0;
33 ioop
->orb
.__reserved3
= 0;
34 ioop
->orb
.__reserved4
= 0;
35 ioop
->orb
.__reserved5
= 0;
36 ioop
->orb
.__reserved6
= 0;
40 /* NOTE: assumes dev->q_lock is held */
/*
 * Try to start the oldest queued I/O op on this device's subchannel.
 * NOTE(review): extraction dropped lines here (opening brace, local
 * declarations of `ioop`/`err`, and the body of the empty-queue check —
 * presumably an early `return;`) — reconstruct against upstream.
 */
41 static void __submit_io(struct device
*dev
)
/* nothing queued for this device — presumably bail out early here */
48 if (list_empty(&dev
->q_out
))
/* oldest pending op is at the head of q_out */
51 ioop
= list_entry(dev
->q_out
.next
, struct io_op
, list
);
/* hand the op's ORB to the subchannel */
53 err
= start_sch(dev
->sch
, &ioop
->orb
);
/* NOTE(review): dequeue is presumably conditional on start_sch
 * succeeding — the guarding `if` was lost in extraction; confirm. */
55 list_del(&ioop
->list
);
/*
 * Submit an I/O request to a subchannel, and set up everything needed to
 * handle the operation.
 */
/*
 * Queue an I/O op on a device and kick the subchannel.
 * @dev:   target device (its q_lock guards the op queues)
 * @ioop:  the operation to issue; ->done is (re)set to 0 here
 * @flags: CAN_LOOP busy-waits for completion, CAN_SLEEP presumably
 *         yields/sleeps instead — TODO confirm the dropped loop bodies
 * Returns an error code from __verify_io_op on bad input — the check
 * after the call was dropped in extraction; confirm against upstream.
 */
65 int submit_io(struct device
*dev
, struct io_op
*ioop
, int flags
)
/* monotonically increasing tag used to match interrupts to ops */
67 static atomic_t op_id_counter
;
68 unsigned long intmask
;
/* sanity-check the op before touching the hardware */
71 err
= __verify_io_op(ioop
);
75 /* make sure all reserved fields have the right values */
76 __reset_reserved_fields(ioop
);
/* tag the ORB so the interrupt handler can identify this op */
79 ioop
->orb
.param
= atomic_inc_return(&op_id_counter
);
/* not complete yet; the interrupt path sets this to 1 */
80 atomic_set(&ioop
->done
, 0);
82 /* add it to the list of ops */
83 spin_lock_intsave(&dev
->q_lock
, &intmask
);
84 list_add_tail(&ioop
->list
, &dev
->q_out
);
86 __submit_io(dev
); /* try to submit an IO right now */
87 spin_unlock_intrestore(&dev
->q_lock
, intmask
);
/* busy-wait until the interrupt handler marks the op done */
89 if (flags
& CAN_LOOP
) {
90 while(!atomic_read(&ioop
->done
))
/* NOTE(review): loop body lost in extraction — likely empty spin */
92 } else if (flags
& CAN_SLEEP
) {
93 while(!atomic_read(&ioop
->done
))
/* NOTE(review): body lost — presumably yields the CPU; confirm */
101 * Initialize the channel I/O subsystem
/* NOTE(review): the enclosing function's signature (original lines
 * 102-104) was lost in extraction — only the body fragment remains. */
105 /* enable all I/O interrupt classes */
106 enable_io_int_classes(0xff);
/*
 * Fallback completion handler: inspect the IRB's device status and
 * classify the interruption.  Unit Check (0x02) flags the op as failed;
 * Device End (0x04) means the op completed.  The return statements and
 * closing braces were dropped in extraction — presumably `return 0` on
 * Device End and `-EAGAIN` otherwise; confirm against upstream.
 */
109 static int default_io_handler(struct device
*dev
, struct io_op
*ioop
, struct irb
*irb
)
/* Unit Check: the device reported an error condition */
114 if (irb
->scsw
.dev_status
& 0x02) {
115 FIXME("we should bail");
116 ioop
->err
= -EUCHECK
;
119 /* Device End is set, we're done */
120 if (irb
->scsw
.dev_status
& 0x04)
/*
 * Finish a CPU-initiated I/O: collect status via test_sch, run the op's
 * handler (or the default one), and — unless the op wants to stay
 * registered — retire it and start the next queued op.
 */
126 static void __cpu_initiated_io(struct device
*dev
, struct io_op
*ioop
, struct irb
*irb
)
128 unsigned long intmask
;
/* test_sch stores the subchannel status into *irb and sets ->err */
130 ioop
->err
= test_sch(dev
->sch
, irb
);
/* prefer the op's own completion handler when the test succeeded */
132 if (!ioop
->err
&& ioop
->handler
)
133 ioop
->handler(dev
, ioop
, irb
)
;
/* NOTE(review): the `else` linking this to the branch above was lost
 * in extraction — confirm the fallback is the else-arm. */
135 default_io_handler(dev
, ioop
, irb
);
138 * We can do this, because the test_sch function sets ->err, and
139 * therefore regardless of ->handler being defined, ->err will have
/* -EAGAIN means the op is not finished yet: keep it registered */
142 if (ioop
->err
== -EAGAIN
)
143 return; /* leave handler registered */
145 /* ...and remove it from the list */
146 spin_lock_intsave(&dev
->q_lock
, &intmask
);
/* NOTE(review): the actual list removal (orig. 147-148) was dropped */
149 __submit_io(dev
); /* try to submit another IO */
150 spin_unlock_intrestore(&dev
->q_lock
, intmask
);
152 /* flag io_op as done... */
153 atomic_set(&ioop
->done
, 1);
155 /* call the destructor if there is one */
/* NOTE(review): the guarding `if (ioop->dtor)` (orig. 156) was lost */
157 ioop
->dtor(dev
, ioop
);
/*
 * Handle a device-initiated (unsolicited) interruption.
 * NOTE(review): the entire body (orig. 161-163) was lost in extraction
 * — recover it from upstream; likely a stub or FIXME.
 */
160 static void __dev_initiated_io(struct device
*dev
, struct irb
*irb
)
165 * I/O Interrupt handler (C portion)
/*
 * Entry from the low-level interrupt glue: match the interrupt code's
 * subchannel/param against queued ops to decide whether this completes a
 * CPU-initiated op or is an unsolicited device-initiated interruption.
 * NOTE(review): the declarations of `dev`, `ioop`, `irb` and the locked
 * scan that finds `ioop` (orig. 168-180 gaps) were lost in extraction.
 */
167 void __io_int_handler(void)
169 unsigned long intmask
;
/* map the interrupting subchannel id back to our device struct */
174 dev
= find_device_by_sch(IO_INT_CODE
->ssid
);
/* look up the in-flight op under the queue lock */
177 spin_lock_intsave(&dev
->q_lock
, &intmask
);
179 spin_unlock_intrestore(&dev
->q_lock
, intmask
);
/* op found and its ORB tag matches the interrupt's param: this
 * interrupt completes an operation we started */
181 if (ioop
&& ioop
->orb
.param
== IO_INT_CODE
->param
&&
182 dev
->sch
== IO_INT_CODE
->ssid
) {
184 * CPU-initiated operation
187 __cpu_initiated_io(dev
, ioop
, &irb
);
193 * device-initiated operation
/* must be able to read the status; anything else is fatal */
195 BUG_ON(test_sch(dev
->sch
, &irb
));
/* count unsolicited attention interrupts for this device */
197 atomic_inc(&dev
->attention
);
/* give the device driver's interrupt hook first crack at it */
199 if (dev
->dev
->interrupt
)
200 dev
->dev
->interrupt(dev
, &irb
);
/* NOTE(review): likely the else-arm of the check above — confirm */
202 __dev_initiated_io(dev
, &irb
);