loader: remove shouting from ORB's variable name
[hvf.git] / cp / nucleus / io.c
blob50f938f84040fde0e7b077924d6b5cfc6b42a68f
1 /*
2 * (C) Copyright 2007-2019 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
4 * This file is released under the GPLv2. See the COPYING file for more
5 * details.
6 */
8 #include <channel.h>
9 #include <io.h>
10 #include <interrupt.h>
11 #include <device.h>
12 #include <sched.h>
13 #include <atomic.h>
14 #include <spinlock.h>
15 #include <buddy.h>
16 #include <sched.h>
/*
 * Sanity-check an io_op before it is queued.  Currently a stub that
 * accepts everything; returns 0 on success.
 */
static int __verify_io_op(struct io_op *op)
{
	FIXME("check everything that makes sense to check");
	return 0;
}
27 static void __reset_reserved_fields(struct io_op *ioop)
29 ioop->orb.__zero1 = 0;
31 ioop->orb.__reserved1 = 0;
32 ioop->orb.__reserved2 = 0;
33 ioop->orb.__reserved3 = 0;
34 ioop->orb.__reserved4 = 0;
35 ioop->orb.__reserved5 = 0;
36 ioop->orb.__reserved6 = 0;
39 /* NOTE: assumes dev->q_lock is held */
40 static void __submit_io(struct device *dev)
42 struct io_op *ioop;
43 int err;
45 if (dev->q_cur)
46 return;
48 if (list_empty(&dev->q_out))
49 return;
51 ioop = list_entry(dev->q_out.next, struct io_op, list);
53 err = start_sch(dev->sch, &ioop->orb);
54 if (!err) {
55 list_del(&ioop->list);
56 dev->q_cur = ioop;
57 } else
58 ioop->err = err;
62 * Submit an I/O request to a subchannel, and set up everything needed to
63 * handle the operation
65 int submit_io(struct device *dev, struct io_op *ioop, int flags)
67 static atomic_t op_id_counter;
68 unsigned long intmask;
69 int err = -EBUSY;
71 err = __verify_io_op(ioop);
72 if (err)
73 return 0;
75 /* make sure all reserved fields have the right values */
76 __reset_reserved_fields(ioop);
78 ioop->err = 0;
79 ioop->orb.param = atomic_inc_return(&op_id_counter);
80 atomic_set(&ioop->done, 0);
82 /* add it to the list of ops */
83 spin_lock_intsave(&dev->q_lock, &intmask);
84 list_add_tail(&ioop->list, &dev->q_out);
86 __submit_io(dev); /* try to submit an IO right now */
87 spin_unlock_intrestore(&dev->q_lock, intmask);
89 if (flags & CAN_LOOP) {
90 while(!atomic_read(&ioop->done))
92 } else if (flags & CAN_SLEEP) {
93 while(!atomic_read(&ioop->done))
94 schedule();
97 return 0;
/*
 * Initialize the channel I/O subsystem
 */
void init_io(void)
{
	/* allow interruptions from all eight I/O interrupt subclasses */
	enable_io_int_classes(0xff);
}
109 static int default_io_handler(struct device *dev, struct io_op *ioop, struct irb *irb)
111 ioop->err = -EAGAIN;
113 /* Unit check? */
114 if (irb->scsw.dev_status & 0x02) {
115 FIXME("we should bail");
116 ioop->err = -EUCHECK;
119 /* Device End is set, we're done */
120 if (irb->scsw.dev_status & 0x04)
121 ioop->err = 0;
123 return 0;
126 static void __cpu_initiated_io(struct device *dev, struct io_op *ioop, struct irb *irb)
128 unsigned long intmask;
130 ioop->err = test_sch(dev->sch, irb);
132 if (!ioop->err && ioop->handler)
133 ioop->handler(dev, ioop, irb);
134 else if (!ioop->err)
135 default_io_handler(dev, ioop, irb);
138 * We can do this, because the test_sch function sets ->err, and
139 * therefore regardless of ->handler being defined, ->err will have
140 * a reasonable value
142 if (ioop->err == -EAGAIN)
143 return; /* leave handler registered */
145 /* ...and remove it form the list */
146 spin_lock_intsave(&dev->q_lock, &intmask);
147 dev->q_cur = NULL;
149 __submit_io(dev); /* try to submit another IO */
150 spin_unlock_intrestore(&dev->q_lock, intmask);
152 /* flag io_op as done... */
153 atomic_set(&ioop->done, 1);
155 /* call the destructor if there is one */
156 if (ioop->dtor)
157 ioop->dtor(dev, ioop);
160 static void __dev_initiated_io(struct device *dev, struct irb *irb)
165 * I/O Interrupt handler (C portion)
167 void __io_int_handler(void)
169 unsigned long intmask;
170 struct io_op *ioop;
171 struct device *dev;
172 struct irb irb;
174 dev = find_device_by_sch(IO_INT_CODE->ssid);
175 BUG_ON(IS_ERR(dev));
177 spin_lock_intsave(&dev->q_lock, &intmask);
178 ioop = dev->q_cur;
179 spin_unlock_intrestore(&dev->q_lock, intmask);
181 if (ioop && ioop->orb.param == IO_INT_CODE->param &&
182 dev->sch == IO_INT_CODE->ssid) {
184 * CPU-initiated operation
187 __cpu_initiated_io(dev, ioop, &irb);
188 dev_put(dev);
189 return;
193 * device-initiated operation
195 BUG_ON(test_sch(dev->sch, &irb));
197 atomic_inc(&dev->attention);
199 if (dev->dev->interrupt)
200 dev->dev->interrupt(dev, &irb);
201 else
202 __dev_initiated_io(dev, &irb);
203 dev_put(dev);