cp: allow prefixing all console messages with the guest name
[hvf.git] / cp / nucleus / io.c
blobf75923352eb850c6e7e5a816383a3a42de076031
1 /*
2 * (C) Copyright 2007-2010 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
4 * This file is released under the GPLv2. See the COPYING file for more
5 * details.
6 */
8 #include <channel.h>
9 #include <io.h>
10 #include <interrupt.h>
11 #include <device.h>
12 #include <sched.h>
13 #include <atomic.h>
14 #include <spinlock.h>
15 #include <buddy.h>
16 #include <sched.h>
/*
 * Sanity-check an io_op before it is queued.
 *
 * Currently a stub: always reports success (0).
 */
static int __verify_io_op(struct io_op *ioop)
{
	FIXME("check everything that makes sense to check");
	return 0;
}
27 static void __reset_reserved_fields(struct io_op *ioop)
29 ioop->orb.__zero1 = 0;
31 ioop->orb.__reserved1 = 0;
32 ioop->orb.__reserved2 = 0;
33 ioop->orb.__reserved3 = 0;
34 ioop->orb.__reserved4 = 0;
35 ioop->orb.__reserved5 = 0;
36 ioop->orb.__reserved6 = 0;
39 /* NOTE: assumes dev->q_lock is held */
40 static void __submit_io(struct device *dev)
42 struct io_op *ioop;
43 int err;
45 if (dev->q_cur)
46 return;
48 if (list_empty(&dev->q_out))
49 return;
51 ioop = list_entry(dev->q_out.next, struct io_op, list);
53 err = start_sch(dev->sch, &ioop->orb);
54 if (!err) {
55 list_del(&ioop->list);
56 dev->q_cur = ioop;
57 } else
58 ioop->err = err;
62 * Submit an I/O request to a subchannel, and set up everything needed to
63 * handle the operation
65 int submit_io(struct device *dev, struct io_op *ioop, int flags)
67 static atomic_t op_id_counter;
68 unsigned long intmask;
69 int err = -EBUSY;
71 err = __verify_io_op(ioop);
72 if (err)
73 return 0;
75 /* make sure all reserved fields have the right values */
76 __reset_reserved_fields(ioop);
78 ioop->err = 0;
79 ioop->orb.param = atomic_inc_return(&op_id_counter);
80 atomic_set(&ioop->done, 0);
82 /* add it to the list of ops */
83 spin_lock_intsave(&dev->q_lock, &intmask);
84 list_add_tail(&ioop->list, &dev->q_out);
86 __submit_io(dev); /* try to submit an IO right now */
87 spin_unlock_intrestore(&dev->q_lock, intmask);
89 if (flags & CAN_LOOP) {
90 while(!atomic_read(&ioop->done))
92 } else if (flags & CAN_SLEEP) {
93 while(!atomic_read(&ioop->done))
94 schedule();
97 return 0;
101 * Initialize the channel I/O subsystem
103 void init_io(void)
105 u64 cr6;
107 cr6 = get_cr(6);
109 /* enable all I/O interrupt classes */
110 cr6 |= BIT64(32);
111 cr6 |= BIT64(33);
112 cr6 |= BIT64(34);
113 cr6 |= BIT64(35);
114 cr6 |= BIT64(36);
115 cr6 |= BIT64(37);
116 cr6 |= BIT64(38);
117 cr6 |= BIT64(39);
119 set_cr(6, cr6);
122 static int default_io_handler(struct device *dev, struct io_op *ioop, struct irb *irb)
124 ioop->err = -EAGAIN;
126 /* Unit check? */
127 if (irb->scsw.dev_status & 0x02) {
128 FIXME("we should bail");
129 ioop->err = -EUCHECK;
132 /* Device End is set, we're done */
133 if (irb->scsw.dev_status & 0x04)
134 ioop->err = 0;
136 return 0;
139 static void __cpu_initiated_io(struct device *dev, struct io_op *ioop, struct irb *irb)
141 unsigned long intmask;
143 ioop->err = test_sch(dev->sch, irb);
145 if (!ioop->err && ioop->handler)
146 ioop->handler(dev, ioop, irb);
147 else if (!ioop->err)
148 default_io_handler(dev, ioop, irb);
151 * We can do this, because the test_sch function sets ->err, and
152 * therefore regardless of ->handler being defined, ->err will have
153 * a reasonable value
155 if (ioop->err == -EAGAIN)
156 return; /* leave handler registered */
158 /* ...and remove it form the list */
159 spin_lock_intsave(&dev->q_lock, &intmask);
160 dev->q_cur = NULL;
162 __submit_io(dev); /* try to submit another IO */
163 spin_unlock_intrestore(&dev->q_lock, intmask);
165 /* flag io_op as done... */
166 atomic_set(&ioop->done, 1);
168 /* call the destructor if there is one */
169 if (ioop->dtor)
170 ioop->dtor(dev, ioop);
173 static void __dev_initiated_io(struct device *dev, struct irb *irb)
178 * I/O Interrupt handler (C portion)
180 void __io_int_handler(void)
182 unsigned long intmask;
183 struct io_op *ioop;
184 struct device *dev;
185 struct irb irb;
187 dev = find_device_by_sch(IO_INT_CODE->ssid);
188 BUG_ON(IS_ERR(dev));
190 spin_lock_intsave(&dev->q_lock, &intmask);
191 ioop = dev->q_cur;
192 spin_unlock_intrestore(&dev->q_lock, intmask);
194 if (ioop && ioop->orb.param == IO_INT_CODE->param &&
195 dev->sch == IO_INT_CODE->ssid) {
197 * CPU-initiated operation
200 __cpu_initiated_io(dev, ioop, &irb);
201 dev_put(dev);
202 return;
206 * device-initiated operation
208 BUG_ON(test_sch(dev->sch, &irb));
210 atomic_inc(&dev->attention);
212 if (dev->dev->interrupt)
213 dev->dev->interrupt(dev, &irb);
214 else
215 __dev_initiated_io(dev, &irb);
216 dev_put(dev);