/* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 *          and Philip Blundell
 */

#undef PARPORT_DEBUG_SHARING            /* undef for production */

#include <linux/config.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/malloc.h>
#include <linux/sched.h>
#include <linux/kmod.h>

#include <linux/spinlock.h>
#include <asm/irq.h>

#undef PARPORT_PARANOID

#define PARPORT_DEFAULT_TIMESLICE       (HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

static struct parport *portlist = NULL, *portlist_tail = NULL;
spinlock_t parportlist_lock = SPIN_LOCK_UNLOCKED;

static struct parport_driver *driver_chain = NULL;
spinlock_t driverlist_lock = SPIN_LOCK_UNLOCKED;

/* What you can do to a port that's gone away.. */
static void dead_write_lines (struct parport *p, unsigned char b){}
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
                             unsigned char c) { return 0; }
static void dead_onearg (struct parport *p){}
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
static void dead_noargs (void) { }
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{ return 0; }
static struct parport_operations dead_ops = {
        dead_write_lines,       /* data */
        dead_read_lines,
        dead_write_lines,       /* control */
        dead_read_lines,
        dead_frob_lines,
        dead_read_lines,        /* status */
        dead_onearg,            /* enable_irq */
        dead_onearg,            /* disable_irq */
        dead_onearg,            /* data_forward */
        dead_onearg,            /* data_reverse */
        dead_initstate,         /* init_state */
        dead_state,
        dead_state,
        dead_noargs,            /* xxx_use_count */
        dead_noargs,
        dead_write,             /* epp */
        dead_read,
        dead_write,
        dead_read,
        dead_write,             /* ecp */
        dead_read,
        dead_write,
        dead_write,             /* compat */
        dead_read,              /* nibble */
        dead_read               /* byte */
};

/* Call every registered driver's attach() hook for a new port, or its
 * detach() hook when a port goes away. */
static void call_driver_chain(int attach, struct parport *port)
{
        struct parport_driver *drv;

        for (drv = driver_chain; drv; drv = drv->next) {
                if (attach)
                        drv->attach (port);
                else
                        drv->detach (port);
        }
}

int parport_register_driver (struct parport_driver *drv)
{
        struct parport *port;

        spin_lock (&driverlist_lock);
        drv->next = driver_chain;
        driver_chain = drv;
        spin_unlock (&driverlist_lock);

        for (port = portlist; port; port = port->next)
                drv->attach (port);

        return 0;
}
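
/*
 * Usage sketch (illustrative, not part of this file): a client driver
 * fills in the attach/detach hooks of a struct parport_driver (the same
 * hooks call_driver_chain() above invokes) and registers it at init
 * time.  The names my_attach, my_detach and my_driver are hypothetical.
 *
 *      static void my_attach (struct parport *port)
 *      {
 *              ... decide whether to use this port ...
 *      }
 *      static void my_detach (struct parport *port)
 *      {
 *              ... forget this port ...
 *      }
 *
 *      static struct parport_driver my_driver;
 *
 *      my_driver.attach = my_attach;
 *      my_driver.detach = my_detach;
 *      parport_register_driver (&my_driver);
 *
 * parport_register_driver() immediately calls attach() for every port
 * already known; parport_unregister_driver() just unlinks the driver
 * and does not call detach().
 */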

void parport_unregister_driver (struct parport_driver *arg)
{
        struct parport_driver *drv = driver_chain, *olddrv = NULL;

        while (drv) {
                if (drv == arg) {
                        spin_lock (&driverlist_lock);
                        if (olddrv)
                                olddrv->next = drv->next;
                        else
                                driver_chain = drv->next;
                        spin_unlock (&driverlist_lock);
                        return;
                }
                olddrv = drv;
                drv = drv->next;
        }
}

/* Return a list of all the ports we know about. */
struct parport *parport_enumerate(void)
{
        /* Attempt to make things work on 2.2 systems. */
        if (!portlist) {
                request_module ("parport_lowlevel");
                if (portlist)
                        /* The user has a parport_lowlevel alias in
                         * conf.modules.  Warn them that it won't work
                         * for long. */
                        printk (KERN_WARNING
                                "parport: 'parport_lowlevel' is deprecated; "
                                "see parport.txt\n");
        }

        return portlist;
}

struct parport *parport_register_port(unsigned long base, int irq, int dma,
                                      struct parport_operations *ops)
{
        struct parport *tmp;
        int portnum;
        int device;
        char *name;

        tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
        if (!tmp) {
                printk(KERN_WARNING "parport: memory squeeze\n");
                return NULL;
        }

        /* Search for the lowest free parport number. */
        for (portnum = 0; ; portnum++) {
                struct parport *itr = portlist;
                while (itr) {
                        if (itr->number == portnum)
                                /* No good, already used. */
                                break;
                        else
                                itr = itr->next;
                }

                if (itr == NULL)
                        /* Got to the end of the list. */
                        break;
        }

        /* Init our structure */
        memset(tmp, 0, sizeof(struct parport));
        tmp->base = base;
        tmp->irq = irq;
        tmp->dma = dma;
        tmp->muxport = tmp->daisy = tmp->muxsel = -1;
        tmp->modes = 0;
        tmp->next = NULL;
        tmp->devices = tmp->cad = NULL;
        tmp->flags = 0;
        tmp->ops = ops;
        tmp->portnum = tmp->number = portnum;
        tmp->physport = tmp;
        memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
        tmp->cad_lock = RW_LOCK_UNLOCKED;
        spin_lock_init(&tmp->waitlist_lock);
        spin_lock_init(&tmp->pardevice_lock);
        tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
        tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
        init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
        tmp->spintime = parport_default_spintime;

        name = kmalloc(15, GFP_KERNEL);
        if (!name) {
                printk(KERN_ERR "parport: memory squeeze\n");
                kfree(tmp);
                return NULL;
        }
        sprintf(name, "parport%d", portnum);
        tmp->name = name;

        /*
         * Chain the entry to our list.
         *
         * This function must not run from an irq handler so we don't need
         * to clear irq on the local CPU. -arca
         */
        spin_lock(&parportlist_lock);
        if (portlist_tail)
                portlist_tail->next = tmp;
        portlist_tail = tmp;
        if (!portlist)
                portlist = tmp;
        spin_unlock(&parportlist_lock);

        for (device = 0; device < 5; device++)
                /* assume the worst */
                tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;

        tmp->waithead = tmp->waittail = NULL;

        return tmp;
}

void parport_announce_port (struct parport *port)
{
#ifdef CONFIG_PARPORT_1284
        /* Analyse the IEEE1284.3 topology of the port. */
        parport_daisy_init (port);
#endif

        /* Let drivers know that a new port has arrived. */
        call_driver_chain (1, port);
}
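
/*
 * Usage sketch (illustrative, not part of this file): a low-level port
 * driver registers its hardware in two steps, first creating the
 * struct parport and then announcing it to client drivers.  The
 * operations table my_lowlevel_ops is hypothetical and would point at
 * the driver's real I/O routines.
 *
 *      struct parport *port;
 *
 *      port = parport_register_port (0x378, PARPORT_IRQ_NONE,
 *                                    PARPORT_DMA_NONE, &my_lowlevel_ops);
 *      if (!port)
 *              return -ENOMEM;
 *      parport_announce_port (port);
 */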

static void free_port (struct parport *port)
{
        int d;
        for (d = 0; d < 5; d++) {
                if (port->probe_info[d].class_name)
                        kfree (port->probe_info[d].class_name);
                if (port->probe_info[d].mfr)
                        kfree (port->probe_info[d].mfr);
                if (port->probe_info[d].model)
                        kfree (port->probe_info[d].model);
                if (port->probe_info[d].cmdset)
                        kfree (port->probe_info[d].cmdset);
                if (port->probe_info[d].description)
                        kfree (port->probe_info[d].description);
        }

        kfree(port->name);
        kfree(port);
}

void parport_unregister_port(struct parport *port)
{
        struct parport *p;

        port->ops = &dead_ops;

        /* Spread the word. */
        call_driver_chain (0, port);

#ifdef CONFIG_PARPORT_1284
        /* Forget the IEEE1284.3 topology of the port. */
        parport_daisy_fini (port);
#endif

        spin_lock(&parportlist_lock);
        if (portlist == port) {
                if ((portlist = port->next) == NULL)
                        portlist_tail = NULL;
        } else {
                for (p = portlist; (p != NULL) && (p->next != port);
                     p = p->next);
                if (p) {
                        if ((p->next = port->next) == NULL)
                                portlist_tail = p;
                }
                else printk (KERN_WARNING
                             "%s not found in port list!\n", port->name);
        }
        spin_unlock(&parportlist_lock);

        if (!port->devices)
                free_port (port);
}

struct pardevice *parport_register_device(struct parport *port, const char *name,
                          int (*pf)(void *), void (*kf)(void *),
                          void (*irq_func)(int, void *, struct pt_regs *),
                          int flags, void *handle)
{
        struct pardevice *tmp;

        if (port->physport->flags & PARPORT_FLAG_EXCL) {
                /* An exclusive device is registered. */
                printk (KERN_DEBUG "%s: no more devices allowed\n",
                        port->name);
                return NULL;
        }

        if (flags & PARPORT_DEV_LURK) {
                if (!pf || !kf) {
                        printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
                        return NULL;
                }
        }

        tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
        if (tmp == NULL) {
                printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
                return NULL;
        }

        tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
        if (tmp->state == NULL) {
                printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
                kfree(tmp);
                return NULL;
        }

        tmp->name = name;
        tmp->port = port;
        tmp->daisy = -1;
        tmp->preempt = pf;
        tmp->wakeup = kf;
        tmp->private = handle;
        tmp->flags = flags;
        tmp->irq_func = irq_func;
        tmp->waiting = 0;
        tmp->timeout = 5 * HZ;

        /* Chain this onto the list */
        tmp->prev = NULL;
        /*
         * This function must not run from an irq handler so we don't need
         * to clear irq on the local CPU. -arca
         */
        spin_lock(&port->physport->pardevice_lock);

        if (flags & PARPORT_DEV_EXCL) {
                if (port->physport->devices) {
                        spin_unlock (&port->physport->pardevice_lock);
                        kfree (tmp->state);
                        kfree (tmp);
                        printk (KERN_DEBUG
                                "%s: cannot grant exclusive access for "
                                "device %s\n", port->name, name);
                        return NULL;
                }
                port->flags |= PARPORT_FLAG_EXCL;
        }

        tmp->next = port->physport->devices;
        if (port->physport->devices)
                port->physport->devices->prev = tmp;
        port->physport->devices = tmp;
        spin_unlock(&port->physport->pardevice_lock);

        inc_parport_count();
        port->ops->inc_use_count();

        init_waitqueue_head(&tmp->wait_q);
        tmp->timeslice = parport_default_timeslice;
        tmp->waitnext = tmp->waitprev = NULL;

        /*
         * This has to be run as last thing since init_state may need other
         * pardevice fields. -arca
         */
        port->ops->init_state(tmp, tmp->state);
        parport_device_proc_register(tmp);
        return tmp;
}
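
/*
 * Usage sketch (illustrative, not part of this file): a client driver
 * typically registers one pardevice per port, usually from its attach()
 * hook.  The names lp_preempt, lp_wakeup, lp_interrupt and my_data are
 * hypothetical.  The preempt callback (pf) returns 0 if the device is
 * willing to give the port up and non-zero to refuse; the wakeup
 * callback (kf) is called when the port becomes free again.
 *
 *      struct pardevice *dev;
 *
 *      dev = parport_register_device (port, "lp", lp_preempt, lp_wakeup,
 *                                     lp_interrupt, 0, (void *) my_data);
 *      if (!dev)
 *              return -ENXIO;
 *
 * Passing PARPORT_DEV_EXCL in the flags requests exclusive use of the
 * port, which is only granted while no other device is registered on it.
 */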

void parport_unregister_device(struct pardevice *dev)
{
        struct parport *port;

#ifdef PARPORT_PARANOID
        if (dev == NULL) {
                printk(KERN_ERR "parport_unregister_device: passed NULL\n");
                return;
        }
#endif

        parport_device_proc_unregister(dev);

        port = dev->port->physport;

        if (port->cad == dev) {
                printk(KERN_DEBUG "%s: %s forgot to release port\n",
                       port->name, dev->name);
                parport_release (dev);
        }

        spin_lock(&port->pardevice_lock);
        if (dev->next)
                dev->next->prev = dev->prev;
        if (dev->prev)
                dev->prev->next = dev->next;
        else
                port->devices = dev->next;

        if (dev->flags & PARPORT_DEV_EXCL)
                port->flags &= ~PARPORT_FLAG_EXCL;

        spin_unlock(&port->pardevice_lock);

        kfree(dev->state);
        kfree(dev);

        dec_parport_count();
        port->ops->dec_use_count();

        /* If this was the last device on a port that's already gone away,
         * free up the resources. */
        if (port->ops == &dead_ops && !port->devices)
                free_port (port);
}

int parport_claim(struct pardevice *dev)
{
        struct pardevice *oldcad;
        struct parport *port = dev->port->physport;
        unsigned long flags;

        if (port->cad == dev) {
                printk(KERN_INFO "%s: %s already owner\n",
                       dev->port->name, dev->name);
                return 0;
        }

try_again:
        /* Preempt any current device */
        if ((oldcad = port->cad) != NULL) {
                if (oldcad->preempt) {
                        if (oldcad->preempt(oldcad->private))
                                goto blocked;
                        port->ops->save_state(port, dev->state);
                } else
                        goto blocked;

                if (port->cad != oldcad) {
                        printk(KERN_WARNING
                               "%s: %s released port when preempted!\n",
                               port->name, oldcad->name);
                        if (port->cad)
                                goto blocked;
                }
        }

        /* Can't fail from now on, so mark ourselves as no longer waiting.
           (Bit 0 of dev->waiting means we are on the wait list; bit 1 is
           set by parport_claim_or_block() while it may sleep.) */
        if (dev->waiting & 1) {
                dev->waiting = 0;

                /* Take ourselves out of the wait list again. */
                spin_lock_irqsave (&port->waitlist_lock, flags);
                if (dev->waitprev)
                        dev->waitprev->waitnext = dev->waitnext;
                else
                        port->waithead = dev->waitnext;
                if (dev->waitnext)
                        dev->waitnext->waitprev = dev->waitprev;
                else
                        port->waittail = dev->waitprev;
                spin_unlock_irqrestore (&port->waitlist_lock, flags);
                dev->waitprev = dev->waitnext = NULL;
        }

        /* Now we do the change of devices */
        write_lock_irqsave(&port->cad_lock, flags);
        port->cad = dev;
        write_unlock_irqrestore(&port->cad_lock, flags);

#ifdef CONFIG_PARPORT_1284
        /* If it's a mux port, select it. */
        if (dev->port->muxport >= 0) {
                /* FIXME */
                port->muxsel = dev->port->muxport;
        }

        /* If it's a daisy chain device, select it. */
        if (dev->daisy >= 0) {
                /* This could be lazier. */
                if (!parport_daisy_select (port, dev->daisy,
                                           IEEE1284_MODE_COMPAT))
                        port->daisy = dev->daisy;
        }
#endif /* IEEE1284.3 support */

        /* Restore control registers */
        port->ops->restore_state(port, dev->state);
        dev->time = jiffies;
        return 0;

blocked:
        /* If this is the first time we tried to claim the port, register an
           interest.  This is only allowed for devices sleeping in
           parport_claim_or_block(), or those with a wakeup function. */
        if (dev->waiting & 2 || dev->wakeup) {
                spin_lock_irqsave (&port->waitlist_lock, flags);
                if (port->cad == NULL) {
                        /* The port got released in the meantime. */
                        spin_unlock_irqrestore (&port->waitlist_lock, flags);
                        goto try_again;
                }
                if (test_and_set_bit(0, &dev->waiting) == 0) {
                        /* First add ourselves to the end of the wait list. */
                        dev->waitnext = NULL;
                        dev->waitprev = port->waittail;
                        if (port->waittail) {
                                port->waittail->waitnext = dev;
                                port->waittail = dev;
                        } else
                                port->waithead = port->waittail = dev;
                }
                spin_unlock_irqrestore (&port->waitlist_lock, flags);
        }
        return -EAGAIN;
}
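
/*
 * Usage sketch (illustrative, not part of this file): parport_claim()
 * never sleeps, so it is suitable for contexts that cannot block.  A
 * device registered with a wakeup callback can simply back off on
 * -EAGAIN; its wakeup function will be called when the port is
 * released.
 *
 *      if (parport_claim (dev) == -EAGAIN)
 *              return;
 *      ... talk to the hardware via dev->port ...
 *      parport_release (dev);
 */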

int parport_claim_or_block(struct pardevice *dev)
{
        int r;

        /* Signal to parport_claim() that we can wait even without a
           wakeup function. */
        dev->waiting = 2;

        /* Try to claim the port.  If this fails, we need to sleep. */
        r = parport_claim(dev);
        if (r == -EAGAIN) {
                unsigned long flags;
#ifdef PARPORT_DEBUG_SHARING
                printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
                save_flags (flags);
                cli();
                /* If dev->waiting is clear now, an interrupt
                   gave us the port and we would deadlock if we slept. */
                if (dev->waiting) {
                        sleep_on(&dev->wait_q);
                        r = 1;
                } else {
                        r = 0;
#ifdef PARPORT_DEBUG_SHARING
                        printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
                               dev->name);
#endif
                }
                restore_flags(flags);
#ifdef PARPORT_DEBUG_SHARING
                if (dev->port->physport->cad != dev)
                        printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
                               "but %s owns port!\n", dev->name,
                               dev->port->physport->cad ?
                               dev->port->physport->cad->name : "nobody");
#endif
        }
        dev->waiting = 0;
        return r;
}
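
/*
 * Usage sketch (illustrative, not part of this file): callers that are
 * allowed to sleep, such as a character device's write path, normally
 * bracket their I/O like this.  The return value is 0 if the port was
 * free immediately and 1 if we had to sleep for it.
 *
 *      parport_claim_or_block (dev);
 *      ... perform the transfer through dev->port ...
 *      parport_release (dev);
 */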

void parport_release(struct pardevice *dev)
{
        struct parport *port = dev->port->physport;
        struct pardevice *pd;
        unsigned long flags;

        /* Make sure that dev is the current device */
        if (port->cad != dev) {
                printk(KERN_WARNING "%s: %s tried to release parport "
                       "when not owner\n", port->name, dev->name);
                return;
        }

#ifdef CONFIG_PARPORT_1284
        /* If this is on a mux port, deselect it. */
        if (dev->port->muxport >= 0) {
                /* FIXME */
                port->muxsel = -1;
        }

        /* If this is a daisy device, deselect it. */
        if (dev->daisy >= 0) {
                parport_daisy_deselect_all (port);
                port->daisy = -1;
        }
#endif

        write_lock_irqsave(&port->cad_lock, flags);
        port->cad = NULL;
        write_unlock_irqrestore(&port->cad_lock, flags);

        /* Save control registers */
        port->ops->save_state(port, dev->state);

        /* If anybody is waiting, find out who's been there longest and
           then wake them up. (Note: no locking required) */
        for (pd = port->waithead; pd; pd = pd->waitnext) {
                if (pd->waiting & 2) { /* sleeping in claim_or_block */
                        parport_claim(pd);
                        if (waitqueue_active(&pd->wait_q))
                                wake_up(&pd->wait_q);
                        return;
                } else if (pd->wakeup) {
                        pd->wakeup(pd->private);
                        if (dev->port->cad)
                                return;
                } else {
                        printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
                }
        }

        /* Nobody was waiting, so walk the list to see if anyone is
           interested in being woken up. */
        for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
                if (pd->wakeup && pd != dev)
                        pd->wakeup(pd->private);
        }
}

static int parport_parse_params (int nports, const char *str[], int val[],
                                 int automatic, int none, int nofifo)
{
        unsigned int i;
        for (i = 0; i < nports && str[i]; i++) {
                if (!strncmp(str[i], "auto", 4))
                        val[i] = automatic;
                else if (!strncmp(str[i], "none", 4))
                        val[i] = none;
                else if (nofifo && !strncmp(str[i], "nofifo", 4))
                        val[i] = nofifo;
                else {
                        char *ep;
                        unsigned long r = simple_strtoul(str[i], &ep, 0);
                        if (ep != str[i])
                                val[i] = r;
                        else {
                                printk("parport: bad specifier `%s'\n", str[i]);
                                return -1;
                        }
                }
        }

        return 0;
}

int parport_parse_irqs(int nports, const char *irqstr[], int irqval[])
{
        return parport_parse_params (nports, irqstr, irqval, PARPORT_IRQ_AUTO,
                                     PARPORT_IRQ_NONE, 0);
}

int parport_parse_dmas(int nports, const char *dmastr[], int dmaval[])
{
        return parport_parse_params (nports, dmastr, dmaval, PARPORT_DMA_AUTO,
                                     PARPORT_DMA_NONE, PARPORT_DMA_NOFIFO);
}
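
/*
 * Usage sketch (illustrative, not part of this file): a low-level
 * driver passes its "irq=" and "dma=" module parameter strings through
 * these helpers.  With the parsing above, "auto" and "none" map to the
 * corresponding PARPORT_* constants ("nofifo" as well, for DMA) and
 * anything else is read as a number.
 *
 *      static const char *irq[2] = { "auto", "7" };
 *      int irqval[2];
 *
 *      parport_parse_irqs (2, irq, irqval);
 *              irqval[0] == PARPORT_IRQ_AUTO, irqval[1] == 7
 */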