/*
 * resource.c - Contains functions for registering and analyzing resource information
 *
 * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz>
 * Copyright 2003 Adam Belay <ambx1@neo.rr.com>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>

#include <linux/pnp.h>
#include "base.h"
static int pnp_reserve_irq[16] = {[0 ... 15] = -1 };	/* reserve (don't use) some IRQ */
static int pnp_reserve_dma[8] = {[0 ... 7] = -1 };	/* reserve (don't use) some DMA */
static int pnp_reserve_io[16] = {[0 ... 15] = -1 };	/* reserve (don't use) some I/O region */
static int pnp_reserve_mem[16] = {[0 ... 15] = -1 };	/* reserve (don't use) some memory region */
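
/*
 * Note: these tables start out fully "unreserved" (-1) and are only
 * filled in by the pnp_reserve_irq=, pnp_reserve_dma=, pnp_reserve_io=
 * and pnp_reserve_mem= boot options parsed at the bottom of this file.
 * The I/O and memory tables hold base/size pairs; the IRQ and DMA
 * tables hold individual line/channel numbers.
 */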

/*
 * option registration
 */

static struct pnp_option *pnp_build_option(int priority)
{
        struct pnp_option *option = pnp_alloc(sizeof(struct pnp_option));

        if (!option)
                return NULL;

        option->priority = priority & 0xff;
        /* make sure the priority is valid */
        if (option->priority > PNP_RES_PRIORITY_FUNCTIONAL)
                option->priority = PNP_RES_PRIORITY_INVALID;

        return option;
}

struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev)
{
        struct pnp_option *option;

        option = pnp_build_option(PNP_RES_PRIORITY_PREFERRED);

        /* this should never happen but if it does we'll try to continue */
        if (dev->independent)
                dev_err(&dev->dev, "independent resource already registered\n");
        dev->independent = option;
        return option;
}

struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev,
                                                 int priority)
{
        struct pnp_option *option;

        option = pnp_build_option(priority);

        if (dev->dependent) {
                struct pnp_option *parent = dev->dependent;
                while (parent->next)
                        parent = parent->next;
                parent->next = option;
        } else
                dev->dependent = option;
        return option;
}

int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data)
{
        struct pnp_irq *ptr;

        ptr = option->irq;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
                ptr->next = data;
        else
                option->irq = data;

#ifdef CONFIG_PCI
        {
                int i;

                for (i = 0; i < 16; i++)
                        if (test_bit(i, data->map))
                                pcibios_penalize_isa_irq(i, 0);
        }
#endif
        return 0;
}

int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data)
{
        struct pnp_dma *ptr;

        ptr = option->dma;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
                ptr->next = data;
        else
                option->dma = data;

        return 0;
}

int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data)
{
        struct pnp_port *ptr;

        ptr = option->port;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
                ptr->next = data;
        else
                option->port = data;

        return 0;
}

int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data)
{
        struct pnp_mem *ptr;

        ptr = option->mem;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
                ptr->next = data;
        else
                option->mem = data;

        return 0;
}

static void pnp_free_port(struct pnp_port *port)
{
        struct pnp_port *next;

        while (port) {
                next = port->next;
                kfree(port);
                port = next;
        }
}

static void pnp_free_irq(struct pnp_irq *irq)
{
        struct pnp_irq *next;

        while (irq) {
                next = irq->next;
                kfree(irq);
                irq = next;
        }
}

static void pnp_free_dma(struct pnp_dma *dma)
{
        struct pnp_dma *next;

        while (dma) {
                next = dma->next;
                kfree(dma);
                dma = next;
        }
}

static void pnp_free_mem(struct pnp_mem *mem)
{
        struct pnp_mem *next;

        while (mem) {
                next = mem->next;
                kfree(mem);
                mem = next;
        }
}

void pnp_free_option(struct pnp_option *option)
{
        struct pnp_option *next;

        while (option) {
                next = option->next;
                pnp_free_port(option->port);
                pnp_free_irq(option->irq);
                pnp_free_dma(option->dma);
                pnp_free_mem(option->mem);
                kfree(option);
                option = next;
        }
}

/*
 * resource validity checking
 */

#define length(start, end) (*(end) - *(start) + 1)

/* Two ranges conflict if one doesn't end before the other starts */
#define ranged_conflict(starta, enda, startb, endb) \
        !((*(enda) < *(startb)) || (*(endb) < *(starta)))

/* a resource that is unset or disabled has no range worth comparing */
#define cannot_compare(flags) \
        ((flags) & (IORESOURCE_UNSET | IORESOURCE_DISABLED))

int pnp_check_port(struct pnp_dev *dev, int idx)
{
        int tmp;
        struct pnp_dev *tdev;
        resource_size_t *port, *end, *tport, *tend;

        port = &dev->res.port_resource[idx].start;
        end = &dev->res.port_resource[idx].end;

        /* if the resource doesn't exist, don't complain about it */
        if (cannot_compare(dev->res.port_resource[idx].flags))
                return 1;

        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
                if (__check_region(&ioport_resource, *port, length(port, end)))
                        return 0;
        }

        /* check if the resource is reserved */
        for (tmp = 0; tmp < 8; tmp++) {
                int rport = pnp_reserve_io[tmp << 1];
                int rend = pnp_reserve_io[(tmp << 1) + 1] + rport - 1;
                if (ranged_conflict(port, end, &rport, &rend))
                        return 0;
        }

        /* check for internal conflicts */
        for (tmp = 0; tmp < PNP_MAX_PORT && tmp != idx; tmp++) {
                if (dev->res.port_resource[tmp].flags & IORESOURCE_IO) {
                        tport = &dev->res.port_resource[tmp].start;
                        tend = &dev->res.port_resource[tmp].end;
                        if (ranged_conflict(port, end, tport, tend))
                                return 0;
                }
        }

        /* check for conflicts with other pnp devices */
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
                for (tmp = 0; tmp < PNP_MAX_PORT; tmp++) {
                        if (tdev->res.port_resource[tmp].flags & IORESOURCE_IO) {
                                if (cannot_compare
                                    (tdev->res.port_resource[tmp].flags))
                                        continue;
                                tport = &tdev->res.port_resource[tmp].start;
                                tend = &tdev->res.port_resource[tmp].end;
                                if (ranged_conflict(port, end, tport, tend))
                                        return 0;
                        }
                }
        }

        return 1;
}

int pnp_check_mem(struct pnp_dev *dev, int idx)
{
        int tmp;
        struct pnp_dev *tdev;
        resource_size_t *addr, *end, *taddr, *tend;

        addr = &dev->res.mem_resource[idx].start;
        end = &dev->res.mem_resource[idx].end;

        /* if the resource doesn't exist, don't complain about it */
        if (cannot_compare(dev->res.mem_resource[idx].flags))
                return 1;

        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
                if (check_mem_region(*addr, length(addr, end)))
                        return 0;
        }

        /* check if the resource is reserved */
        for (tmp = 0; tmp < 8; tmp++) {
                int raddr = pnp_reserve_mem[tmp << 1];
                int rend = pnp_reserve_mem[(tmp << 1) + 1] + raddr - 1;
                if (ranged_conflict(addr, end, &raddr, &rend))
                        return 0;
        }

        /* check for internal conflicts */
        for (tmp = 0; tmp < PNP_MAX_MEM && tmp != idx; tmp++) {
                if (dev->res.mem_resource[tmp].flags & IORESOURCE_MEM) {
                        taddr = &dev->res.mem_resource[tmp].start;
                        tend = &dev->res.mem_resource[tmp].end;
                        if (ranged_conflict(addr, end, taddr, tend))
                                return 0;
                }
        }

        /* check for conflicts with other pnp devices */
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
                for (tmp = 0; tmp < PNP_MAX_MEM; tmp++) {
                        if (tdev->res.mem_resource[tmp].flags & IORESOURCE_MEM) {
                                if (cannot_compare
                                    (tdev->res.mem_resource[tmp].flags))
                                        continue;
                                taddr = &tdev->res.mem_resource[tmp].start;
                                tend = &tdev->res.mem_resource[tmp].end;
                                if (ranged_conflict(addr, end, taddr, tend))
                                        return 0;
                        }
                }
        }

        return 1;
}

/* dummy handler, used only to probe whether an IRQ line can be requested */
static irqreturn_t pnp_test_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

int pnp_check_irq(struct pnp_dev *dev, int idx)
{
        int tmp;
        struct pnp_dev *tdev;
        resource_size_t *irq = &dev->res.irq_resource[idx].start;

        /* if the resource doesn't exist, don't complain about it */
        if (cannot_compare(dev->res.irq_resource[idx].flags))
                return 1;

        /* check if the resource is valid */
        if (*irq < 0 || *irq > 15)
                return 0;

        /* check if the resource is reserved */
        for (tmp = 0; tmp < 16; tmp++) {
                if (pnp_reserve_irq[tmp] == *irq)
                        return 0;
        }

        /* check for internal conflicts */
        for (tmp = 0; tmp < PNP_MAX_IRQ && tmp != idx; tmp++) {
                if (dev->res.irq_resource[tmp].flags & IORESOURCE_IRQ) {
                        if (dev->res.irq_resource[tmp].start == *irq)
                                return 0;
                }
        }

#ifdef CONFIG_PCI
        /* check if the resource is being used by a pci device */
        {
                struct pci_dev *pci = NULL;
                for_each_pci_dev(pci) {
                        if (pci->irq == *irq) {
                                /* for_each_pci_dev() holds a reference on
                                 * pci; drop it before bailing out */
                                pci_dev_put(pci);
                                return 0;
                        }
                }
        }
#endif

        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
                if (request_irq(*irq, pnp_test_handler,
                                IRQF_DISABLED | IRQF_PROBE_SHARED, "pnp", NULL))
                        return 0;
                free_irq(*irq, NULL);
        }

        /* check for conflicts with other pnp devices */
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
                for (tmp = 0; tmp < PNP_MAX_IRQ; tmp++) {
                        if (tdev->res.irq_resource[tmp].flags & IORESOURCE_IRQ) {
                                if (cannot_compare
                                    (tdev->res.irq_resource[tmp].flags))
                                        continue;
                                if (tdev->res.irq_resource[tmp].start == *irq)
                                        return 0;
                        }
                }
        }

        return 1;
}

int pnp_check_dma(struct pnp_dev *dev, int idx)
{
#ifndef CONFIG_IA64
        int tmp;
        struct pnp_dev *tdev;
        resource_size_t *dma = &dev->res.dma_resource[idx].start;

        /* if the resource doesn't exist, don't complain about it */
        if (cannot_compare(dev->res.dma_resource[idx].flags))
                return 1;

        /* check if the resource is valid */
        if (*dma < 0 || *dma == 4 || *dma > 7)
                return 0;

        /* check if the resource is reserved */
        for (tmp = 0; tmp < 8; tmp++) {
                if (pnp_reserve_dma[tmp] == *dma)
                        return 0;
        }

        /* check for internal conflicts */
        for (tmp = 0; tmp < PNP_MAX_DMA && tmp != idx; tmp++) {
                if (dev->res.dma_resource[tmp].flags & IORESOURCE_DMA) {
                        if (dev->res.dma_resource[tmp].start == *dma)
                                return 0;
                }
        }

        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
                if (request_dma(*dma, "pnp"))
                        return 0;
                free_dma(*dma);
        }

        /* check for conflicts with other pnp devices */
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
                for (tmp = 0; tmp < PNP_MAX_DMA; tmp++) {
                        if (tdev->res.dma_resource[tmp].flags & IORESOURCE_DMA) {
                                if (cannot_compare
                                    (tdev->res.dma_resource[tmp].flags))
                                        continue;
                                if (tdev->res.dma_resource[tmp].start == *dma)
                                        return 0;
                        }
                }
        }

        return 1;
#else
        /* IA64 does not have legacy DMA */
        return 0;
#endif
}

/* format is: pnp_reserve_irq=irq1[,irq2] .... */
static int __init pnp_setup_reserve_irq(char *str)
{
        int i;

        for (i = 0; i < 16; i++)
                if (get_option(&str, &pnp_reserve_irq[i]) != 2)
                        break;
        return 1;
}

__setup("pnp_reserve_irq=", pnp_setup_reserve_irq);

/* format is: pnp_reserve_dma=dma1[,dma2] .... */
static int __init pnp_setup_reserve_dma(char *str)
{
        int i;

        for (i = 0; i < 8; i++)
                if (get_option(&str, &pnp_reserve_dma[i]) != 2)
                        break;
        return 1;
}

__setup("pnp_reserve_dma=", pnp_setup_reserve_dma);

/* format is: pnp_reserve_io=io1,size1[,io2,size2] .... */
static int __init pnp_setup_reserve_io(char *str)
{
        int i;

        for (i = 0; i < 16; i++)
                if (get_option(&str, &pnp_reserve_io[i]) != 2)
                        break;
        return 1;
}

__setup("pnp_reserve_io=", pnp_setup_reserve_io);

/* format is: pnp_reserve_mem=mem1,size1[,mem2,size2] .... */
static int __init pnp_setup_reserve_mem(char *str)
{
        int i;

        for (i = 0; i < 16; i++)
                if (get_option(&str, &pnp_reserve_mem[i]) != 2)
                        break;
        return 1;
}

__setup("pnp_reserve_mem=", pnp_setup_reserve_mem);
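
/*
 * Illustrative usage (hypothetical values, following the option formats
 * documented above): booting with
 *
 *     pnp_reserve_irq=5,10 pnp_reserve_io=0x220,32
 *
 * keeps PnP resource assignment away from IRQs 5 and 10 and from the
 * 32-byte I/O window starting at 0x220, since the pnp_check_*() routines
 * treat anything listed in the pnp_reserve_* tables as a conflict.
 */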