1 /*
2 * linux/kernel/resource.c
4 * Copyright (C) 1999 Linus Torvalds
5 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
7 * Arbitrary resource management.
8 */
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/export.h>
13 #include <linux/errno.h>
14 #include <linux/ioport.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/fs.h>
19 #include <linux/proc_fs.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/device.h>
23 #include <linux/pfn.h>
24 #include <linux/mm.h>
25 #include <asm/io.h>
28 struct resource ioport_resource = {
29 .name = "PCI IO",
30 .start = 0,
31 .end = IO_SPACE_LIMIT,
32 .flags = IORESOURCE_IO,
34 EXPORT_SYMBOL(ioport_resource);
36 struct resource iomem_resource = {
37 .name = "PCI mem",
38 .start = 0,
39 .end = -1,
40 .flags = IORESOURCE_MEM,
42 EXPORT_SYMBOL(iomem_resource);
44 /* constraints to be met while allocating resources */
45 struct resource_constraint {
46 resource_size_t min, max, align;
47 resource_size_t (*alignf)(void *, const struct resource *,
48 resource_size_t, resource_size_t);
49 void *alignf_data;
52 static DEFINE_RWLOCK(resource_lock);
55 * For memory hotplug, there is no way to free resource entries allocated
56 * by boot memory (bootmem) after the system is up, so such entries are kept
57 * on a free list and reused.
59 static struct resource *bootmem_resource_free;
60 static DEFINE_SPINLOCK(bootmem_resource_lock);
62 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
64 struct resource *p = v;
65 (*pos)++;
66 if (p->child)
67 return p->child;
68 while (!p->sibling && p->parent)
69 p = p->parent;
70 return p->sibling;
73 #ifdef CONFIG_PROC_FS
75 enum { MAX_IORES_LEVEL = 5 };
77 static void *r_start(struct seq_file *m, loff_t *pos)
78 __acquires(resource_lock)
80 struct resource *p = m->private;
81 loff_t l = 0;
82 read_lock(&resource_lock);
83 for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
85 return p;
88 static void r_stop(struct seq_file *m, void *v)
89 __releases(resource_lock)
91 read_unlock(&resource_lock);
94 static int r_show(struct seq_file *m, void *v)
96 struct resource *root = m->private;
97 struct resource *r = v, *p;
98 int width = root->end < 0x10000 ? 4 : 8;
99 int depth;
101 for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
102 if (p->parent == root)
103 break;
104 seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
105 depth * 2, "",
106 width, (unsigned long long) r->start,
107 width, (unsigned long long) r->end,
108 r->name ? r->name : "<BAD>");
109 return 0;
112 static const struct seq_operations resource_op = {
113 .start = r_start,
114 .next = r_next,
115 .stop = r_stop,
116 .show = r_show,
119 static int ioports_open(struct inode *inode, struct file *file)
121 int res = seq_open(file, &resource_op);
122 if (!res) {
123 struct seq_file *m = file->private_data;
124 m->private = &ioport_resource;
126 return res;
129 static int iomem_open(struct inode *inode, struct file *file)
131 int res = seq_open(file, &resource_op);
132 if (!res) {
133 struct seq_file *m = file->private_data;
134 m->private = &iomem_resource;
136 return res;
139 static const struct file_operations proc_ioports_operations = {
140 .open = ioports_open,
141 .read = seq_read,
142 .llseek = seq_lseek,
143 .release = seq_release,
146 static const struct file_operations proc_iomem_operations = {
147 .open = iomem_open,
148 .read = seq_read,
149 .llseek = seq_lseek,
150 .release = seq_release,
153 static int __init ioresources_init(void)
155 proc_create("ioports", 0, NULL, &proc_ioports_operations);
156 proc_create("iomem", 0, NULL, &proc_iomem_operations);
157 return 0;
159 __initcall(ioresources_init);
161 #endif /* CONFIG_PROC_FS */
163 static void free_resource(struct resource *res)
165 if (!res)
166 return;
168 if (!PageSlab(virt_to_head_page(res))) {
169 spin_lock(&bootmem_resource_lock);
170 res->sibling = bootmem_resource_free;
171 bootmem_resource_free = res;
172 spin_unlock(&bootmem_resource_lock);
173 } else {
174 kfree(res);
178 static struct resource *alloc_resource(gfp_t flags)
180 struct resource *res = NULL;
182 spin_lock(&bootmem_resource_lock);
183 if (bootmem_resource_free) {
184 res = bootmem_resource_free;
185 bootmem_resource_free = res->sibling;
187 spin_unlock(&bootmem_resource_lock);
189 if (res)
190 memset(res, 0, sizeof(struct resource));
191 else
192 res = kzalloc(sizeof(struct resource), flags);
194 return res;
197 /* Return the conflict entry if you can't request it */
198 static struct resource * __request_resource(struct resource *root, struct resource *new)
200 resource_size_t start = new->start;
201 resource_size_t end = new->end;
202 struct resource *tmp, **p;
204 if (end < start)
205 return root;
206 if (start < root->start)
207 return root;
208 if (end > root->end)
209 return root;
210 p = &root->child;
211 for (;;) {
212 tmp = *p;
213 if (!tmp || tmp->start > end) {
214 new->sibling = tmp;
215 *p = new;
216 new->parent = root;
217 return NULL;
219 p = &tmp->sibling;
220 if (tmp->end < start)
221 continue;
222 return tmp;
226 static int __release_resource(struct resource *old)
228 struct resource *tmp, **p;
230 p = &old->parent->child;
231 for (;;) {
232 tmp = *p;
233 if (!tmp)
234 break;
235 if (tmp == old) {
236 *p = tmp->sibling;
237 old->parent = NULL;
238 return 0;
240 p = &tmp->sibling;
242 return -EINVAL;
245 static void __release_child_resources(struct resource *r)
247 struct resource *tmp, *p;
248 resource_size_t size;
250 p = r->child;
251 r->child = NULL;
252 while (p) {
253 tmp = p;
254 p = p->sibling;
256 tmp->parent = NULL;
257 tmp->sibling = NULL;
258 __release_child_resources(tmp);
260 printk(KERN_DEBUG "release child resource %pR\n", tmp);
261 /* need to restore size, and keep flags */
262 size = resource_size(tmp);
263 tmp->start = 0;
264 tmp->end = size - 1;
268 void release_child_resources(struct resource *r)
270 write_lock(&resource_lock);
271 __release_child_resources(r);
272 write_unlock(&resource_lock);
276 * request_resource_conflict - request and reserve an I/O or memory resource
277 * @root: root resource descriptor
278 * @new: resource descriptor desired by caller
280 * Returns NULL on success, or a pointer to the conflicting resource on error.
282 struct resource *request_resource_conflict(struct resource *root, struct resource *new)
284 struct resource *conflict;
286 write_lock(&resource_lock);
287 conflict = __request_resource(root, new);
288 write_unlock(&resource_lock);
289 return conflict;
293 * request_resource - request and reserve an I/O or memory resource
294 * @root: root resource descriptor
295 * @new: resource descriptor desired by caller
297 * Returns 0 for success, negative error code on error.
299 int request_resource(struct resource *root, struct resource *new)
301 struct resource *conflict;
303 conflict = request_resource_conflict(root, new);
304 return conflict ? -EBUSY : 0;
307 EXPORT_SYMBOL(request_resource);
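/*
 * Editor's illustrative sketch (not part of resource.c): a driver that
 * owns a fixed, non-discoverable port range could reserve it against
 * the ioport_resource root roughly like this.  The "foo" name and the
 * port numbers are hypothetical.
 */
static struct resource foo_ports = {
	.name	= "foo-ctrl",
	.start	= 0x0220,
	.end	= 0x022f,
	.flags	= IORESOURCE_IO | IORESOURCE_BUSY,
};

static int foo_reserve_ports(void)
{
	int ret = request_resource(&ioport_resource, &foo_ports);

	if (ret)	/* -EBUSY: the range already belongs to someone */
		return ret;
	/* ... use the ports; undo later with release_resource(&foo_ports) */
	return 0;
}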
310 * release_resource - release a previously reserved resource
311 * @old: resource pointer
313 int release_resource(struct resource *old)
315 int retval;
317 write_lock(&resource_lock);
318 retval = __release_resource(old);
319 write_unlock(&resource_lock);
320 return retval;
323 EXPORT_SYMBOL(release_resource);
325 #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
327 * Finds the lowest memory resource that exists within [res->start, res->end).
328 * The caller must specify res->start, res->end, res->flags and "name".
329 * If found, returns 0 and res is overwritten; if not found, returns -1.
331 static int find_next_system_ram(struct resource *res, char *name)
333 resource_size_t start, end;
334 struct resource *p;
336 BUG_ON(!res);
338 start = res->start;
339 end = res->end;
340 BUG_ON(start >= end);
342 read_lock(&resource_lock);
343 for (p = iomem_resource.child; p ; p = p->sibling) {
344 /* system ram is just marked as IORESOURCE_MEM */
345 if (p->flags != res->flags)
346 continue;
347 if (name && strcmp(p->name, name))
348 continue;
349 if (p->start > end) {
350 p = NULL;
351 break;
353 if ((p->end >= start) && (p->start < end))
354 break;
356 read_unlock(&resource_lock);
357 if (!p)
358 return -1;
359 /* copy data */
360 if (res->start < p->start)
361 res->start = p->start;
362 if (res->end > p->end)
363 res->end = p->end;
364 return 0;
368 * This function calls the callback for every memory range of "System RAM"
369 * that is marked as IORESOURCE_MEM and IORESOURCE_BUSY.
370 * Currently this function handles "System RAM" only.
372 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
373 void *arg, int (*func)(unsigned long, unsigned long, void *))
375 struct resource res;
376 unsigned long pfn, end_pfn;
377 u64 orig_end;
378 int ret = -1;
380 res.start = (u64) start_pfn << PAGE_SHIFT;
381 res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
382 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
383 orig_end = res.end;
384 while ((res.start < res.end) &&
385 (find_next_system_ram(&res, "System RAM") >= 0)) {
386 pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
387 end_pfn = (res.end + 1) >> PAGE_SHIFT;
388 if (end_pfn > pfn)
389 ret = (*func)(pfn, end_pfn - pfn, arg);
390 if (ret)
391 break;
392 res.start = res.end + 1;
393 res.end = orig_end;
395 return ret;
398 #endif
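/*
 * Editor's illustrative sketch (not part of resource.c): counting the
 * "System RAM" pages inside a pfn window by handing a callback to
 * walk_system_ram_range().  The callback stops the walk by returning a
 * non-zero value.
 */
static int foo_count_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;
}

static unsigned long foo_ram_pages_in(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total, foo_count_ram);
	return total;
}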
400 static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
402 return 1;
405 * This generic page_is_ram() returns true if the specified address is
406 * registered as "System RAM" in the iomem_resource list.
408 int __weak page_is_ram(unsigned long pfn)
410 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
413 void __weak arch_remove_reservations(struct resource *avail)
417 static resource_size_t simple_align_resource(void *data,
418 const struct resource *avail,
419 resource_size_t size,
420 resource_size_t align)
422 return avail->start;
425 static void resource_clip(struct resource *res, resource_size_t min,
426 resource_size_t max)
428 if (res->start < min)
429 res->start = min;
430 if (res->end > max)
431 res->end = max;
434 static bool resource_contains(struct resource *res1, struct resource *res2)
436 return res1->start <= res2->start && res1->end >= res2->end;
440 * Find empty slot in the resource tree with the given range and
441 * alignment constraints
443 static int __find_resource(struct resource *root, struct resource *old,
444 struct resource *new,
445 resource_size_t size,
446 struct resource_constraint *constraint)
448 struct resource *this = root->child;
449 struct resource tmp = *new, avail, alloc;
451 tmp.flags = new->flags;
452 tmp.start = root->start;
454 * Skip past an allocated resource that starts at 0, since the assignment
455 * of this->start - 1 to tmp->end below would cause an underflow.
457 if (this && this->start == root->start) {
458 tmp.start = (this == old) ? old->start : this->end + 1;
459 this = this->sibling;
461 for(;;) {
462 if (this)
463 tmp.end = (this == old) ? this->end : this->start - 1;
464 else
465 tmp.end = root->end;
467 if (tmp.end < tmp.start)
468 goto next;
470 resource_clip(&tmp, constraint->min, constraint->max);
471 arch_remove_reservations(&tmp);
473 /* Check for overflow after ALIGN() */
474 avail = *new;
475 avail.start = ALIGN(tmp.start, constraint->align);
476 avail.end = tmp.end;
477 if (avail.start >= tmp.start) {
478 alloc.start = constraint->alignf(constraint->alignf_data, &avail,
479 size, constraint->align);
480 alloc.end = alloc.start + size - 1;
481 if (resource_contains(&avail, &alloc)) {
482 new->start = alloc.start;
483 new->end = alloc.end;
484 return 0;
488 next: if (!this || this->end == root->end)
489 break;
491 if (this != old)
492 tmp.start = this->end + 1;
493 this = this->sibling;
495 return -EBUSY;
499 * Find empty slot in the resource tree given range and alignment.
501 static int find_resource(struct resource *root, struct resource *new,
502 resource_size_t size,
503 struct resource_constraint *constraint)
505 return __find_resource(root, NULL, new, size, constraint);
509 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
510 * The resource will be relocated if the new size cannot be accommodated
511 * at the current location.
513 * @root: root resource descriptor
514 * @old: resource descriptor desired by caller
515 * @newsize: new size of the resource descriptor
516 * @constraint: the size and alignment constraints to be met.
518 int reallocate_resource(struct resource *root, struct resource *old,
519 resource_size_t newsize,
520 struct resource_constraint *constraint)
522 int err=0;
523 struct resource new = *old;
524 struct resource *conflict;
526 write_lock(&resource_lock);
528 if ((err = __find_resource(root, old, &new, newsize, constraint)))
529 goto out;
531 if (resource_contains(&new, old)) {
532 old->start = new.start;
533 old->end = new.end;
534 goto out;
537 if (old->child) {
538 err = -EBUSY;
539 goto out;
542 if (resource_contains(old, &new)) {
543 old->start = new.start;
544 old->end = new.end;
545 } else {
546 __release_resource(old);
547 *old = new;
548 conflict = __request_resource(root, old);
549 BUG_ON(conflict);
551 out:
552 write_unlock(&resource_lock);
553 return err;
558 * allocate_resource - allocate an empty slot in the resource tree given range & alignment.
559 * The resource will be reallocated with a new size if it was already allocated.
560 * @root: root resource descriptor
561 * @new: resource descriptor desired by caller
562 * @size: requested resource region size
563 * @min: minimum boundary to allocate
564 * @max: maximum boundary to allocate
565 * @align: alignment requested, in bytes
566 * @alignf: alignment function, optional, called if not NULL
567 * @alignf_data: arbitrary data to pass to the @alignf function
569 int allocate_resource(struct resource *root, struct resource *new,
570 resource_size_t size, resource_size_t min,
571 resource_size_t max, resource_size_t align,
572 resource_size_t (*alignf)(void *,
573 const struct resource *,
574 resource_size_t,
575 resource_size_t),
576 void *alignf_data)
578 int err;
579 struct resource_constraint constraint;
581 if (!alignf)
582 alignf = simple_align_resource;
584 constraint.min = min;
585 constraint.max = max;
586 constraint.align = align;
587 constraint.alignf = alignf;
588 constraint.alignf_data = alignf_data;
590 if ( new->parent ) {
591 /* resource is already allocated, try reallocating with
592 the new constraints */
593 return reallocate_resource(root, new, size, &constraint);
596 write_lock(&resource_lock);
597 err = find_resource(root, new, size, &constraint);
598 if (err >= 0 && __request_resource(root, new))
599 err = -EBUSY;
600 write_unlock(&resource_lock);
601 return err;
604 EXPORT_SYMBOL(allocate_resource);
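/*
 * Editor's illustrative sketch (not part of resource.c): asking the
 * allocator to find a free, 4 KiB-sized and 4 KiB-aligned window
 * anywhere inside a given parent.  Passing a NULL alignf falls back to
 * simple_align_resource() above; the "foo-window" name is hypothetical.
 */
static int foo_alloc_window(struct resource *parent, struct resource *win)
{
	win->name  = "foo-window";
	win->flags = IORESOURCE_MEM;

	return allocate_resource(parent, win, 0x1000,
				 parent->start, parent->end,
				 0x1000, NULL, NULL);
}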
607 * lookup_resource - find an existing resource by a resource start address
608 * @root: root resource descriptor
609 * @start: resource start address
611 * Returns a pointer to the resource if found, NULL otherwise
613 struct resource *lookup_resource(struct resource *root, resource_size_t start)
615 struct resource *res;
617 read_lock(&resource_lock);
618 for (res = root->child; res; res = res->sibling) {
619 if (res->start == start)
620 break;
622 read_unlock(&resource_lock);
624 return res;
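/*
 * Editor's illustrative sketch (not part of resource.c): finding an
 * already-registered child of ioport_resource by its start address;
 * 0x3f8 is just a familiar example value.
 */
static struct resource *foo_find_port(void)
{
	return lookup_resource(&ioport_resource, 0x3f8);
}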
628 * Insert a resource into the resource tree. If successful, return NULL,
629 * otherwise return the conflicting resource (compare to __request_resource())
631 static struct resource * __insert_resource(struct resource *parent, struct resource *new)
633 struct resource *first, *next;
635 for (;; parent = first) {
636 first = __request_resource(parent, new);
637 if (!first)
638 return first;
640 if (first == parent)
641 return first;
642 if (WARN_ON(first == new)) /* duplicated insertion */
643 return first;
645 if ((first->start > new->start) || (first->end < new->end))
646 break;
647 if ((first->start == new->start) && (first->end == new->end))
648 break;
651 for (next = first; ; next = next->sibling) {
652 /* Partial overlap? Bad, and unfixable */
653 if (next->start < new->start || next->end > new->end)
654 return next;
655 if (!next->sibling)
656 break;
657 if (next->sibling->start > new->end)
658 break;
661 new->parent = parent;
662 new->sibling = next->sibling;
663 new->child = first;
665 next->sibling = NULL;
666 for (next = first; next; next = next->sibling)
667 next->parent = new;
669 if (parent->child == first) {
670 parent->child = new;
671 } else {
672 next = parent->child;
673 while (next->sibling != first)
674 next = next->sibling;
675 next->sibling = new;
677 return NULL;
681 * insert_resource_conflict - Inserts resource in the resource tree
682 * @parent: parent of the new resource
683 * @new: new resource to insert
685 * Returns NULL on success, or the conflicting resource if the resource can't be inserted.
687 * This function is equivalent to request_resource_conflict when no conflict
688 * happens. If a conflict happens, and the conflicting resources
689 * entirely fit within the range of the new resource, then the new
690 * resource is inserted and the conflicting resources become children of
691 * the new resource.
693 struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
695 struct resource *conflict;
697 write_lock(&resource_lock);
698 conflict = __insert_resource(parent, new);
699 write_unlock(&resource_lock);
700 return conflict;
704 * insert_resource - Inserts a resource in the resource tree
705 * @parent: parent of the new resource
706 * @new: new resource to insert
708 * Returns 0 on success, -EBUSY if the resource can't be inserted.
710 int insert_resource(struct resource *parent, struct resource *new)
712 struct resource *conflict;
714 conflict = insert_resource_conflict(parent, new);
715 return conflict ? -EBUSY : 0;
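/*
 * Editor's illustrative sketch (not part of resource.c): firmware or
 * bus code inserting a bridge window that may already contain
 * previously requested regions.  Unlike request_resource(), existing
 * regions that fit entirely inside the new one simply become its
 * children.  The address range is hypothetical.
 */
static struct resource foo_bridge_window = {
	.name	= "foo bridge window",
	.start	= 0xd0000000,
	.end	= 0xd7ffffff,
	.flags	= IORESOURCE_MEM,
};

static int foo_add_bridge_window(void)
{
	return insert_resource(&iomem_resource, &foo_bridge_window);
}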
719 * insert_resource_expand_to_fit - Insert a resource into the resource tree
720 * @root: root resource descriptor
721 * @new: new resource to insert
723 * Insert a resource into the resource tree, possibly expanding it in order
724 * to make it encompass any conflicting resources.
726 void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
728 if (new->parent)
729 return;
731 write_lock(&resource_lock);
732 for (;;) {
733 struct resource *conflict;
735 conflict = __insert_resource(root, new);
736 if (!conflict)
737 break;
738 if (conflict == root)
739 break;
741 /* Ok, expand resource to cover the conflict, then try again .. */
742 if (conflict->start < new->start)
743 new->start = conflict->start;
744 if (conflict->end > new->end)
745 new->end = conflict->end;
747 printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
749 write_unlock(&resource_lock);
752 static int __adjust_resource(struct resource *res, resource_size_t start,
753 resource_size_t size)
755 struct resource *tmp, *parent = res->parent;
756 resource_size_t end = start + size - 1;
757 int result = -EBUSY;
759 if (!parent)
760 goto skip;
762 if ((start < parent->start) || (end > parent->end))
763 goto out;
765 if (res->sibling && (res->sibling->start <= end))
766 goto out;
768 tmp = parent->child;
769 if (tmp != res) {
770 while (tmp->sibling != res)
771 tmp = tmp->sibling;
772 if (start <= tmp->end)
773 goto out;
776 skip:
777 for (tmp = res->child; tmp; tmp = tmp->sibling)
778 if ((tmp->start < start) || (tmp->end > end))
779 goto out;
781 res->start = start;
782 res->end = end;
783 result = 0;
785 out:
786 return result;
790 * adjust_resource - modify a resource's start and size
791 * @res: resource to modify
792 * @start: new start value
793 * @size: new size
795 * Given an existing resource, change its start and size to match the
796 * arguments. Returns 0 on success, -EBUSY if it can't fit.
797 * Existing children of the resource are assumed to be immutable.
799 int adjust_resource(struct resource *res, resource_size_t start,
800 resource_size_t size)
802 int result;
804 write_lock(&resource_lock);
805 result = __adjust_resource(res, start, size);
806 write_unlock(&resource_lock);
807 return result;
809 EXPORT_SYMBOL(adjust_resource);
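/*
 * Editor's illustrative sketch (not part of resource.c): growing an
 * existing region in place by one page; the call fails with -EBUSY if
 * the parent boundary or a sibling resource is in the way.
 */
static int foo_grow_region(struct resource *res)
{
	return adjust_resource(res, res->start,
			       resource_size(res) + PAGE_SIZE);
}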
811 static void __init __reserve_region_with_split(struct resource *root,
812 resource_size_t start, resource_size_t end,
813 const char *name)
815 struct resource *parent = root;
816 struct resource *conflict;
817 struct resource *res = alloc_resource(GFP_ATOMIC);
818 struct resource *next_res = NULL;
820 if (!res)
821 return;
823 res->name = name;
824 res->start = start;
825 res->end = end;
826 res->flags = IORESOURCE_BUSY;
828 while (1) {
830 conflict = __request_resource(parent, res);
831 if (!conflict) {
832 if (!next_res)
833 break;
834 res = next_res;
835 next_res = NULL;
836 continue;
839 /* conflict covered whole area */
840 if (conflict->start <= res->start &&
841 conflict->end >= res->end) {
842 free_resource(res);
843 WARN_ON(next_res);
844 break;
847 /* failed, split and try again */
848 if (conflict->start > res->start) {
849 end = res->end;
850 res->end = conflict->start - 1;
851 if (conflict->end < end) {
852 next_res = alloc_resource(GFP_ATOMIC);
853 if (!next_res) {
854 free_resource(res);
855 break;
857 next_res->name = name;
858 next_res->start = conflict->end + 1;
859 next_res->end = end;
860 next_res->flags = IORESOURCE_BUSY;
862 } else {
863 res->start = conflict->end + 1;
869 void __init reserve_region_with_split(struct resource *root,
870 resource_size_t start, resource_size_t end,
871 const char *name)
873 int abort = 0;
875 write_lock(&resource_lock);
876 if (root->start > start || root->end < end) {
877 pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
878 (unsigned long long)start, (unsigned long long)end,
879 root);
880 if (start > root->end || end < root->start)
881 abort = 1;
882 else {
883 if (end > root->end)
884 end = root->end;
885 if (start < root->start)
886 start = root->start;
887 pr_err("fixing request to [0x%llx-0x%llx]\n",
888 (unsigned long long)start,
889 (unsigned long long)end);
891 dump_stack();
893 if (!abort)
894 __reserve_region_with_split(root, start, end, name);
895 write_unlock(&resource_lock);
899 * resource_alignment - calculate resource's alignment
900 * @res: resource pointer
902 * Returns alignment on success, 0 (invalid alignment) on failure.
904 resource_size_t resource_alignment(struct resource *res)
906 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
907 case IORESOURCE_SIZEALIGN:
908 return resource_size(res);
909 case IORESOURCE_STARTALIGN:
910 return res->start;
911 default:
912 return 0;
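/*
 * Editor's note (worked example, not part of resource.c): a resource
 * spanning [0x100-0x1ff] with IORESOURCE_SIZEALIGN reports an alignment
 * of resource_size() == 0x100; the same range flagged
 * IORESOURCE_STARTALIGN reports its start address, 0x100.
 */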
917 * This is compatibility stuff for IO resources.
919 * Note how this, unlike the above, knows about
920 * the IO flag meanings (busy etc).
922 * request_region creates a new busy region.
924 * check_region returns non-zero if the area is already busy.
926 * release_region releases a matching busy region.
929 static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
932 * __request_region - create a new busy resource region
933 * @parent: parent resource descriptor
934 * @start: resource start address
935 * @n: resource region size
936 * @name: reserving caller's ID string
937 * @flags: IO resource flags
939 struct resource * __request_region(struct resource *parent,
940 resource_size_t start, resource_size_t n,
941 const char *name, int flags)
943 DECLARE_WAITQUEUE(wait, current);
944 struct resource *res = alloc_resource(GFP_KERNEL);
946 if (!res)
947 return NULL;
949 res->name = name;
950 res->start = start;
951 res->end = start + n - 1;
952 res->flags = IORESOURCE_BUSY;
953 res->flags |= flags;
955 write_lock(&resource_lock);
957 for (;;) {
958 struct resource *conflict;
960 conflict = __request_resource(parent, res);
961 if (!conflict)
962 break;
963 if (conflict != parent) {
964 parent = conflict;
965 if (!(conflict->flags & IORESOURCE_BUSY))
966 continue;
968 if (conflict->flags & flags & IORESOURCE_MUXED) {
969 add_wait_queue(&muxed_resource_wait, &wait);
970 write_unlock(&resource_lock);
971 set_current_state(TASK_UNINTERRUPTIBLE);
972 schedule();
973 remove_wait_queue(&muxed_resource_wait, &wait);
974 write_lock(&resource_lock);
975 continue;
977 /* Uhhuh, that didn't work out.. */
978 free_resource(res);
979 res = NULL;
980 break;
982 write_unlock(&resource_lock);
983 return res;
985 EXPORT_SYMBOL(__request_region);
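/*
 * Editor's illustrative sketch (not part of resource.c): most drivers
 * reach __request_region() through the request_region() and
 * request_mem_region() wrappers from <linux/ioport.h>.  Names and
 * addresses are hypothetical.
 */
static int foo_claim_regions(void)
{
	if (!request_region(0x0220, 16, "foo-io"))
		return -EBUSY;

	if (!request_mem_region(0xfebf0000, 0x1000, "foo-mmio")) {
		release_region(0x0220, 16);
		return -EBUSY;
	}
	return 0;
}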
988 * __check_region - check if a resource region is busy or free
989 * @parent: parent resource descriptor
990 * @start: resource start address
991 * @n: resource region size
993 * Returns 0 if the region is free at the moment it is checked,
994 * returns %-EBUSY if the region is busy.
996 * NOTE:
997 * This function is deprecated because its use is racy.
998 * Even if it returns 0, a subsequent call to request_region()
999 * may fail because another driver etc. just allocated the region.
1000 * Do NOT use it. It will be removed from the kernel.
1002 int __check_region(struct resource *parent, resource_size_t start,
1003 resource_size_t n)
1005 struct resource * res;
1007 res = __request_region(parent, start, n, "check-region", 0);
1008 if (!res)
1009 return -EBUSY;
1011 release_resource(res);
1012 free_resource(res);
1013 return 0;
1015 EXPORT_SYMBOL(__check_region);
1018 * __release_region - release a previously reserved resource region
1019 * @parent: parent resource descriptor
1020 * @start: resource start address
1021 * @n: resource region size
1023 * The described resource region must match a currently busy region.
1025 void __release_region(struct resource *parent, resource_size_t start,
1026 resource_size_t n)
1028 struct resource **p;
1029 resource_size_t end;
1031 p = &parent->child;
1032 end = start + n - 1;
1034 write_lock(&resource_lock);
1036 for (;;) {
1037 struct resource *res = *p;
1039 if (!res)
1040 break;
1041 if (res->start <= start && res->end >= end) {
1042 if (!(res->flags & IORESOURCE_BUSY)) {
1043 p = &res->child;
1044 continue;
1046 if (res->start != start || res->end != end)
1047 break;
1048 *p = res->sibling;
1049 write_unlock(&resource_lock);
1050 if (res->flags & IORESOURCE_MUXED)
1051 wake_up(&muxed_resource_wait);
1052 free_resource(res);
1053 return;
1055 p = &res->sibling;
1058 write_unlock(&resource_lock);
1060 printk(KERN_WARNING "Trying to free nonexistent resource "
1061 "<%016llx-%016llx>\n", (unsigned long long)start,
1062 (unsigned long long)end);
1064 EXPORT_SYMBOL(__release_region);
1066 #ifdef CONFIG_MEMORY_HOTREMOVE
1068 * release_mem_region_adjustable - release a previously reserved memory region
1069 * @parent: parent resource descriptor
1070 * @start: resource start address
1071 * @size: resource region size
1073 * This interface is intended for memory hot-delete. The requested region
1074 * is released from a currently busy memory resource. The requested region
1075 * must either match exactly or fit into a single busy resource entry. In
1076 * the latter case, the remaining resource is adjusted accordingly.
1077 * Existing children of the busy memory resource must be immutable in the
1078 * request.
1080 * Note:
1081 * - Additional release conditions, such as overlapping region, can be
1082 * supported after they are confirmed as valid cases.
1083 * - When a busy memory resource gets split into two entries, the code
1084 * assumes that all children remain in the lower address entry for
1085 * simplicity. Enhance this logic when necessary.
1087 int release_mem_region_adjustable(struct resource *parent,
1088 resource_size_t start, resource_size_t size)
1090 struct resource **p;
1091 struct resource *res;
1092 struct resource *new_res;
1093 resource_size_t end;
1094 int ret = -EINVAL;
1096 end = start + size - 1;
1097 if ((start < parent->start) || (end > parent->end))
1098 return ret;
1100 /* The alloc_resource() result gets checked later */
1101 new_res = alloc_resource(GFP_KERNEL);
1103 p = &parent->child;
1104 write_lock(&resource_lock);
1106 while ((res = *p)) {
1107 if (res->start >= end)
1108 break;
1110 /* look for the next resource if the request does not fit into this one */
1111 if (res->start > start || res->end < end) {
1112 p = &res->sibling;
1113 continue;
1116 if (!(res->flags & IORESOURCE_MEM))
1117 break;
1119 if (!(res->flags & IORESOURCE_BUSY)) {
1120 p = &res->child;
1121 continue;
1124 /* found the target resource; let's adjust accordingly */
1125 if (res->start == start && res->end == end) {
1126 /* free the whole entry */
1127 *p = res->sibling;
1128 free_resource(res);
1129 ret = 0;
1130 } else if (res->start == start && res->end != end) {
1131 /* adjust the start */
1132 ret = __adjust_resource(res, end + 1,
1133 res->end - end);
1134 } else if (res->start != start && res->end == end) {
1135 /* adjust the end */
1136 ret = __adjust_resource(res, res->start,
1137 start - res->start);
1138 } else {
1139 /* split into two entries */
1140 if (!new_res) {
1141 ret = -ENOMEM;
1142 break;
1144 new_res->name = res->name;
1145 new_res->start = end + 1;
1146 new_res->end = res->end;
1147 new_res->flags = res->flags;
1148 new_res->parent = res->parent;
1149 new_res->sibling = res->sibling;
1150 new_res->child = NULL;
1152 ret = __adjust_resource(res, res->start,
1153 start - res->start);
1154 if (ret)
1155 break;
1156 res->sibling = new_res;
1157 new_res = NULL;
1160 break;
1163 write_unlock(&resource_lock);
1164 free_resource(new_res);
1165 return ret;
1167 #endif /* CONFIG_MEMORY_HOTREMOVE */
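/*
 * Editor's illustrative sketch (not part of resource.c): memory
 * hot-remove code (built only with CONFIG_MEMORY_HOTREMOVE) releasing a
 * hypothetical 128 MiB block out of a larger busy "System RAM" entry;
 * the remaining entry is shrunk or split as described above.
 */
static int foo_release_ram_block(resource_size_t start)
{
	return release_mem_region_adjustable(&iomem_resource, start,
					     128UL << 20);
}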
1170 * Managed region resource
1172 struct region_devres {
1173 struct resource *parent;
1174 resource_size_t start;
1175 resource_size_t n;
1178 static void devm_region_release(struct device *dev, void *res)
1180 struct region_devres *this = res;
1182 __release_region(this->parent, this->start, this->n);
1185 static int devm_region_match(struct device *dev, void *res, void *match_data)
1187 struct region_devres *this = res, *match = match_data;
1189 return this->parent == match->parent &&
1190 this->start == match->start && this->n == match->n;
1193 struct resource * __devm_request_region(struct device *dev,
1194 struct resource *parent, resource_size_t start,
1195 resource_size_t n, const char *name)
1197 struct region_devres *dr = NULL;
1198 struct resource *res;
1200 dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1201 GFP_KERNEL);
1202 if (!dr)
1203 return NULL;
1205 dr->parent = parent;
1206 dr->start = start;
1207 dr->n = n;
1209 res = __request_region(parent, start, n, name, 0);
1210 if (res)
1211 devres_add(dev, dr);
1212 else
1213 devres_free(dr);
1215 return res;
1217 EXPORT_SYMBOL(__devm_request_region);
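/*
 * Editor's illustrative sketch (not part of resource.c): the
 * device-managed path via the devm_request_region() wrapper from
 * <linux/ioport.h>; the region is released automatically when the
 * device is unbound.  The probe signature and port range are
 * hypothetical.
 */
static int foo_probe(struct device *dev)
{
	if (!devm_request_region(dev, 0x0220, 16, "foo-io"))
		return -EBUSY;
	/* no explicit release needed on error or remove paths */
	return 0;
}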
1219 void __devm_release_region(struct device *dev, struct resource *parent,
1220 resource_size_t start, resource_size_t n)
1222 struct region_devres match_data = { parent, start, n };
1224 __release_region(parent, start, n);
1225 WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
1226 &match_data));
1228 EXPORT_SYMBOL(__devm_release_region);
1231 * Called from init/main.c to reserve IO ports.
1233 #define MAXRESERVE 4
1234 static int __init reserve_setup(char *str)
1236 static int reserved;
1237 static struct resource reserve[MAXRESERVE];
1239 for (;;) {
1240 unsigned int io_start, io_num;
1241 int x = reserved;
1243 if (get_option (&str, &io_start) != 2)
1244 break;
1245 if (get_option (&str, &io_num) == 0)
1246 break;
1247 if (x < MAXRESERVE) {
1248 struct resource *res = reserve + x;
1249 res->name = "reserved";
1250 res->start = io_start;
1251 res->end = io_start + io_num - 1;
1252 res->flags = IORESOURCE_BUSY;
1253 res->child = NULL;
1254 if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
1255 reserved = x+1;
1258 return 1;
1261 __setup("reserve=", reserve_setup);
1264 * Check whether the requested addr and size span more than any single slot
1265 * in the iomem resource tree.
1267 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1269 struct resource *p = &iomem_resource;
1270 int err = 0;
1271 loff_t l;
1273 read_lock(&resource_lock);
1274 for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1276 * We can probably skip the resources without
1277 * IORESOURCE_IO attribute?
1279 if (p->start >= addr + size)
1280 continue;
1281 if (p->end < addr)
1282 continue;
1283 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1284 PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
1285 continue;
1287 * if a resource is "BUSY", it's not a hardware resource
1288 * but a driver mapping of such a resource; we don't want
1289 * to warn for those; some drivers legitimately map only
1290 * partial hardware resources. (example: vesafb)
1292 if (p->flags & IORESOURCE_BUSY)
1293 continue;
1295 printk(KERN_WARNING "resource map sanity check conflict: "
1296 "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
1297 (unsigned long long)addr,
1298 (unsigned long long)(addr + size - 1),
1299 (unsigned long long)p->start,
1300 (unsigned long long)p->end,
1301 p->name);
1302 err = -1;
1303 break;
1305 read_unlock(&resource_lock);
1307 return err;
1310 #ifdef CONFIG_STRICT_DEVMEM
1311 static int strict_iomem_checks = 1;
1312 #else
1313 static int strict_iomem_checks;
1314 #endif
1317 * check if an address is reserved in the iomem resource tree
1318 * returns 1 if reserved, 0 if not reserved.
1320 int iomem_is_exclusive(u64 addr)
1322 struct resource *p = &iomem_resource;
1323 int err = 0;
1324 loff_t l;
1325 int size = PAGE_SIZE;
1327 if (!strict_iomem_checks)
1328 return 0;
1330 addr = addr & PAGE_MASK;
1332 read_lock(&resource_lock);
1333 for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1335 * We can probably skip the resources without
1336 * IORESOURCE_IO attribute?
1338 if (p->start >= addr + size)
1339 break;
1340 if (p->end < addr)
1341 continue;
1342 if (p->flags & IORESOURCE_BUSY &&
1343 p->flags & IORESOURCE_EXCLUSIVE) {
1344 err = 1;
1345 break;
1348 read_unlock(&resource_lock);
1350 return err;
1353 static int __init strict_iomem(char *str)
1355 if (strstr(str, "relaxed"))
1356 strict_iomem_checks = 0;
1357 if (strstr(str, "strict"))
1358 strict_iomem_checks = 1;
1359 return 1;
1362 __setup("iomem=", strict_iomem);