/* arch/arm/mm/vmregion.c */

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "vmregion.h"

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vmregion  region;
 *    unsigned long    flags;
 *    struct page      **pages;
 *    unsigned int     nr_pages;
 *    unsigned long    phys_addr;
 *  };
 *
 * get_vm_area() would then call vmregion_alloc with an appropriate
 * struct vmregion head (eg):
 *
 *  struct vmregion vmalloc_head = {
 *      .vm_list  = LIST_HEAD_INIT(vmalloc_head.vm_list),
 *      .vm_start = VMALLOC_START,
 *      .vm_end   = VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vmregion_alloc().
 */
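
/*
 * Allocate a region from [head->vm_start, head->vm_end), scanning from the
 * top of the window downwards for the first gap that can hold @size bytes
 * at the requested @align alignment. Returns the new region, or NULL if no
 * space is available or the bookkeeping allocation fails.
 */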
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
                   size_t size, gfp_t gfp)
{
        unsigned long start = head->vm_start, addr = head->vm_end;
        unsigned long flags;
        struct arm_vmregion *c, *new;

        if (head->vm_end - head->vm_start < size) {
                printk(KERN_WARNING "%s: allocation too big (requested %#zx)\n",
                        __func__, size);
                goto out;
        }

        new = kmalloc(sizeof(struct arm_vmregion), gfp);
        if (!new)
                goto out;

        spin_lock_irqsave(&head->vm_lock, flags);
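
        /*
         * Scan the existing regions from the top of the window downwards:
         * try to place the allocation immediately below the previously
         * considered region, rounded down to the requested alignment, and
         * stop at the first gap that fits.
         */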
        addr = rounddown(addr - size, align);
        list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
                if (addr >= c->vm_end)
                        goto found;
                addr = rounddown(c->vm_start - size, align);
                if (addr < start)
                        goto nospc;
        }

 found:
        /*
         * Insert this entry after the one we found.
         */
        list_add(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;
        new->vm_active = 1;

        spin_unlock_irqrestore(&head->vm_lock, flags);
        return new;

 nospc:
        spin_unlock_irqrestore(&head->vm_lock, flags);
        kfree(new);
 out:
        return NULL;
}
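
/*
 * A minimal usage sketch (illustration only, not part of the original file;
 * the head, its bounds, and the sizes below are hypothetical):
 *
 *      static struct arm_vmregion_head example_head = {
 *              .vm_lock  = __SPIN_LOCK_UNLOCKED(example_head.vm_lock),
 *              .vm_list  = LIST_HEAD_INIT(example_head.vm_list),
 *              .vm_start = 0xff000000,
 *              .vm_end   = 0xff100000,
 *      };
 *
 *      struct arm_vmregion *c;
 *
 *      c = arm_vmregion_alloc(&example_head, PAGE_SIZE, SZ_64K, GFP_KERNEL);
 *
 * With an empty list the scan falls straight through to "found", so the
 * region is placed at the top of the window: [0xff0f0000, 0xff100000).
 */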

static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
        struct arm_vmregion *c;

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if (c->vm_active && c->vm_start == addr)
                        goto out;
        }
        c = NULL;
 out:
        return c;
}

struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
        struct arm_vmregion *c;
        unsigned long flags;

        spin_lock_irqsave(&head->vm_lock, flags);
        c = __arm_vmregion_find(head, addr);
        spin_unlock_irqrestore(&head->vm_lock, flags);
        return c;
}

struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
        struct arm_vmregion *c;
        unsigned long flags;

        spin_lock_irqsave(&head->vm_lock, flags);
        c = __arm_vmregion_find(head, addr);
        if (c)
                c->vm_active = 0;
        spin_unlock_irqrestore(&head->vm_lock, flags);
        return c;
}
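
/*
 * Note: arm_vmregion_find_remove() only clears vm_active, so the region
 * stops matching further lookups while the caller tears down whatever was
 * mapped there; the entry itself stays on the list until the caller hands
 * it to arm_vmregion_free() below.
 */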

void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
        unsigned long flags;

        spin_lock_irqsave(&head->vm_lock, flags);
        list_del(&c->vm_list);
        spin_unlock_irqrestore(&head->vm_lock, flags);

        kfree(c);
}
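
/*
 * Lifecycle sketch (hypothetical, assuming the example_head from the sketch
 * above): allocate a region, detach it by its start address once its
 * mappings are gone, then release the bookkeeping.
 *
 *      struct arm_vmregion *c;
 *
 *      c = arm_vmregion_alloc(&example_head, PAGE_SIZE, SZ_64K, GFP_KERNEL);
 *      if (!c)
 *              return;
 *
 *      c = arm_vmregion_find_remove(&example_head, c->vm_start);
 *      if (c)
 *              arm_vmregion_free(&example_head, c);
 */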