/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/rbtree.h"
#include "linux/slab.h"
#include "linux/vmalloc.h"
#include "linux/bootmem.h"
#include "linux/module.h"
#include "linux/init.h"
#include "asm/types.h"
#include "asm/pgtable.h"
#include "kern_util.h"
#include "user_util.h"
#include "mode_kern.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
#include "kern.h"
#include "init.h"

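/*
 * Pages of UML "physical" memory whose backing has been substituted with a
 * mapping of some other host file descriptor are tracked by struct phys_desc.
 * The descriptors live in an rb tree keyed on the page's kernel virtual
 * address, and also on a per-descriptor list so that all substitutions for a
 * given host fd can be torn down together.
 */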
struct phys_desc {
	struct rb_node rb;
	int fd;
	__u64 offset;
	void *virt;
	unsigned long phys;
	struct list_head list;
};

static struct rb_root phys_mappings = RB_ROOT;

static struct rb_node **find_rb(void *virt)
{
	struct rb_node **n = &phys_mappings.rb_node;
	struct phys_desc *d;

	while(*n != NULL){
		d = rb_entry(*n, struct phys_desc, rb);
		if(d->virt == virt)
			return(n);

		if(d->virt > virt)
			n = &(*n)->rb_left;
		else
			n = &(*n)->rb_right;
	}

	return(n);
}

static struct phys_desc *find_phys_mapping(void *virt)
{
	struct rb_node **n = find_rb(virt);

	if(*n == NULL)
		return(NULL);

	return(rb_entry(*n, struct phys_desc, rb));
}

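/*
 * Insert a descriptor into the mapping tree.  The tree is walked here with
 * the parent tracked so that rb_link_node() gets a valid parent for the new
 * node; hitting an existing entry for the same address is a caller bug.
 */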
static void insert_phys_mapping(struct phys_desc *desc)
{
	struct rb_node **n = &phys_mappings.rb_node, *parent = NULL;
	struct phys_desc *d;

	while(*n != NULL){
		parent = *n;
		d = rb_entry(parent, struct phys_desc, rb);
		if(d->virt == desc->virt)
			panic("Physical remapping for %p already present",
			      desc->virt);

		if(d->virt > desc->virt)
			n = &parent->rb_left;
		else
			n = &parent->rb_right;
	}

	rb_link_node(&desc->rb, parent, n);
	rb_insert_color(&desc->rb, &phys_mappings);
}

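/*
 * One struct desc_mapping exists per host file descriptor that currently
 * backs substituted pages; its "pages" list collects the phys_desc entries
 * belonging to that descriptor.
 */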
LIST_HEAD(descriptor_mappings);

struct desc_mapping {
	int fd;
	struct list_head list;
	struct list_head pages;
};

static struct desc_mapping *find_mapping(int fd)
{
	struct desc_mapping *desc;
	struct list_head *ele;

	list_for_each(ele, &descriptor_mappings){
		desc = list_entry(ele, struct desc_mapping, list);
		if(desc->fd == fd)
			return(desc);
	}

	return(NULL);
}

static struct desc_mapping *descriptor_mapping(int fd)
{
	struct desc_mapping *desc;

	desc = find_mapping(fd);
	if(desc != NULL)
		return(desc);

	desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
	if(desc == NULL)
		return(NULL);

	*desc = ((struct desc_mapping)
		{ .fd		= fd,
		  .list		= LIST_HEAD_INIT(desc->list),
		  .pages	= LIST_HEAD_INIT(desc->pages) });
	list_add(&desc->list, &descriptor_mappings);

	return(desc);
}

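/*
 * Replace the physical memory backing of the page containing "virt" with a
 * PAGE_SIZE mapping of "fd" at "offset", writable if "w" is set.  The page
 * is registered in the rb tree and on the descriptor's page list; if the
 * host mapping fails, the registration is undone and the error returned.
 */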
int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
{
	struct desc_mapping *fd_maps;
	struct phys_desc *desc;
	int err;

	fd_maps = descriptor_mapping(fd);
	if(fd_maps == NULL)
		return(-ENOMEM);

	desc = find_phys_mapping(virt);
	if(desc != NULL)
		panic("Address 0x%p is already substituted\n", virt);

	err = -ENOMEM;
	desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
	if(desc == NULL)
		goto out;

	*desc = ((struct phys_desc)
		{ .fd		= fd,
		  .offset	= offset,
		  .virt		= virt,
		  .phys		= __pa(virt),
		  .list		= LIST_HEAD_INIT(desc->list) });
	insert_phys_mapping(desc);

	list_add(&desc->list, &fd_maps->pages);

	virt = (void *) ((unsigned long) virt & PAGE_MASK);
	err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
	if(!err)
		goto out;

	rb_erase(&desc->rb, &phys_mappings);
	list_del(&desc->list);
	kfree(desc);
 out:
	return(err);
}

static int physmem_fd = -1;

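/*
 * Undo a substitution: drop the descriptor and map the page back to its
 * home location in the physmem file (at offset __pa(virt)).
 */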
static void remove_mapping(struct phys_desc *desc)
{
	void *virt = desc->virt;
	int err;

	rb_erase(&desc->rb, &phys_mappings);
	list_del(&desc->list);
	kfree(desc);

	err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
	if(err)
		panic("Failed to unmap block device page from physical memory, "
		      "errno = %d", -err);
}

int physmem_remove_mapping(void *virt)
{
	struct phys_desc *desc;

	virt = (void *) ((unsigned long) virt & PAGE_MASK);
	desc = find_phys_mapping(virt);
	if(desc == NULL)
		return(0);

	remove_mapping(desc);
	return(1);
}

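/*
 * Called when a backing file descriptor goes away - every page still
 * substituted from it is remapped to the physmem file and its current
 * contents are read back in from the file so no data is lost.
 */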
void physmem_forget_descriptor(int fd)
{
	struct desc_mapping *desc;
	struct phys_desc *page;
	struct list_head *ele, *next;
	__u64 offset;
	void *addr;
	int err;

	desc = find_mapping(fd);
	if(desc == NULL)
		return;

	list_for_each_safe(ele, next, &desc->pages){
		page = list_entry(ele, struct phys_desc, list);
		offset = page->offset;
		addr = page->virt;
		remove_mapping(page);
		err = os_seek_file(fd, offset);
		if(err)
			panic("physmem_forget_descriptor - failed to seek "
			      "to %lld in fd %d, error = %d\n",
			      offset, fd, -err);
		err = os_read_file(fd, addr, PAGE_SIZE);
		if(err < 0)
			panic("physmem_forget_descriptor - failed to read "
			      "from fd %d to 0x%p, error = %d\n",
			      fd, addr, -err);
	}

	list_del(&desc->list);
	kfree(desc);
}

EXPORT_SYMBOL(physmem_forget_descriptor);
EXPORT_SYMBOL(physmem_remove_mapping);
EXPORT_SYMBOL(physmem_subst_mapping);

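/*
 * Freed pages must not stay mapped to a backing file, so any substitution
 * covering them is removed here.
 */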
void arch_free_page(struct page *page, int order)
{
	void *virt;
	int i;

	for(i = 0; i < (1 << order); i++){
		virt = __va(page_to_phys(page + i));
		physmem_remove_mapping(virt);
	}
}

int is_remapped(void *virt)
{
	struct phys_desc *desc = find_phys_mapping(virt);

	return(desc != NULL);
}

/* Changed during early boot */
unsigned long high_physmem;

extern unsigned long physmem_size;

void *to_virt(unsigned long phys)
{
	return((void *) uml_physmem + phys);
}

unsigned long to_phys(void *virt)
{
	return(((unsigned long) virt) - uml_physmem);
}

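/*
 * Allocate and initialize the struct page array covering physical memory,
 * iomem regions and highmem.  Early in boot the bootmem allocator is used;
 * once kmalloc is available, kmalloc with a vmalloc fallback.
 */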
int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
{
	struct page *p, *map;
	unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
	unsigned long iomem_len, iomem_pages, total_len, total_pages;
	int i;

	phys_pages = physmem >> PAGE_SHIFT;
	phys_len = phys_pages * sizeof(struct page);

	iomem_pages = iomem >> PAGE_SHIFT;
	iomem_len = iomem_pages * sizeof(struct page);

	highmem_pages = highmem >> PAGE_SHIFT;
	highmem_len = highmem_pages * sizeof(struct page);

	total_pages = phys_pages + iomem_pages + highmem_pages;
	total_len = phys_len + iomem_len + highmem_len;

	if(kmalloc_ok){
		map = kmalloc(total_len, GFP_KERNEL);
		if(map == NULL)
			map = vmalloc(total_len);
	}
	else map = alloc_bootmem_low_pages(total_len);

	if(map == NULL)
		return(-ENOMEM);

	for(i = 0; i < total_pages; i++){
		p = &map[i];
		set_page_count(p, 0);
		SetPageReserved(p);
		INIT_LIST_HEAD(&p->lru);
	}

	max_mapnr = total_pages;
	return(0);
}

struct page *phys_to_page(const unsigned long phys)
{
	return(&mem_map[phys >> PAGE_SHIFT]);
}

struct page *__virt_to_page(const unsigned long virt)
{
	return(&mem_map[__pa(virt) >> PAGE_SHIFT]);
}

phys_t page_to_phys(struct page *page)
{
	return((page - mem_map) << PAGE_SHIFT);
}

pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	pte_t pte;

	pte_set_val(pte, page_to_phys(page), pgprot);
	if(pte_present(pte))
		pte = pte_mknewprot(pte_mknewpage(pte));
	return(pte);
}

/* Changed during early boot */
static unsigned long kmem_top = 0;

unsigned long get_kmem_end(void)
{
	if(kmem_top == 0)
		kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
	return(kmem_top);
}

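/*
 * Map "len" bytes of physical address "phys" at virtual address "virt" with
 * the given protections, finding the backing fd and file offset through
 * phys_mapping().  Failure is fatal; -ENOMEM usually means the host's
 * vm.max_map_count limit has been hit.
 */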
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
		int r, int w, int x)
{
	__u64 offset;
	int fd, err;

	fd = phys_mapping(phys, &offset);
	err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
	if(err){
		if(err == -ENOMEM)
			printk("try increasing the host's "
			       "/proc/sys/vm/max_map_count to <physical "
			       "memory size>/4096\n");
		panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
		      "err = %d\n", virt, fd, offset, len, r, w, x, err);
	}
}

#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)

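/*
 * Create the host file backing UML's "physical" memory, map everything above
 * the already-mapped reserved region from it, and hand the remaining pages
 * to the bootmem allocator.
 */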
void setup_physmem(unsigned long start, unsigned long reserve_end,
		   unsigned long len, unsigned long highmem)
{
	unsigned long reserve = reserve_end - start;
	int pfn = PFN_UP(__pa(reserve_end));
	int delta = (len - reserve) >> PAGE_SHIFT;
	int err, offset, bootmap_size;

	physmem_fd = create_mem_file(len + highmem);

	offset = uml_reserved - uml_physmem;
	err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
			    len - offset, 1, 1, 0);
	if(err < 0){
		os_print_error(err, "Mapping memory");
		exit(1);
	}

	bootmap_size = init_bootmem(pfn, pfn + delta);
	free_bootmem(__pa(reserve_end) + bootmap_size,
		     len - bootmap_size - reserve);
}

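/*
 * Translate a physical address into the host fd and file offset that back
 * it: a substituted page, the physmem file, an iomem region, or highmem
 * (which also lives in the physmem file, past the iomem hole).
 */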
int phys_mapping(unsigned long phys, __u64 *offset_out)
{
	struct phys_desc *desc = find_phys_mapping(__va(phys & PAGE_MASK));
	int fd = -1;

	if(desc != NULL){
		fd = desc->fd;
		*offset_out = desc->offset;
	}
	else if(phys < physmem_size){
		fd = physmem_fd;
		*offset_out = phys;
	}
	else if(phys < __pa(end_iomem)){
		struct iomem_region *region = iomem_regions;

		while(region != NULL){
			if((phys >= region->phys) &&
			   (phys < region->phys + region->size)){
				fd = region->fd;
				*offset_out = phys - region->phys;
				break;
			}
			region = region->next;
		}
	}
	else if(phys < __pa(end_iomem) + highmem){
		fd = physmem_fd;
		*offset_out = phys - iomem_size;
	}

	return(fd);
}

static int __init uml_mem_setup(char *line, int *add)
{
	char *retptr;

	physmem_size = memparse(line, &retptr);
	return 0;
}

__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
"    This controls how much \"physical\" memory the kernel allocates\n"
"    for the system. The size is specified as a number followed by\n"
"    one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
"    This is not related to the amount of memory in the host. It can\n"
"    be more, and the excess, if it's ever used, will just be swapped out.\n"
"    Example: mem=64M\n\n"
);

unsigned long find_iomem(char *driver, unsigned long *len_out)
{
	struct iomem_region *region = iomem_regions;

	while(region != NULL){
		if(!strcmp(region->driver, driver)){
			*len_out = region->size;
			return(region->virt);
		}
		region = region->next;
	}

	return(0);
}

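/*
 * Map each registered iomem region into the address space just above
 * high_physmem, leaving a one page gap between regions, and record the
 * resulting virtual and physical addresses in the region descriptor.
 */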
int setup_iomem(void)
{
	struct iomem_region *region = iomem_regions;
	unsigned long iomem_start = high_physmem + PAGE_SIZE;
	int err;

	while(region != NULL){
		err = os_map_memory((void *) iomem_start, region->fd, 0,
				    region->size, 1, 1, 0);
		if(err)
			printk("Mapping iomem region for driver '%s' failed, "
			       "errno = %d\n", region->driver, -err);
		else {
			region->virt = iomem_start;
			region->phys = __pa(region->virt);
		}

		iomem_start += region->size + PAGE_SIZE;
		region = region->next;
	}

	return(0);
}

__initcall(setup_iomem);

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */