/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */
7 #include "linux/rbtree.h"
8 #include "linux/slab.h"
9 #include "linux/vmalloc.h"
10 #include "linux/bootmem.h"
11 #include "linux/module.h"
12 #include "asm/types.h"
13 #include "asm/pgtable.h"
14 #include "kern_util.h"
15 #include "user_util.h"
16 #include "mode_kern.h"
29 struct list_head list
;
32 static struct rb_root phys_mappings
= RB_ROOT
;
34 static struct rb_node
**find_rb(void *virt
)
36 struct rb_node
**n
= &phys_mappings
.rb_node
;
40 d
= rb_entry(*n
, struct phys_desc
, rb
);
53 static struct phys_desc
*find_phys_mapping(void *virt
)
55 struct rb_node
**n
= find_rb(virt
);
60 return(rb_entry(*n
, struct phys_desc
, rb
));
63 static void insert_phys_mapping(struct phys_desc
*desc
)
65 struct rb_node
**n
= find_rb(desc
->virt
);
68 panic("Physical remapping for %p already present",
71 rb_link_node(&desc
->rb
, (*n
)->rb_parent
, n
);
72 rb_insert_color(&desc
->rb
, &phys_mappings
);
75 LIST_HEAD(descriptor_mappings
);
79 struct list_head list
;
80 struct list_head pages
;
83 static struct desc_mapping
*find_mapping(int fd
)
85 struct desc_mapping
*desc
;
86 struct list_head
*ele
;
88 list_for_each(ele
, &descriptor_mappings
){
89 desc
= list_entry(ele
, struct desc_mapping
, list
);
97 static struct desc_mapping
*descriptor_mapping(int fd
)
99 struct desc_mapping
*desc
;
101 desc
= find_mapping(fd
);
105 desc
= kmalloc(sizeof(*desc
), GFP_ATOMIC
);
109 *desc
= ((struct desc_mapping
)
111 .list
= LIST_HEAD_INIT(desc
->list
),
112 .pages
= LIST_HEAD_INIT(desc
->pages
) });
113 list_add(&desc
->list
, &descriptor_mappings
);
118 int physmem_subst_mapping(void *virt
, int fd
, __u64 offset
, int w
)
120 struct desc_mapping
*fd_maps
;
121 struct phys_desc
*desc
;
125 fd_maps
= descriptor_mapping(fd
);
130 desc
= find_phys_mapping(virt
);
132 panic("Address 0x%p is already substituted\n", virt
);
135 desc
= kmalloc(sizeof(*desc
), GFP_ATOMIC
);
139 *desc
= ((struct phys_desc
)
144 .list
= LIST_HEAD_INIT(desc
->list
) });
145 insert_phys_mapping(desc
);
147 list_add(&desc
->list
, &fd_maps
->pages
);
149 virt
= (void *) ((unsigned long) virt
& PAGE_MASK
);
150 err
= os_map_memory(virt
, fd
, offset
, PAGE_SIZE
, 1, w
, 0);
154 rb_erase(&desc
->rb
, &phys_mappings
);
160 static int physmem_fd
= -1;
162 static void remove_mapping(struct phys_desc
*desc
)
164 void *virt
= desc
->virt
;
167 rb_erase(&desc
->rb
, &phys_mappings
);
168 list_del(&desc
->list
);
171 err
= os_map_memory(virt
, physmem_fd
, __pa(virt
), PAGE_SIZE
, 1, 1, 0);
173 panic("Failed to unmap block device page from physical memory, "
177 int physmem_remove_mapping(void *virt
)
179 struct phys_desc
*desc
;
181 virt
= (void *) ((unsigned long) virt
& PAGE_MASK
);
182 desc
= find_phys_mapping(virt
);
186 remove_mapping(desc
);
190 void physmem_forget_descriptor(int fd
)
192 struct desc_mapping
*desc
;
193 struct phys_desc
*page
;
194 struct list_head
*ele
, *next
;
199 desc
= find_mapping(fd
);
203 list_for_each_safe(ele
, next
, &desc
->pages
){
204 page
= list_entry(ele
, struct phys_desc
, list
);
205 offset
= page
->offset
;
207 remove_mapping(page
);
208 err
= os_seek_file(fd
, offset
);
210 panic("physmem_forget_descriptor - failed to seek "
211 "to %lld in fd %d, error = %d\n",
213 err
= os_read_file(fd
, addr
, PAGE_SIZE
);
215 panic("physmem_forget_descriptor - failed to read "
216 "from fd %d to 0x%p, error = %d\n",
220 list_del(&desc
->list
);
/* Exported for the ubd block driver, which substitutes page backings. */
EXPORT_SYMBOL(physmem_forget_descriptor);
EXPORT_SYMBOL(physmem_remove_mapping);
EXPORT_SYMBOL(physmem_subst_mapping);
228 void arch_free_page(struct page
*page
, int order
)
233 for(i
= 0; i
< (1 << order
); i
++){
234 virt
= __va(page_to_phys(page
+ i
));
235 physmem_remove_mapping(virt
);
239 int is_remapped(void *virt
)
241 struct phys_desc
*desc
= find_phys_mapping(virt
);
243 return(desc
!= NULL
);
/* Changed during early boot */
unsigned long high_physmem;

extern unsigned long physmem_size;
251 void *to_virt(unsigned long phys
)
253 return((void *) uml_physmem
+ phys
);
256 unsigned long to_phys(void *virt
)
258 return(((unsigned long) virt
) - uml_physmem
);
261 int init_maps(unsigned long physmem
, unsigned long iomem
, unsigned long highmem
)
263 struct page
*p
, *map
;
264 unsigned long phys_len
, phys_pages
, highmem_len
, highmem_pages
;
265 unsigned long iomem_len
, iomem_pages
, total_len
, total_pages
;
268 phys_pages
= physmem
>> PAGE_SHIFT
;
269 phys_len
= phys_pages
* sizeof(struct page
);
271 iomem_pages
= iomem
>> PAGE_SHIFT
;
272 iomem_len
= iomem_pages
* sizeof(struct page
);
274 highmem_pages
= highmem
>> PAGE_SHIFT
;
275 highmem_len
= highmem_pages
* sizeof(struct page
);
277 total_pages
= phys_pages
+ iomem_pages
+ highmem_pages
;
278 total_len
= phys_len
+ iomem_pages
+ highmem_len
;
281 map
= kmalloc(total_len
, GFP_KERNEL
);
283 map
= vmalloc(total_len
);
285 else map
= alloc_bootmem_low_pages(total_len
);
290 for(i
= 0; i
< total_pages
; i
++){
292 set_page_count(p
, 0);
294 INIT_LIST_HEAD(&p
->lru
);
297 max_mapnr
= total_pages
;
301 struct page
*phys_to_page(const unsigned long phys
)
303 return(&mem_map
[phys
>> PAGE_SHIFT
]);
306 struct page
*__virt_to_page(const unsigned long virt
)
308 return(&mem_map
[__pa(virt
) >> PAGE_SHIFT
]);
311 phys_t
page_to_phys(struct page
*page
)
313 return((page
- mem_map
) << PAGE_SHIFT
);
316 pte_t
mk_pte(struct page
*page
, pgprot_t pgprot
)
320 pte_set_val(pte
, page_to_phys(page
), pgprot
);
322 pte_mknewprot(pte_mknewpage(pte
));
326 /* Changed during early boot */
327 static unsigned long kmem_top
= 0;
329 unsigned long get_kmem_end(void)
332 kmem_top
= CHOOSE_MODE(kmem_end_tt
, kmem_end_skas
);
336 void map_memory(unsigned long virt
, unsigned long phys
, unsigned long len
,
342 fd
= phys_mapping(phys
, &offset
);
343 err
= os_map_memory((void *) virt
, fd
, offset
, len
, r
, w
, x
);
346 printk("try increasing the host's "
347 "/proc/sys/vm/max_map_count to <physical "
348 "memory size>/4096\n");
349 panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
350 "err = %d\n", virt
, fd
, offset
, len
, r
, w
, x
, err
);
/* Page frame number of the page containing x, rounded up. */
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)

extern int __syscall_stub_start, __binary_start;
358 void setup_physmem(unsigned long start
, unsigned long reserve_end
,
359 unsigned long len
, unsigned long highmem
)
361 unsigned long reserve
= reserve_end
- start
;
362 int pfn
= PFN_UP(__pa(reserve_end
));
363 int delta
= (len
- reserve
) >> PAGE_SHIFT
;
364 int err
, offset
, bootmap_size
;
366 physmem_fd
= create_mem_file(len
+ highmem
);
368 offset
= uml_reserved
- uml_physmem
;
369 err
= os_map_memory((void *) uml_reserved
, physmem_fd
, offset
,
370 len
- offset
, 1, 1, 0);
372 os_print_error(err
, "Mapping memory");
376 /* Special kludge - This page will be mapped in to userspace processes
377 * from physmem_fd, so it needs to be written out there.
379 os_seek_file(physmem_fd
, __pa(&__syscall_stub_start
));
380 os_write_file(physmem_fd
, &__syscall_stub_start
, PAGE_SIZE
);
382 bootmap_size
= init_bootmem(pfn
, pfn
+ delta
);
383 free_bootmem(__pa(reserve_end
) + bootmap_size
,
384 len
- bootmap_size
- reserve
);
387 int phys_mapping(unsigned long phys
, __u64
*offset_out
)
389 struct phys_desc
*desc
= find_phys_mapping(__va(phys
& PAGE_MASK
));
394 *offset_out
= desc
->offset
;
396 else if(phys
< physmem_size
){
400 else if(phys
< __pa(end_iomem
)){
401 struct iomem_region
*region
= iomem_regions
;
403 while(region
!= NULL
){
404 if((phys
>= region
->phys
) &&
405 (phys
< region
->phys
+ region
->size
)){
407 *offset_out
= phys
- region
->phys
;
410 region
= region
->next
;
413 else if(phys
< __pa(end_iomem
) + highmem
){
415 *offset_out
= phys
- iomem_size
;
421 static int __init
uml_mem_setup(char *line
, int *add
)
424 physmem_size
= memparse(line
,&retptr
);
427 __uml_setup("mem=", uml_mem_setup
,
428 "mem=<Amount of desired ram>\n"
429 " This controls how much \"physical\" memory the kernel allocates\n"
430 " for the system. The size is specified as a number followed by\n"
431 " one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
432 " This is not related to the amount of memory in the host. It can\n"
433 " be more, and the excess, if it's ever used, will just be swapped out.\n"
434 " Example: mem=64M\n\n"
437 unsigned long find_iomem(char *driver
, unsigned long *len_out
)
439 struct iomem_region
*region
= iomem_regions
;
441 while(region
!= NULL
){
442 if(!strcmp(region
->driver
, driver
)){
443 *len_out
= region
->size
;
444 return(region
->virt
);
451 int setup_iomem(void)
453 struct iomem_region
*region
= iomem_regions
;
454 unsigned long iomem_start
= high_physmem
+ PAGE_SIZE
;
457 while(region
!= NULL
){
458 err
= os_map_memory((void *) iomem_start
, region
->fd
, 0,
459 region
->size
, 1, 1, 0);
461 printk("Mapping iomem region for driver '%s' failed, "
462 "errno = %d\n", region
->driver
, -err
);
464 region
->virt
= iomem_start
;
465 region
->phys
= __pa(region
->virt
);
468 iomem_start
+= region
->size
+ PAGE_SIZE
;
469 region
= region
->next
;
475 __initcall(setup_iomem
);
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */