/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
				   _PAGE_DIRTY | _PAGE_ACCESSED |
				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD |
				   flags);

	/* Clamp to the span covered by a single pmd entry */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		/* Refuse to silently overwrite an existing mapping */
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Clamp to the span covered by a single pgd entry */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
int remap_area_pages(unsigned long address, unsigned long phys_addr,
		     unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;

		error = -ENOMEM;
		pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
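
/*
 * Note on the walk above (descriptive only): remap_area_pages() advances
 * one pgd slot per iteration, remap_area_pmd() one pmd slot, and
 * remap_area_pte() one pte (one page) at a time, with each level clamping
 * its `end' to the span that a single entry at the level above covers
 * (PGDIR_SIZE, PMD_SIZE). The extra `address &&' in each loop condition
 * handles the aligned increment wrapping to zero at the very top of the
 * address space.
 */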
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail. (A worked example of
 * this alignment fix-up follows the function below.)
 */
void *p3_ioremap(unsigned long phys_addr, unsigned long size,
		 unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}
	return (void *)(offset + (char *)addr);
}
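
/*
 * Worked example of the alignment fix-up (hypothetical numbers, 4K pages
 * assumed): p3_ioremap(0x1d000804, 0x100, 0) computes
 * last_addr = 0x1d000903 and offset = 0x804, rounds phys_addr down to
 * 0x1d000000 and size up to 0x1000, so exactly one page gets mapped, and
 * the caller receives addr + 0x804, a pointer to precisely the bytes
 * requested.
 */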
void p3_iounmap(void *addr)
{
	/*
	 * Addresses at or below high_memory came from phys_to_virt() for
	 * the always-mapped low PCI/ISA window and were never vmalloc'ed,
	 * so there is nothing to free for them.
	 */
	if (addr > high_memory)
		vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
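
/*
 * Usage sketch (illustrative only; the device address, register offset
 * and size below are made-up assumptions, not part of this file):
 *
 *	void *regs = p3_ioremap(0x1f000000, 0x1000, 0);
 *	if (regs) {
 *		unsigned int status =
 *			*(volatile unsigned int *)((char *)regs + 0x04);
 *		...
 *		p3_iounmap(regs);
 *	}
 *
 * Because the returned pointer may carry a sub-page offset, callers pass
 * the same pointer back to p3_iounmap(), which masks the offset off
 * before freeing the mapping.
 */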