/*
 *  arch/s390/mm/ioremap.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/extable.c"
 *    (C) Copyright 1995 1996 Linus Torvalds
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
23 static inline void remap_area_pte(pte_t
* pte
, unsigned long address
, unsigned long size
,
24 unsigned long phys_addr
, unsigned long flags
)
35 pfn
= phys_addr
>> PAGE_SHIFT
;
37 if (!pte_none(*pte
)) {
38 printk("remap_area_pte: page already exists\n");
41 set_pte(pte
, pfn_pte(pfn
, __pgprot(flags
)));
45 } while (address
&& (address
< end
));
48 static inline int remap_area_pmd(pmd_t
* pmd
, unsigned long address
, unsigned long size
,
49 unsigned long phys_addr
, unsigned long flags
)
53 address
&= ~PGDIR_MASK
;
61 pte_t
* pte
= pte_alloc_kernel(&init_mm
, pmd
, address
);
64 remap_area_pte(pte
, address
, end
- address
, address
+ phys_addr
, flags
);
65 address
= (address
+ PMD_SIZE
) & PMD_MASK
;
67 } while (address
&& (address
< end
));
71 static int remap_area_pages(unsigned long address
, unsigned long phys_addr
,
72 unsigned long size
, unsigned long flags
)
76 unsigned long end
= address
+ size
;
79 dir
= pgd_offset(&init_mm
, address
);
83 spin_lock(&init_mm
.page_table_lock
);
86 pmd
= pmd_alloc(&init_mm
, dir
, address
);
90 if (remap_area_pmd(pmd
, address
, end
- address
,
91 phys_addr
+ address
, flags
))
94 address
= (address
+ PGDIR_SIZE
) & PGDIR_MASK
;
96 } while (address
&& (address
< end
));
97 spin_unlock(&init_mm
.page_table_lock
);
/*
 * Generic mapping function (not visible outside):
 *
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 */
111 void * __ioremap(unsigned long phys_addr
, unsigned long size
, unsigned long flags
)
114 struct vm_struct
* area
;
116 if (phys_addr
< virt_to_phys(high_memory
))
117 return phys_to_virt(phys_addr
);
118 if (phys_addr
& ~PAGE_MASK
)
120 size
= PAGE_ALIGN(size
);
121 if (!size
|| size
> phys_addr
+ size
)
123 area
= get_vm_area(size
, VM_IOREMAP
);
127 if (remap_area_pages((unsigned long) addr
, phys_addr
, size
, flags
)) {
134 void iounmap(void *addr
)
136 if (addr
> high_memory
)