/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes of physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
/*
 * We use one full pte table with 4K pages. With 16K/64K/256K pages, the pte
 * table covers enough memory (32MB/512MB/2GB respectively) that both FIXMAP
 * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
 * in the 16K/64K/256K page-size cases.
 */
#ifdef CONFIG_PPC_4K_PAGES
#define PKMAP_ORDER	PTE_SHIFT
#else
#define PKMAP_ORDER	9
#endif
#define LAST_PKMAP	(1 << PKMAP_ORDER)
#ifndef CONFIG_PPC_4K_PAGES
#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
#else
#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
#endif
#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
#define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
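
/*
 * For illustration, PKMAP_ADDR() and PKMAP_NR() are inverses over the pkmap
 * window; a minimal sketch of the round trip for any slot n in
 * [0, LAST_PKMAP):
 *
 *	unsigned long va = PKMAP_ADDR(n);
 *	BUG_ON(PKMAP_NR(va) != n);
 *	BUG_ON(va < PKMAP_BASE || va >= PKMAP_ADDR(LAST_PKMAP));
 */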
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
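
/*
 * A minimal process-context usage sketch of the kmap()/kunmap() pair above,
 * assuming 'page' is any struct page the caller already holds a reference to:
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 */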
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic_prot(struct page *page, enum km_type type,
				     pgprot_t prot)
{
	unsigned int idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void*) vaddr;
}
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}
static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_page(NULL, vaddr);
#endif
	pagefault_enable();
}
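
/*
 * An atomic-context usage sketch of the kmap_atomic()/kunmap_atomic() pair
 * above, e.g. from an interrupt handler; KM_IRQ0 is one of the slots from
 * <asm/kmap_types.h>, and 'page', 'offset' and 'val' are assumed caller
 * state:
 *
 *	char *vaddr = kmap_atomic(page, KM_IRQ0);
 *	vaddr[offset] = val;
 *	kunmap_atomic(vaddr, KM_IRQ0);
 */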
static inline struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long) ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
#define flush_cache_kmaps()	flush_cache_all()

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */