1 #ifndef _ASM_IA64_PAGE_H
2 #define _ASM_IA64_PAGE_H
4 * Pagetable related stuff.
6 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
7 * David Mosberger-Tang <davidm@hpl.hp.com>
10 #include <asm/intrinsics.h>
11 #include <asm/types.h>
14 * The top three bits of an IA64 address are its Region Number.
15 * Different regions are assigned to different purposes.
17 #define RGN_SHIFT (61)
18 #define RGN_BASE(r) (__IA64_UL_CONST(r)<<RGN_SHIFT)
19 #define RGN_BITS (RGN_BASE(-1))
21 #define RGN_KERNEL 7 /* Identity mapped region */
22 #define RGN_UNCACHED 6 /* Identity mapped I/O region */
23 #define RGN_GATE 5 /* Gate page, Kernel text, etc */
24 #define RGN_HPAGE 4 /* For Huge TLB pages */
27 * PAGE_SHIFT determines the actual kernel page size.
29 #if defined(CONFIG_IA64_PAGE_SIZE_4KB)
30 # define PAGE_SHIFT 12
31 #elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
32 # define PAGE_SHIFT 13
33 #elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
34 # define PAGE_SHIFT 14
35 #elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
36 # define PAGE_SHIFT 16
38 # error Unsupported page size!
41 #define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
42 #define PAGE_MASK (~(PAGE_SIZE - 1))
44 #define PERCPU_PAGE_SHIFT 18 /* log2() of max. size of per-CPU area */
45 #define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
#ifdef CONFIG_HUGETLB_PAGE
# define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
/* HPAGE_SHIFT is variable at runtime (set via the hpage_shift variable). */
# define HPAGE_SHIFT	hpage_shift
# define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
# define HPAGE_SIZE	(__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK	(~(HPAGE_SIZE - 1))

# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* CONFIG_HUGETLB_PAGE */
59 # define __pa(x) ((x) - PAGE_OFFSET)
60 # define __va(x) ((x) + PAGE_OFFSET)
61 #else /* !__ASSEMBLY */
62 # define STRICT_MM_TYPECHECKS
extern void clear_page (void *page);
extern void copy_page (void *to, void *from);

/*
 * clear_user_page() and copy_user_page() can't be inline functions because
 * flush_dcache_page() can't be defined until later...
 */
#define clear_user_page(addr, vaddr, page)	\
do {						\
	clear_page(addr);			\
	flush_dcache_page(page);		\
} while (0)

#define copy_user_page(to, from, vaddr, page)	\
do {						\
	copy_page((to), (from));		\
	flush_dcache_page(page);		\
} while (0)


#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)		\
({									\
	struct page *page = alloc_page_vma(				\
		GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);	\
	if (page)							\
		flush_dcache_page(page);				\
	page;								\
})

#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
95 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
97 #ifdef CONFIG_VIRTUAL_MEM_MAP
98 extern int ia64_pfn_valid (unsigned long pfn
);
100 # define ia64_pfn_valid(pfn) 1
103 #ifdef CONFIG_VIRTUAL_MEM_MAP
104 extern struct page
*vmem_map
;
105 #ifdef CONFIG_DISCONTIGMEM
106 # define page_to_pfn(page) ((unsigned long) (page - vmem_map))
107 # define pfn_to_page(pfn) (vmem_map + (pfn))
109 # include <asm-generic/memory_model.h>
112 # include <asm-generic/memory_model.h>
115 #ifdef CONFIG_FLATMEM
116 # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
117 #elif defined(CONFIG_DISCONTIGMEM)
118 extern unsigned long min_low_pfn
;
119 extern unsigned long max_low_pfn
;
120 # define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
123 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
124 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
125 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
/*
 * An IA-64 virtual address decomposes into a 3-bit region number (top
 * bits) and a 61-bit intra-region offset; the union lets the macros
 * below manipulate the two fields of a raw address in place.
 */
typedef union ia64_va {
	struct {
		unsigned long off : 61;		/* intra-region offset */
		unsigned long reg :  3;		/* region number */
	} f;
	unsigned long l;
	void *p;
} ia64_va;

/*
 * Note: These macros depend on the fact that PAGE_OFFSET has all
 * region bits set to 1 and all other bits set to zero.  They are
 * expressed in this way to ensure they result in a single "dep"
 * instruction.
 */
#define __pa(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
#define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})

#define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
#ifdef CONFIG_HUGETLB_PAGE
/* Keep the region number, scale the offset by the huge/base page ratio. */
# define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)			\
				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
extern unsigned int hpage_shift;
#endif
155 static __inline__
int
156 get_order (unsigned long size
)
158 long double d
= size
- 1;
161 order
= ia64_getf_exp(d
);
162 order
= order
- PAGE_SHIFT
- 0xffff + 1;
168 #endif /* !__ASSEMBLY__ */
#ifdef STRICT_MM_TYPECHECKS
  /*
   * These are used to make use of C type-checking..
   */
  typedef struct { unsigned long pte; } pte_t;
  typedef struct { unsigned long pmd; } pmd_t;
#ifdef CONFIG_PGTABLE_4
  typedef struct { unsigned long pud; } pud_t;
#endif
  typedef struct { unsigned long pgd; } pgd_t;
  typedef struct { unsigned long pgprot; } pgprot_t;
  typedef struct page *pgtable_t;

# define pte_val(x)	((x).pte)
# define pmd_val(x)	((x).pmd)
#ifdef CONFIG_PGTABLE_4
# define pud_val(x)	((x).pud)
#endif
# define pgd_val(x)	((x).pgd)
# define pgprot_val(x)	((x).pgprot)

# define __pte(x)	((pte_t) { (x) } )
# define __pmd(x)	((pmd_t) { (x) } )
# define __pgprot(x)	((pgprot_t) { (x) } )

#else /* !STRICT_MM_TYPECHECKS */
  /*
   * .. while these make it easier on the compiler
   */
# ifndef __ASSEMBLY__
  typedef unsigned long pte_t;
  typedef unsigned long pmd_t;
  typedef unsigned long pgd_t;
  typedef unsigned long pgprot_t;
  typedef struct page *pgtable_t;
# endif

# define pte_val(x)	(x)
# define pmd_val(x)	(x)
# define pgd_val(x)	(x)
# define pgprot_val(x)	(x)

# define __pte(x)	(x)
# define __pgd(x)	(x)
# define __pgprot(x)	(x)
#endif /* !STRICT_MM_TYPECHECKS */
217 #define PAGE_OFFSET RGN_BASE(RGN_KERNEL)
219 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
220 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
221 (((current->personality & READ_IMPLIES_EXEC) != 0) \
224 #define GATE_ADDR RGN_BASE(RGN_GATE)
227 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
228 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
230 #define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000))
231 #define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
232 #define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
234 #endif /* _ASM_IA64_PAGE_H */