/*
 * MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
 * [linux-2.6.9-moxart.git] / include / asm-x86_64 / page.h
 * blob c5c2b01cfcfaff3f393a7136974b3430d6b9d01c
 */
1 #ifndef _X86_64_PAGE_H
2 #define _X86_64_PAGE_H
4 #include <linux/config.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#ifdef __ASSEMBLY__
/* The assembler cannot digest the UL suffix, so a plain int literal is
 * used in that case; C code gets the unsigned long form below. */
#define PAGE_SIZE	(0x1 << PAGE_SHIFT)
#else
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK	(~(PAGE_SIZE-1))
/* Page-frame mask limited to the physical address width.  __PHYSICAL_MASK
 * is defined further down in this file; macros are only expanded at their
 * point of use, so the ordering is harmless. */
#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))

/* Kernel stacks span 2^THREAD_ORDER pages (8KB with 4KB pages). */
#define THREAD_ORDER	1
#ifdef __ASSEMBLY__
#define THREAD_SIZE	(1 << (PAGE_SHIFT + THREAD_ORDER))
#else
#define THREAD_SIZE	(1UL << (PAGE_SHIFT + THREAD_ORDER))
#endif
/* Mask a stack address down to its THREAD_SIZE-aligned base. */
#define CURRENT_MASK	(~(THREAD_SIZE-1))

/* Large (PMD-mapped) pages; PMD_SHIFT comes from asm/pgtable.h. */
#define LARGE_PAGE_MASK	(~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE	(1UL << PMD_SHIFT)

/* Huge pages are PMD-sized on x86-64. */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
32 #ifdef __KERNEL__
33 #ifndef __ASSEMBLY__
35 void clear_page(void *);
36 void copy_page(void *, void *);
38 #define clear_user_page(page, vaddr, pg) clear_page(page)
39 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/*
 * These are used to make use of C type-checking..
 *
 * Each page-table level gets its own single-member struct so that the
 * compiler rejects accidental mixing of pte/pmd/pgd/pml4 values.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pml4; } pml4_t;
#define PTE_MASK	PHYSICAL_PAGE_MASK

typedef struct { unsigned long pgprot; } pgprot_t;

/* Extract the raw unsigned long from a wrapped value... */
#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pml4_val(x)	((x).pml4)
#define pgprot_val(x)	((x).pgprot)

/* ...and wrap a raw value back into the typed struct (compound literals). */
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pml4(x) ((pml4_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
64 extern unsigned long vm_stack_flags, vm_stack_flags32;
65 extern unsigned long vm_data_default_flags, vm_data_default_flags32;
66 extern unsigned long vm_force_exec32;
68 #define __START_KERNEL 0xffffffff80100000UL
69 #define __START_KERNEL_map 0xffffffff80000000UL
70 #define __PAGE_OFFSET 0x0000010000000000UL /* 1 << 40 */
72 #else
73 #define __START_KERNEL 0xffffffff80100000
74 #define __START_KERNEL_map 0xffffffff80000000
75 #define __PAGE_OFFSET 0x0000010000000000 /* 1 << 40 */
76 #endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT	40
#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK_SHIFT	48
#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

/* Extent of the kernel text mapping starting at __START_KERNEL_map. */
#define KERNEL_TEXT_SIZE	(40UL*1024*1024)
#define KERNEL_TEXT_START	0xffffffff80000000UL
90 #ifndef __ASSEMBLY__
92 #include <asm/bug.h>
94 /* Pure 2^n version of get_order */
95 extern __inline__ int get_order(unsigned long size)
97 int order;
99 size = (size-1) >> (PAGE_SHIFT-1);
100 order = -1;
101 do {
102 size >>= 1;
103 order++;
104 } while (size);
105 return order;
108 #endif /* __ASSEMBLY__ */
#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
   Otherwise you risk miscompilation. */
/* Virtual-to-physical: kernel-text addresses subtract __START_KERNEL_map,
 * everything else subtracts the direct-map offset. */
#define __pa(x)			(((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
/* __pa_symbol should be used for C visible symbols.
   This seems to be the official gcc blessed way to do such arithmetic.
   The empty asm hides the symbol's address from the optimizer. */
#define __pa_symbol(x)		\
	({unsigned long v;	\
	  asm("" : "=r" (v) : "0" (x)); \
	  __pa(v); })

#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#ifndef CONFIG_DISCONTIGMEM
/* Flat (contiguous) memory model: page frame number indexes mem_map. */
#define pfn_to_page(pfn)	(mem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
133 #define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
134 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
135 #define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | VM_EXEC | \
136 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
138 #define VM_DATA_DEFAULT_FLAGS \
139 (test_thread_flag(TIF_IA32) ? vm_data_default_flags32 : \
140 vm_data_default_flags)
142 #define VM_STACK_DEFAULT_FLAGS \
143 (test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags)
145 #define CONFIG_ARCH_GATE_AREA 1
147 #ifndef __ASSEMBLY__
148 struct task_struct;
149 struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
150 int in_gate_area(struct task_struct *task, unsigned long addr);
151 #endif
153 #endif /* __KERNEL__ */
155 #endif /* _X86_64_PAGE_H */