/*
 * Source provenance (from gitweb view, commit "x86: section/warning fixes"):
 * linux/fpc-iii.git: include/asm-powerpc/page_64.h
 * blob 25af4fc8daf40976d030c66dc553fd6c1802662e
 */
1 #ifndef _ASM_POWERPC_PAGE_64_H
2 #define _ASM_POWERPC_PAGE_64_H
4 /*
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
/*
 * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
 * specific, every notion of page number shared with the firmware, TCEs,
 * iommu, etc... still uses a page size of 4K.
 */
#define HW_PAGE_SHIFT		12
#define HW_PAGE_SIZE		(ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK		(~(HW_PAGE_SIZE-1))
/*
 * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
 * HW_PAGE_SHIFT, that is 4K pages.
 */
#define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)
/* Segment size; normal 256M segments */
#define SID_SHIFT		28
#define SID_MASK		ASM_CONST(0xfffffffff)
#define ESID_MASK		0xfffffffff0000000UL
#define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)
/* 1T segments */
#define SID_SHIFT_1T		40
#define SID_MASK_1T		0xffffffUL
#define ESID_MASK_1T		0xffffff0000000000UL
#define GET_ESID_1T(x)		(((x) >> SID_SHIFT_1T) & SID_MASK_1T)
40 #ifndef __ASSEMBLY__
41 #include <asm/cache.h>
43 typedef unsigned long pte_basic_t;
45 static __inline__ void clear_page(void *addr)
47 unsigned long lines, line_size;
49 line_size = ppc64_caches.dline_size;
50 lines = ppc64_caches.dlines_per_page;
52 __asm__ __volatile__(
53 "mtctr %1 # clear_page\n\
54 1: dcbz 0,%0\n\
55 add %0,%0,%3\n\
56 bdnz+ 1b"
57 : "=r" (addr)
58 : "r" (lines), "0" (addr), "r" (line_size)
59 : "ctr", "memory");
/* Assembly helper: copies one 4K hardware page (defined elsewhere). */
extern void copy_4K_page(void *to, void *from);

#ifdef CONFIG_PPC_64K_PAGES
/*
 * With 64K Linux pages, a page copy is a sequence of 4K hardware-page
 * copies: PAGE_SHIFT - 12 gives the log2 number of 4K sub-pages.
 */
static inline void copy_page(void *to, void *from)
{
	unsigned int i;

	for (i = 0; i < (1 << (PAGE_SHIFT - 12)); i++) {
		copy_4K_page(to, from);
		to += 4096;
		from += 4096;
	}
}
#else /* CONFIG_PPC_64K_PAGES */
/* 4K Linux pages: the Linux page and the hardware page coincide. */
static inline void copy_page(void *to, void *from)
{
	copy_4K_page(to, from);
}
#endif /* CONFIG_PPC_64K_PAGES */
81 /* Log 2 of page table size */
82 extern u64 ppc64_pft_size;
84 /* Large pages size */
85 #ifdef CONFIG_HUGETLB_PAGE
86 extern unsigned int HPAGE_SHIFT;
87 #else
88 #define HPAGE_SHIFT PAGE_SHIFT
89 #endif
90 #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
91 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
92 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
94 #endif /* __ASSEMBLY__ */
96 #ifdef CONFIG_PPC_MM_SLICES
/* Low slices are 256M (below 4G), high slices are 1T. */
#define SLICE_LOW_SHIFT		28
#define SLICE_HIGH_SHIFT	40

#define SLICE_LOW_TOP		(0x100000000ul)
#define SLICE_NUM_LOW		(SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
#define SLICE_NUM_HIGH		(PGTABLE_RANGE >> SLICE_HIGH_SHIFT)

#define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
108 #ifndef __ASSEMBLY__
110 struct slice_mask {
111 u16 low_slices;
112 u16 high_slices;
struct mm_struct;

/* Find a free address range honoring the mm's slice page-size layout. */
extern unsigned long slice_get_unmapped_area(unsigned long addr,
					     unsigned long len,
					     unsigned long flags,
					     unsigned int psize,
					     int topdown,
					     int use_cache);

/* Page-size index of the slice containing @addr in @mm. */
extern unsigned int get_slice_psize(struct mm_struct *mm,
				    unsigned long addr);

extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);

/* A context id of 0 marks an mm whose context is not yet initialized. */
#define slice_mm_new_context(mm)	((mm)->context.id == 0)
131 #endif /* __ASSEMBLY__ */
132 #else
/* No slice support: provide trivial stand-ins for the slice API. */
#define slice_init()
#define slice_set_user_psize(mm, psize)		\
do {						\
	(mm)->context.user_psize = (psize);	\
	(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
} while (0)
#define slice_mm_new_context(mm)	1
140 #endif /* CONFIG_PPC_MM_SLICES */
#ifdef CONFIG_HUGETLB_PAGE

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Page-align data; built-in (non-module) objects additionally land in
 * the .data.page_aligned section.
 */
#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned					\
	__attribute__((__aligned__(PAGE_SIZE),		\
		__section__(".data.page_aligned")))
#endif
/* Default VMA flags for data mappings, chosen per task bitness. */
#define VM_DATA_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable
 * stack by default, so in the absence of a PT_GNU_STACK program
 * header we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
176 #include <asm-generic/page.h>
178 #endif /* _ASM_POWERPC_PAGE_64_H */