include/asm-m68k/page.h
#ifndef _M68K_PAGE_H
#define _M68K_PAGE_H

#include <linux/config.h>

/* PAGE_SHIFT determines the page size */
#ifndef CONFIG_SUN3
#define PAGE_SHIFT	(12)
#else
#define PAGE_SHIFT	(13)
#endif
#ifdef __ASSEMBLY__
#define PAGE_SIZE	(1 << PAGE_SHIFT)
#else
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK	(~(PAGE_SIZE-1))

#ifdef __KERNEL__

#include <asm/setup.h>
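
/* Kernel stacks are 8 kB either way: two 4 kB pages, or a single 8 kB Sun3 page. */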
#if PAGE_SHIFT < 13
#define THREAD_SIZE	(8192)
#else
#define THREAD_SIZE	PAGE_SIZE
#endif

#ifndef __ASSEMBLY__

#define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
#define free_user_page(page, addr)	free_page(addr)

/*
 * We don't need to check for alignment etc.
 */
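/*
 * On the '040/'060, move16 copies an aligned 16-byte block between two
 * memory operands, so copy_page() below moves 32 bytes per dbra iteration.
 */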
#ifdef CPU_M68040_OR_M68060_ONLY
static inline void copy_page(void *to, void *from)
{
	unsigned long tmp;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %1@+,%0@+\n\t"
			     "move16 %1@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "dbra  %2,1b\n\t"
			     : "=a" (to), "=a" (from), "=d" (tmp)
			     : "0" (to), "1" (from), "2" (PAGE_SIZE / 32 - 1)
			     );
}

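/*
 * clear_page() zeroes the first 16 bytes by hand, then lets move16
 * propagate that cleared block across the rest of the page: the source
 * pointer is pulled back 16 bytes (two subqw #8) each iteration, so it
 * keeps re-reading the zeroed block at the start of the page.
 */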
static inline void clear_page(void *page)
{
	unsigned long tmp;
	unsigned long *sp = page;

	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %2@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "subqw  #8,%2\n\t"
			     "subqw  #8,%2\n\t"
			     "dbra   %1,1b\n\t"
			     : "=a" (sp), "=d" (tmp)
			     : "a" (page), "0" (sp),
			       "1" ((PAGE_SIZE - 16) / 16 - 1));
}

#else
#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)
#endif

#define clear_user_page(addr, vaddr, page)	\
	do {	clear_page(addr);		\
		flush_dcache_page(page);	\
	} while (0)
#define copy_user_page(to, from, vaddr, page)	\
	do {	copy_page(to, from);		\
		flush_dcache_page(page);	\
	} while (0)

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd[16]; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((&x)->pmd[0])
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)

/* Pure 2^n version of get_order */
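/*
 * get_order(size) returns the smallest order such that
 * (1 << order) * PAGE_SIZE covers size; e.g. with 4 kB pages,
 * get_order(8192) == 1 and get_order(8193) == 2.
 */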
static inline int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

#endif /* !__ASSEMBLY__ */

#include <asm/page_offset.h>

#define PAGE_OFFSET	(PAGE_OFFSET_RAW)

#ifndef __ASSEMBLY__

#ifndef CONFIG_SUN3

#define WANT_PAGE_VIRTUAL
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
extern unsigned long m68k_memoffset;

#define __pa(vaddr)	((unsigned long)(vaddr)+m68k_memoffset)
#define __va(paddr)	((void *)((unsigned long)(paddr)-m68k_memoffset))
#else
#define __pa(vaddr)	virt_to_phys((void *)vaddr)
#define __va(paddr)	phys_to_virt((unsigned long)paddr)
#endif

#else	/* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
#define __pa(x) ___pa((unsigned long)x)
static inline unsigned long ___pa(unsigned long x)
{
	if (x == 0)
		return 0;
	if (x >= PAGE_OFFSET)
		return (x - PAGE_OFFSET);
	else
		return (x + 0x2000000);
}

static inline void *__va(unsigned long x)
{
	if (x == 0)
		return (void *)0;

	if (x < 0x2000000)
		return (void *)(x + PAGE_OFFSET);
	else
		return (void *)(x - 0x2000000);
}
#endif	/* CONFIG_SUN3 */

/*
 * NOTE: virtual isn't really correct, actually it should be the offset into the
 * memory node, but we have no highmem, so that works for now.
 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
 * of the shifts unnecessary.
 */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_to_page(kaddr)	(mem_map + (((unsigned long)(kaddr)-PAGE_OFFSET) >> PAGE_SHIFT))
#define page_to_virt(page)	((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)

#define pfn_to_page(pfn)	virt_to_page(pfn_to_virt(pfn))
#define page_to_pfn(page)	virt_to_pfn(page_to_virt(page))

#define virt_addr_valid(kaddr)	((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */

#endif /* _M68K_PAGE_H */