/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <platform/hardware.h>

/*
 * Fixed TLB translations in the processor.
 */

#define XCHAL_KSEG_CACHED_VADDR	__XTENSA_UL_CONST(0xd0000000)
#define XCHAL_KSEG_BYPASS_VADDR	__XTENSA_UL_CONST(0xd8000000)
#define XCHAL_KSEG_PADDR	__XTENSA_UL_CONST(0x00000000)
#define XCHAL_KSEG_SIZE		__XTENSA_UL_CONST(0x08000000)
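
/*
 * For example, with the fixed KSEG window above the first 128 MB of
 * physical memory are statically visible twice in kernel virtual space:
 * physical 0x00123456 is reachable cached at 0xd0123456 and uncached
 * (bypass) at 0xd8123456.
 */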

/*
 * PAGE_SHIFT determines the page size
 */

#define PAGE_SHIFT	12
#define PAGE_SIZE	(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

#ifdef CONFIG_MMU
#define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN	XCHAL_KSEG_SIZE
#else
#define PAGE_OFFSET	__XTENSA_UL_CONST(0)
#define MAX_MEM_PFN	(PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
#endif

#define PGTABLE_START	0x80000000

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 *  |    |cache| cache index
 *  | pfn  |off|	virtual address
 *  |xxxx:X|zzz|
 *  |    : |   |
 *  | \  / |   |
 *  |trans.|   |
 *  | /  \ |   |
 *  |yyyy:Y|zzz|	physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by these bits) remains the same when pages are allocated or
 * remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
# define DCACHE_ALIAS(a)	((void)(a), 0)
#endif
#define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)
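
/*
 * Worked example (illustrative values only, assuming a 16 KiB cache way,
 * i.e. DCACHE_WAY_SHIFT == 14, and 4 KiB pages, PAGE_SHIFT == 12):
 *
 *   DCACHE_ALIAS_ORDER == 2, DCACHE_N_COLORS == 4
 *   DCACHE_ALIAS_MASK  == 0x3000
 *   DCACHE_ALIAS(0xd0005000) == 1, DCACHE_ALIAS(0xd0009000) == 1,
 *   DCACHE_ALIAS(0xd0006000) == 2
 *   DCACHE_ALIAS_EQ(0xd0005000, 0xd0009000) is true (same color),
 *   DCACHE_ALIAS_EQ(0xd0005000, 0xd0006000) is false (different colors).
 */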

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif

#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/*
 * Pure 2^n version of get_order
 * Use the 'nsau' instruction if supported by the processor, otherwise fall
 * back to the generic version.
 */

#if XCHAL_HAVE_NSA

static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}

#else

# include <asm-generic/getorder.h>

#endif
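
/*
 * Sketch of the expected behaviour (illustrative, assuming PAGE_SHIFT == 12):
 * 'nsau' returns the number of leading zero bits of its operand (32 for 0),
 * so get_order() rounds a byte count up to a power-of-two number of pages:
 *
 *   get_order(0x1000) == 0	(one 4 KiB page)
 *   get_order(0x5000) == 3	(20 KiB rounds up to 8 pages == 32 KiB)
 */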

struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work.
 */

#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
			    unsigned long to_paddr, unsigned long from_paddr);

#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif
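
/*
 * The generic mm code (e.g. the copy-on-write fault path) calls
 * clear_user_highpage()/copy_user_highpage() with the user virtual address.
 * With an aliasing, writeback D-cache the prototypes above allow the copy or
 * clear to go through a temporary kernel mapping of the matching cache color;
 * otherwise the plain clear_page()/copy_page() fallbacks are sufficient.
 */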

/*
 * This handles the memory map.  We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
 * These macros are for conversion of kernel addresses, not user
 * addresses.
 */

#define ARCH_PFN_OFFSET		(PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT)

#define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_valid(pfn) \
	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
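
/*
 * Worked example (illustrative, assuming CONFIG_MMU with
 * PLATFORM_DEFAULT_MEM_START == 0, so PAGE_OFFSET == 0xd0000000 and
 * ARCH_PFN_OFFSET == 0):
 *
 *   __pa(0xd0002000) == 0x00002000, i.e. pfn 2
 *   __va(0x00002000) == (void *)0xd0002000
 *   pfn_valid(2) is true as long as pfn 2 lies below max_mapnr.
 */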

#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */