/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>
struct vm_area_struct;
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
				 _page_cachable_default)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read.  Also, write
 * permissions imply read permissions.  This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values will be generated at runtime.
 */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)
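
/*
 * For illustration: these dummy slots are overwritten early in boot
 * (see setup_protection_map() in arch/mips/mm/cache.c) once the cache
 * attributes are known, conceptually along these lines (a simplified
 * sketch, not the exact code):
 *
 *	protection_map[0] = __pgprot(_page_cachable_default |
 *				     _PAGE_PRESENT | _PAGE_NO_READ);
 */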

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
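
/*
 * Usage sketch (hypothetical caller): the fault path can map the zero
 * page read-only instead of allocating memory, e.g.
 *
 *	struct page *zp = ZERO_PAGE(vmf->address);
 *
 * Because zero_page_mask selects a cache colour from the faulting
 * address, differently coloured aliases hit different copies of the
 * zero page (hence __HAVE_COLOR_ZERO_PAGE).
 */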

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)
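
/*
 * Usage sketch: htw_stop()/htw_start() must be balanced around any code
 * that rewrites live page tables, so the hardware page table walker
 * never observes a half-updated entry, e.g. (hypothetical caller):
 *
 *	htw_stop();
 *	set_pte_at(mm, addr, ptep, pteval);
 *	htw_start();
 *
 * The htw_seq counter makes the pair nest safely; only the outermost
 * htw_stop()/htw_start() actually toggles MIPS_PWCTL_PWEN.
 */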

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
	pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
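
/*
 * Background: a MIPS TLB entry maps an even/odd pair of virtual pages
 * (EntryLo0/EntryLo1), so a PTE and its "buddy" must agree on
 * _PAGE_GLOBAL or the hardware would see an inconsistent entry. The
 * buddy is the neighbouring PTE of that pair; a sketch of the lookup
 * (the real ptep_buddy() lives in pgtable-32.h/pgtable-64.h):
 *
 *	buddy = (pte_t *)((unsigned long)ptep ^ sizeof(pte_t));
 */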

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}

#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		if (kernel_uses_llsc && R10000_LLSC_WAR) {
			__asm__ __volatile__ (
			"	.set	arch=r4000			\n"
			"	.set	push				\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqzl	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			"	.set	mips0				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		} else if (kernel_uses_llsc) {
			__asm__ __volatile__ (
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	.set	push				\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqz	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			"	.set	mips0				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		}
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
#endif
}
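
/*
 * For readers unfamiliar with the inline asm above: the ll/sc sequence
 * is roughly equivalent to this C sketch, performed atomically (stop as
 * soon as the buddy is non-none, otherwise retry until the global bit
 * sticks):
 *
 *	do {
 *		tmp = buddy->pte;
 *		if (tmp != 0)
 *			break;
 *		tmp |= page_global;
 *	} while (!store_conditional(&buddy->pte, tmp));
 *
 * store_conditional() here is only a stand-in for the sc instruction.
 */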

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}
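
/*
 * Note on the flow above: __update_cache() is only needed when a PTE
 * becomes present with a new pfn; not-present PTEs and rewrites of the
 * same pfn branch straight to set_pte(). Typical caller (sketch):
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, mk_pte(page, vma->vm_page_prot));
 */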

/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
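
/*
 * Usage sketch (hypothetical driver): strongly uncached mappings are
 * the safe default for device registers, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */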

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
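
/*
 * Design note: unlike _CACHE_UNCACHED above, the write-combine
 * attribute is CPU-specific, so it is looked up from cpu_data at
 * runtime rather than hard-coded; on cores without write combining it
 * typically falls back to plain uncached.
 */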

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
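
/*
 * Example (sketch): mprotect()-style paths combine an existing PTE with
 * a new protection; _PAGE_CHG_MASK preserves the pfn plus the
 * dirty/accessed state, e.g. (hypothetical caller):
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */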

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}
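
/*
 * Design note: TLB refill on MIPS is software-managed, so nothing
 * snoops the page tables behind the kernel's back; the generic MM code
 * calls update_mmu_cache() after installing a PTE so that
 * __update_tlb() can (pre)load the new translation into the TLB.
 */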

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif
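
/*
 * Usage sketch (hypothetical driver mmap handler): the caller passes
 * the pfn from the vma and fixup_bigphys_addr() rewrites it into the
 * wider-than-32-bit physical window before the mapping is created:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *					  vma->vm_end - vma->vm_start,
 *					  pgprot_noncached(vma->vm_page_prot));
 *	}
 */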

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
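
/*
 * Worked example (sketch): collapsing to a huge mapping builds the pmd
 * directly from a page and protections, mirroring the pte helpers above
 * (hypothetical caller):
 *
 *	pmd_t entry = pmd_mkhuge(mk_pmd(page, vma->vm_page_prot));
 *	set_pmd_at(mm, haddr, pmdp, entry);
 */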

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access.
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */