/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
#include <linux/mm_types.h>
#include <linux/mmzone.h>

#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>
struct vm_area_struct;
#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means.
 */

extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
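/*
 * Illustrative sketch (not part of this header): zero_page_mask lets
 * ZERO_PAGE() return a cache-colour-matched copy of the zero page for
 * the faulting address, avoiding virtual-alias problems on aliasing
 * D-caches. A hypothetical read-fault helper might do:
 *
 *	static struct page *zero_fill_page(unsigned long vaddr)
 *	{
 *		struct page *zp = ZERO_PAGE(vaddr);
 *
 *		get_page(zp);	// take a reference; caller maps it read-only
 *		return zp;
 *	}
 */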
extern void paging_init(void);
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PFN_PTE_SHIFT;
}
#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)
#define htw_stop()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while(0)
#define htw_start()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while(0)
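/*
 * htw_stop()/htw_start() nest via the per-CPU htw_seq counter: only the
 * outermost htw_stop() disables the hardware page table walker (PWEN)
 * and only the matching outermost htw_start() re-enables it. A typical
 * caller brackets a page table update so the walker cannot observe a
 * half-updated entry (sketch):
 *
 *	htw_stop();
 *	set_pte(ptep, pteval);
 *	htw_start();
 */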
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
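/*
 * Background on the buddy handling above: a MIPS TLB entry maps an
 * even/odd pair of virtual pages (EntryLo0/EntryLo1) with one effective
 * global bit, so _PAGE_GLOBAL must be kept identical in both PTEs of a
 * pair. ptep_buddy() returns the other PTE of the pair.
 */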
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte(ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte(ptep, __pte(0));
	htw_start();
}
#endif
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned int i;
	bool do_sync = false;

	for (i = 0; i < nr; i++) {
		if (!pte_present(pte))
			continue;
		if (pte_present(ptep[i]) &&
		    (pte_pfn(ptep[i]) == pte_pfn(pte)))
			continue;
		do_sync = true;
	}

	if (do_sync)
		__update_cache(addr, pte);

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}
#define set_ptes set_ptes
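/*
 * Contract sketch for set_ptes(): it installs "nr" PTEs for consecutive
 * pages, with the PFN advancing by one per entry, i.e. pte_val() grows
 * by (1UL << PFN_PTE_SHIFT) each step. An illustrative caller (values
 * hypothetical) mapping a run of pages:
 *
 *	set_ptes(mm, addr, ptep, mk_pte(page, vma->vm_page_prot), nr);
 */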
/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];
/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif

#define pte_sw_mkyoung	pte_mkyoung
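/*
 * Note on the _PAGE_SILENT_* pattern above: MIPS has no hardware-managed
 * accessed/dirty bits. _PAGE_SILENT_READ and _PAGE_SILENT_WRITE shadow
 * the TLB VALID and DIRTY bits, so the pte_mk*() helpers only set them
 * once the software _PAGE_ACCESSED/_PAGE_MODIFIED state allows, letting
 * the TLB refill handler install the entry without a further fault.
 */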
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_val(pmd) & _PAGE_HUGE)
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
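/*
 * Illustrative use (hypothetical driver, not part of this header):
 * apply pgprot_noncached() to a vma's protection before mapping device
 * registers so accesses bypass the cache:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, mydev_pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * "mydev_mmap" and "mydev_pfn" are illustrative names only.
 */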
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
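/*
 * Usual flow (sketch, assuming the caller holds the PTE lock): build an
 * entry from a struct page plus the vma's protection, then install it:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	set_pte(ptep, entry);
 */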
#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif
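/*
 * pte_modify() preserves the PFN and the software state covered by
 * _PAGE_CHG_MASK while replacing the protection bits; the generic
 * mprotect path uses it to re-protect an existing mapping, e.g.
 * (illustrative):
 *
 *	pte = pte_modify(pte, vm_get_page_prot(VM_READ));
 */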
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte.pte_low & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte.pte_low |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte.pte_low &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#else
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#endif
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		pte_t pte = *ptep;
		__update_tlb(vma, address, pte);
		if (--nr == 0)
			break;
		ptep++;
		address += PAGE_SIZE;
	}
}
#define update_mmu_cache(vma, address, ptep) \
	update_mmu_cache_range(NULL, vma, address, ptep, 1)

#define update_mmu_tlb_range(vma, address, ptep, nr) \
	update_mmu_cache_range(NULL, vma, address, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}
/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);
static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}
#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}
#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pmd;
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
				(pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}
/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif
#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)
/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* _ASM_PGTABLE_H */