arch/blackfin/include/asm/pgtable.h (mmotm.git)
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _BLACKFIN_PGTABLE_H
#define _BLACKFIN_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <asm/page.h>
#include <asm/def_LPBlackfin.h>

typedef pte_t *pte_addr_t;
/*
 * Trivial page table functions.
 */
#define pgd_present(pgd) (1)
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)

#define pmd_offset(a, b) ((void *)0)
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK)

#define kern_addr_valid(addr) (1)
#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */
#define pgprot_noncached(prot) (prot)

extern void paging_init(void);
#define __swp_type(x) (0)
#define __swp_offset(x) (0)
#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
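/*
 * Illustrative sketch (not part of the original header): __swp_entry() keeps
 * the swap type in the low bits and the offset shifted up by 7, e.g.
 *
 *	swp_entry_t e = __swp_entry(1, 2);	// e.val == (1 | (2 << 7)) == 0x101
 *	pte_t p = __swp_entry_to_pte(e);	// p carries the same raw value
 *
 * On this no-MMU port __swp_type() and __swp_offset() are hard-wired to 0,
 * so the encoding is never actually decoded.
 */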
static inline int pte_file(pte_t pte)
{
	return 0;
}

#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
/*
 * Page access control based on Blackfin CPLB management
 */
#define _PAGE_RD (CPLB_USER_RD)
#define _PAGE_WR (CPLB_USER_WR)
#define _PAGE_USER (CPLB_USER_RD | CPLB_USER_WR)
#define _PAGE_ACCESSED CPLB_ALL_ACCESS
#define _PAGE_DIRTY (CPLB_DIRTY)
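/*
 * Illustrative sketch (an assumption for clarity, not from the original
 * header): a user page that is readable, writable, referenced and dirty
 * would combine the bits above roughly as
 *
 *	unsigned long prot = _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY;
 *
 * which is simply CPLB_USER_RD | CPLB_USER_WR | CPLB_ALL_ACCESS | CPLB_DIRTY,
 * i.e. the raw Blackfin CPLB descriptor flags.
 */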
#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t _pte) { _pte.pte op; return _pte; }

PTE_BIT_FUNC(rdprotect, &= ~_PAGE_RD);
PTE_BIT_FUNC(mkread, |= _PAGE_RD);
PTE_BIT_FUNC(wrprotect, &= ~_PAGE_WR);
PTE_BIT_FUNC(mkwrite, |= _PAGE_WR);
PTE_BIT_FUNC(exprotect, &= ~_PAGE_USER);
PTE_BIT_FUNC(mkexec, |= _PAGE_USER);
PTE_BIT_FUNC(mkclean, &= ~_PAGE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= _PAGE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(mkyoung, |= _PAGE_ACCESSED);
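/*
 * Illustrative expansion (not part of the original header): each invocation
 * above generates one small helper.  PTE_BIT_FUNC(mkwrite, |= _PAGE_WR), for
 * instance, expands to
 *
 *	static inline pte_t pte_mkwrite(pte_t _pte)
 *	{
 *		_pte.pte |= _PAGE_WR;
 *		return _pte;
 *	}
 */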
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(0))
extern unsigned int kobjsize(const void *objp);

#define swapper_pg_dir ((pgd_t *) 0)

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init() do { } while (0)
#define io_remap_pfn_range remap_pfn_range
/*
 * All 32bit addresses are effectively valid for vmalloc...
 * Sort of meaningless for non-VM targets.
 */
#define VMALLOC_START 0
#define VMALLOC_END 0xffffffff

/* provide a special get_unmapped_area for framebuffer mmaps of nommu */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA
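/*
 * Hedged illustration (based on the generic fbdev code, not on this header):
 * defining HAVE_ARCH_FB_UNMAPPED_AREA lets the framebuffer core hook this
 * helper into its file_operations, roughly as
 *
 *	.get_unmapped_area = get_fb_unmapped_area,
 *
 * so that no-MMU framebuffer mmap() requests are placed by this arch helper.
 */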
#include <asm-generic/pgtable.h>

#endif /* _BLACKFIN_PGTABLE_H */