/*
 * Provenance (gitweb capture header, preserved as a comment):
 * repo: pv_ops_mirror.git — include/asm-m68knommu/pgtable.h
 * blob: 46251016e8212bdfef68b594e6ad1cf0e939248d
 * (unrelated page title in the capture: "fs: use kmem_cache_zalloc instead")
 */
1 #ifndef _M68KNOMMU_PGTABLE_H
2 #define _M68KNOMMU_PGTABLE_H
4 #include <asm-generic/4level-fixup.h>
6 /*
7 * (C) Copyright 2000-2002, Greg Ungerer <gerg@snapgear.com>
8 */
10 #include <linux/slab.h>
11 #include <asm/processor.h>
12 #include <asm/page.h>
13 #include <asm/io.h>
16 * Trivial page table functions.
18 #define pgd_present(pgd) (1)
19 #define pgd_none(pgd) (0)
20 #define pgd_bad(pgd) (0)
21 #define pgd_clear(pgdp)
22 #define kern_addr_valid(addr) (1)
23 #define pmd_offset(a, b) ((void *)0)
25 #define PAGE_NONE __pgprot(0)
26 #define PAGE_SHARED __pgprot(0)
27 #define PAGE_COPY __pgprot(0)
28 #define PAGE_READONLY __pgprot(0)
29 #define PAGE_KERNEL __pgprot(0)
31 extern void paging_init(void);
32 #define swapper_pg_dir ((pgd_t *) 0)
34 #define __swp_type(x) (0)
35 #define __swp_offset(x) (0)
36 #define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
37 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
38 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
40 static inline int pte_file(pte_t pte) { return 0; }
43 * ZERO_PAGE is a global shared page that is always zero: used
44 * for zero-mapped memory areas etc..
46 #define ZERO_PAGE(vaddr) (virt_to_page(0))
49 * These would be in other places but having them here reduces the diffs.
51 extern unsigned int kobjsize(const void *objp);
54 * No page table caches to initialise.
56 #define pgtable_cache_init() do { } while (0)
58 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
59 remap_pfn_range(vma, vaddr, pfn, size, prot)
62 * All 32bit addresses are effectively valid for vmalloc...
63 * Sort of meaningless for non-VM targets.
65 #define VMALLOC_START 0
66 #define VMALLOC_END 0xffffffff
68 #include <asm-generic/pgtable.h>
70 #endif /* _M68KNOMMU_PGTABLE_H */